===================================================================
@@ -2044,26 +2044,26 @@ integer_pow2p (const_tree expr)
prec = TYPE_PRECISION (TREE_TYPE (expr));
high = TREE_INT_CST_HIGH (expr);
low = TREE_INT_CST_LOW (expr);
/* First clear all bits that are beyond the type's precision in case
we've been sign extended. */
if (prec == HOST_BITS_PER_DOUBLE_INT)
;
else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
else
{
high = 0;
if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~((HOST_WIDE_INT) (-1) << prec);
+ low &= ~(HOST_WIDE_INT_M1U << prec);
}
if (high == 0 && low == 0)
return 0;
return ((high == 0 && (low & (low - 1)) == 0)
|| (low == 0 && (high & (high - 1)) == 0));
}
/* Return 1 if EXPR is an integer constant other than zero or a
@@ -2108,26 +2108,26 @@ tree_log2 (const_tree expr)
prec = TYPE_PRECISION (TREE_TYPE (expr));
high = TREE_INT_CST_HIGH (expr);
low = TREE_INT_CST_LOW (expr);
/* First clear all bits that are beyond the type's precision in case
we've been sign extended. */
if (prec == HOST_BITS_PER_DOUBLE_INT)
;
else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
else
{
high = 0;
if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~((HOST_WIDE_INT) (-1) << prec);
+ low &= ~(HOST_WIDE_INT_M1U << prec);
}
return (high != 0 ? HOST_BITS_PER_WIDE_INT + exact_log2 (high)
: exact_log2 (low));
}
/* Similar, but return the largest integer Y such that 2 ** Y is less
than or equal to EXPR. */
int
@@ -2145,26 +2145,26 @@ tree_floor_log2 (const_tree expr)
high = TREE_INT_CST_HIGH (expr);
low = TREE_INT_CST_LOW (expr);
/* First clear all bits that are beyond the type's precision in case
we've been sign extended. Ignore if type's precision hasn't been set
since what we are doing is setting it. */
if (prec == HOST_BITS_PER_DOUBLE_INT || prec == 0)
;
else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
else
{
high = 0;
if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~((HOST_WIDE_INT) (-1) << prec);
+ low &= ~(HOST_WIDE_INT_M1U << prec);
}
return (high != 0 ? HOST_BITS_PER_WIDE_INT + floor_log2 (high)
: floor_log2 (low));
}
/* Return 1 if EXPR is the real constant zero. Trailing zeroes matter for
decimal float constants, so don't return 1 for them. */
int
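
Reviewer note: all three hunks above repeat one masking idiom, clearing the bits at or above the type's precision in case the constant was sign-extended into the host word. The point of HOST_WIDE_INT_M1U is that shifting an unsigned all-ones value left is well defined, while the old (HOST_WIDE_INT) (-1) << prec left-shifts a negative value, which is undefined behavior in C. A minimal standalone sketch of the idiom, assuming HOST_BITS_PER_WIDE_INT == 64, with uint64_t and an illustrative helper name standing in for the GCC types:

  /* Editorial sketch only, not part of the patch.  */
  #include <assert.h>
  #include <stdint.h>

  /* Keep bits 0 .. PREC-1 of LOW; requires 0 < PREC < 64, matching the
     prec < HOST_BITS_PER_WIDE_INT guard above.  */
  static uint64_t
  clear_above_prec (uint64_t low, unsigned int prec)
  {
    return low & ~((uint64_t) -1 << prec);
  }

  int
  main (void)
  {
    /* A sign-extended 8-bit -1 becomes plain 0xff after masking.  */
    assert (clear_above_prec ((uint64_t) -1, 8) == 0xff);
    return 0;
  }
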
===================================================================
@@ -1988,28 +1988,27 @@ simplify_const_unary_operation (enum rtx
if (REAL_VALUES_LESS (t, x))
{
xh = th;
xl = tl;
break;
}
/* Test against the signed lower bound. */
if (width > HOST_BITS_PER_WIDE_INT)
{
- th = (unsigned HOST_WIDE_INT) (-1)
- << (width - HOST_BITS_PER_WIDE_INT - 1);
+ th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1);
tl = 0;
}
else
{
th = -1;
- tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
+ tl = HOST_WIDE_INT_M1U << (width - 1);
}
real_from_integer (&t, VOIDmode, tl, th, 0);
if (REAL_VALUES_LESS (x, t))
{
xh = th;
xl = tl;
break;
}
REAL_VALUE_TO_INT (&xl, &xh, x);
break;
@@ -4160,21 +4159,21 @@ simplify_const_binary_operation (enum rt
arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
return 0;
val = (code == ASHIFT
? ((unsigned HOST_WIDE_INT) arg0) << arg1
: ((unsigned HOST_WIDE_INT) arg0) >> arg1);
/* Sign-extend the result for arithmetic right shifts. */
if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
- val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
+ val |= HOST_WIDE_INT_M1U << (width - arg1);
break;
case ROTATERT:
if (arg1 < 0)
return 0;
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
| (((unsigned HOST_WIDE_INT) arg0) >> arg1));
break;
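
Reviewer note: the ASHIFTRT change above is the same fix in another role; the shift is done on the unsigned representation, and for a negative operand the vacated high bits are then refilled with copies of the sign bit. A hedged sketch, with uint64_t/int64_t standing in for the HOST_WIDE_INT pair (ARG0 is the mode-masked value and ARG0S its sign-extended reading, as in the surrounding code; the helper name is illustrative):

  #include <assert.h>
  #include <stdint.h>

  /* WIDTH is the mode's bit width, ARG1 the count (0 < ARG1 < WIDTH).  */
  static uint64_t
  ashiftrt (uint64_t arg0, int64_t arg0s, unsigned int width,
            unsigned int arg1)
  {
    uint64_t val = arg0 >> arg1;       /* logical shift first ...  */
    if (arg0s < 0 && arg1 > 0)         /* ... then refill the sign bits */
      val |= (uint64_t) -1 << (width - arg1);
    return val;
  }

  int
  main (void)
  {
    /* -8 >> 1 in an 8-bit mode is -4 (0xfc).  */
    assert ((uint8_t) ashiftrt (0xf8, -8, 8, 1) == 0xfc);
    return 0;
  }
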
===================================================================
@@ -5374,21 +5374,21 @@ cse_insn (rtx insn)
may not equal what was stored, due to truncation. */
if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT)
{
rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
if (src_const != 0 && CONST_INT_P (src_const)
&& CONST_INT_P (width)
&& INTVAL (width) < HOST_BITS_PER_WIDE_INT
&& ! (INTVAL (src_const)
- & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
+ & (HOST_WIDE_INT_M1U << INTVAL (width))))
/* Exception: if the value is constant,
and it won't be truncated, record it. */
;
else
{
/* This is chosen so that the destination will be invalidated
but no new value will be recorded.
We must invalidate because sometimes constant
values can be recorded for bitfields. */
sets[i].src_elt = 0;
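
Reviewer note: the predicate rewritten above asks whether SRC_CONST survives a store into a WIDTH-bit ZERO_EXTRACT field without truncation, which is true exactly when no bits at position WIDTH or above are set. A small model, assuming a 64-bit host word (uint64_t for the host-wide types, illustrative name):

  #include <assert.h>
  #include <stdint.h>

  /* Requires WIDTH < 64, matching the
     INTVAL (width) < HOST_BITS_PER_WIDE_INT guard above.  */
  static int
  fits_in_width (uint64_t c, unsigned int width)
  {
    return (c & ((uint64_t) -1 << width)) == 0;
  }

  int
  main (void)
  {
    assert (fits_in_width (0xff, 8));
    assert (!fits_in_width (0x100, 8));
    return 0;
  }
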
===================================================================
@@ -3733,32 +3733,30 @@ sign_bit_p (tree exp, const_tree val)
if (TREE_CODE (val) != INTEGER_CST
|| TREE_OVERFLOW (val))
return NULL_TREE;
width = TYPE_PRECISION (t);
if (width > HOST_BITS_PER_WIDE_INT)
{
hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1);
lo = 0;
- mask_hi = ((unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_DOUBLE_INT - width));
+ mask_hi = (HOST_WIDE_INT_M1U >> (HOST_BITS_PER_DOUBLE_INT - width));
mask_lo = -1;
}
else
{
hi = 0;
lo = (unsigned HOST_WIDE_INT) 1 << (width - 1);
mask_hi = 0;
- mask_lo = ((unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_WIDE_INT - width));
+ mask_lo = (HOST_WIDE_INT_M1U >> (HOST_BITS_PER_WIDE_INT - width));
}
/* We mask off those bits beyond TREE_TYPE (exp) so that we can
treat VAL as if it were unsigned. */
if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi
&& (TREE_INT_CST_LOW (val) & mask_lo) == lo)
return exp;
/* Handle extension from a narrower type. */
if (TREE_CODE (exp) == NOP_EXPR
@@ -4318,21 +4316,21 @@ build_range_check (location_t loc, tree
prec = TYPE_PRECISION (etype);
if (prec <= HOST_BITS_PER_WIDE_INT)
{
hi = 0;
lo = ((unsigned HOST_WIDE_INT) 1 << (prec - 1)) - 1;
}
else
{
hi = ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)) - 1;
- lo = (unsigned HOST_WIDE_INT) -1;
+ lo = HOST_WIDE_INT_M1U;
}
if (TREE_INT_CST_HIGH (high) == hi && TREE_INT_CST_LOW (high) == lo)
{
if (TYPE_UNSIGNED (etype))
{
tree signed_etype = signed_type_for (etype);
if (TYPE_PRECISION (signed_etype) != TYPE_PRECISION (etype))
etype
= build_nonstandard_integer_type (TYPE_PRECISION (etype), 0);
@@ -8089,21 +8087,21 @@ fold_unary_loc (location_t loc, enum tre
|| (TYPE_PRECISION (type)
<= TYPE_PRECISION (TREE_TYPE (and_expr))))
change = 1;
else if (TYPE_PRECISION (TREE_TYPE (and1))
<= HOST_BITS_PER_WIDE_INT
&& host_integerp (and1, 1))
{
unsigned HOST_WIDE_INT cst;
cst = tree_low_cst (and1, 1);
- cst &= (HOST_WIDE_INT) -1
+ cst &= HOST_WIDE_INT_M1U
<< (TYPE_PRECISION (TREE_TYPE (and1)) - 1);
change = (cst == 0);
#ifdef LOAD_EXTEND_OP
if (change
&& !flag_syntax_only
&& (LOAD_EXTEND_OP (TYPE_MODE (TREE_TYPE (and0)))
== ZERO_EXTEND))
{
tree uns = unsigned_type_for (TREE_TYPE (and0));
and0 = fold_convert_loc (loc, uns, and0);
@@ -11270,21 +11268,21 @@ fold_binary_loc (location_t loc,
unless (C1 & ~C2) | (C2 & C3) for some C3 is a mask of some
mode which allows further optimizations. */
c1 &= msk;
c2 &= msk;
c3 = c1.and_not (c2);
for (w = BITS_PER_UNIT;
w <= width && w <= HOST_BITS_PER_WIDE_INT;
w <<= 1)
{
unsigned HOST_WIDE_INT mask
- = (unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - w);
+ = HOST_WIDE_INT_M1U >> (HOST_BITS_PER_WIDE_INT - w);
if (((c1.low | c2.low) & mask) == mask
&& (c1.low & ~mask) == 0 && c1.high == 0)
{
c3 = double_int::from_uhwi (mask);
break;
}
}
if (c3 != c1)
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
fold_build2_loc (loc, BIT_AND_EXPR, type,
@@ -12290,21 +12288,21 @@ fold_binary_loc (location_t loc,
case ROUND_DIV_EXPR:
case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
if (integer_onep (arg1))
return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0));
if (integer_zerop (arg1))
return NULL_TREE;
/* X / -1 is -X. */
if (!TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
- && TREE_INT_CST_LOW (arg1) == (unsigned HOST_WIDE_INT) -1
+ && TREE_INT_CST_LOW (arg1) == HOST_WIDE_INT_M1U
&& TREE_INT_CST_HIGH (arg1) == -1)
return fold_convert_loc (loc, type, negate_expr (arg0));
/* Convert -A / -B to A / B when the type is signed and overflow is
undefined. */
if ((!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
&& TREE_CODE (arg0) == NEGATE_EXPR
&& negate_expr_p (arg1))
{
if (INTEGRAL_TYPE_P (type))
@@ -12373,21 +12371,21 @@ fold_binary_loc (location_t loc,
return NULL_TREE;
/* 0 % X is always zero, but be sure to preserve any side
effects in X. Place this after checking for X == 0. */
if (integer_zerop (arg0))
return omit_one_operand_loc (loc, type, integer_zero_node, arg1);
/* X % -1 is zero. */
if (!TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
- && TREE_INT_CST_LOW (arg1) == (unsigned HOST_WIDE_INT) -1
+ && TREE_INT_CST_LOW (arg1) == HOST_WIDE_INT_M1U
&& TREE_INT_CST_HIGH (arg1) == -1)
return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
/* X % -C is the same as X % C. */
if (code == TRUNC_MOD_EXPR
&& !TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
&& !TREE_OVERFLOW (arg1)
&& TREE_INT_CST_HIGH (arg1) < 0
&& !TYPE_OVERFLOW_TRAPS (type)
@@ -13528,42 +13526,42 @@ fold_binary_loc (location_t loc,
if (TYPE_UNSIGNED (arg1_type))
{
max_lo = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1;
min_lo = 0;
min_hi = 0;
}
else
{
max_lo = signed_max_lo;
- min_lo = ((unsigned HOST_WIDE_INT) -1 << (width - 1));
+ min_lo = (HOST_WIDE_INT_M1U << (width - 1));
min_hi = -1;
}
}
else
{
width -= HOST_BITS_PER_WIDE_INT;
signed_max_lo = -1;
signed_max_hi = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
max_lo = -1;
min_lo = 0;
if (TYPE_UNSIGNED (arg1_type))
{
max_hi = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1;
min_hi = 0;
}
else
{
max_hi = signed_max_hi;
- min_hi = ((unsigned HOST_WIDE_INT) -1 << (width - 1));
+ min_hi = (HOST_WIDE_INT_M1U << (width - 1));
}
}
if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) == max_hi
&& TREE_INT_CST_LOW (arg1) == max_lo)
switch (code)
{
case GT_EXPR:
return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
@@ -14168,38 +14166,38 @@ fold_ternary_loc (location_t loc, enum t
int inner_width, outer_width;
tree tem_type;
inner_width = TYPE_PRECISION (TREE_TYPE (tem));
outer_width = TYPE_PRECISION (TREE_TYPE (arg1));
if (outer_width > TYPE_PRECISION (type))
outer_width = TYPE_PRECISION (type);
if (outer_width > HOST_BITS_PER_WIDE_INT)
{
- mask_hi = ((unsigned HOST_WIDE_INT) -1
+ mask_hi = (HOST_WIDE_INT_M1U
>> (HOST_BITS_PER_DOUBLE_INT - outer_width));
mask_lo = -1;
}
else
{
mask_hi = 0;
- mask_lo = ((unsigned HOST_WIDE_INT) -1
+ mask_lo = (HOST_WIDE_INT_M1U
>> (HOST_BITS_PER_WIDE_INT - outer_width));
}
if (inner_width > HOST_BITS_PER_WIDE_INT)
{
- mask_hi &= ~((unsigned HOST_WIDE_INT) -1
+ mask_hi &= ~(HOST_WIDE_INT_M1U
>> (HOST_BITS_PER_WIDE_INT - inner_width));
mask_lo = 0;
}
else
- mask_lo &= ~((unsigned HOST_WIDE_INT) -1
+ mask_lo &= ~(HOST_WIDE_INT_M1U
>> (HOST_BITS_PER_WIDE_INT - inner_width));
if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == mask_hi
&& (TREE_INT_CST_LOW (arg1) & mask_lo) == mask_lo)
{
tem_type = signed_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
}
else if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == 0
&& (TREE_INT_CST_LOW (arg1) & mask_lo) == 0)
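
Reviewer note: several of the hunks above build signed minimum/maximum bounds for a WIDTH-bit type held in a wider host word. The minimum's word is the all-ones value shifted left by WIDTH - 1, and the corresponding maximum is its complement; with the unsigned macro that shift is well defined. A worked check for WIDTH == 8, assuming a 64-bit, two's-complement host word:

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    unsigned int width = 8;                          /* e.g. a signed char */
    uint64_t min_lo = (uint64_t) -1 << (width - 1);  /* 0xff...80 */
    uint64_t max_lo = ~min_lo;  /* 0x7f, i.e. (1 << (width - 1)) - 1 */
    assert ((int64_t) min_lo == -128);   /* two's-complement reading */
    assert (max_lo == 0x7f);
    return 0;
  }
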
===================================================================
@@ -8113,22 +8113,22 @@ force_to_mode (rtx x, enum machine_mode
{
unsigned HOST_WIDE_INT cval
= UINTVAL (XEXP (x, 1))
| (GET_MODE_MASK (GET_MODE (x)) & ~mask);
int width = GET_MODE_PRECISION (GET_MODE (x));
rtx y;
/* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
number, sign extend it. */
if (width > 0 && width < HOST_BITS_PER_WIDE_INT
- && (cval & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
- cval |= (unsigned HOST_WIDE_INT) -1 << width;
+ && (cval & (HOST_WIDE_INT_1U << (width - 1))) != 0)
+ cval |= HOST_WIDE_INT_M1U << width;
y = simplify_gen_binary (AND, GET_MODE (x),
XEXP (x, 0), GEN_INT (cval));
if (set_src_cost (y, optimize_this_for_speed_p)
< set_src_cost (x, optimize_this_for_speed_p))
x = y;
}
break;
}
@@ -8142,22 +8142,22 @@ force_to_mode (rtx x, enum machine_mode
This may eliminate that PLUS and, later, the AND. */
{
unsigned int width = GET_MODE_PRECISION (mode);
unsigned HOST_WIDE_INT smask = mask;
/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
number, sign extend it. */
if (width < HOST_BITS_PER_WIDE_INT
- && (smask & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
- smask |= (unsigned HOST_WIDE_INT) (-1) << width;
+ && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
+ smask |= HOST_WIDE_INT_M1U << width;
if (CONST_INT_P (XEXP (x, 1))
&& exact_log2 (- smask) >= 0
&& (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
&& (INTVAL (XEXP (x, 1)) & ~smask) != 0)
return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
(INTVAL (XEXP (x, 1)) & smask)),
mode, smask, next_select);
}
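
Reviewer note: both force_to_mode hunks sign-extend a constant that is narrower than the host word; if the mode's sign bit is set, all-ones is ORed into the bits above the mode, now spelled with HOST_WIDE_INT_1U and HOST_WIDE_INT_M1U. A minimal sketch of the idiom (uint64_t assumed for unsigned HOST_WIDE_INT, helper name illustrative):

  #include <assert.h>
  #include <stdint.h>

  /* Requires 0 < WIDTH < 64, as in the guards above.  */
  static uint64_t
  sign_extend (uint64_t cval, unsigned int width)
  {
    if ((cval & ((uint64_t) 1 << (width - 1))) != 0)
      cval |= (uint64_t) -1 << width;
    return cval;
  }

  int
  main (void)
  {
    assert (sign_extend (0x80, 8) == (uint64_t) -128);
    assert (sign_extend (0x7f, 8) == 0x7f);
    return 0;
  }
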
===================================================================
@@ -264,27 +264,27 @@ rshift_double (unsigned HOST_WIDE_INT l1
if (count >= prec)
{
*hv = signmask;
*lv = signmask;
}
else if ((prec - count) >= HOST_BITS_PER_DOUBLE_INT)
;
else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
{
- *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
+ *hv &= ~(HOST_WIDE_INT_M1U << (prec - count - HOST_BITS_PER_WIDE_INT));
*hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
}
else
{
*hv = signmask;
- *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
+ *lv &= ~(HOST_WIDE_INT_M1U << (prec - count));
*lv |= signmask << (prec - count);
}
}
/* Shift the doubleword integer in L1, H1 left by COUNT places
keeping only PREC bits of result.
Shift right if COUNT is negative.
ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
@@ -321,27 +321,27 @@ lshift_double (unsigned HOST_WIDE_INT l1
signmask = -((prec > HOST_BITS_PER_WIDE_INT
? ((unsigned HOST_WIDE_INT) *hv
>> (prec - HOST_BITS_PER_WIDE_INT - 1))
: (*lv >> (prec - 1))) & 1);
if (prec >= HOST_BITS_PER_DOUBLE_INT)
;
else if (prec >= HOST_BITS_PER_WIDE_INT)
{
- *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ *hv &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
*hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
}
else
{
*hv = signmask;
- *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
+ *lv &= ~(HOST_WIDE_INT_M1U << prec);
*lv |= signmask << prec;
}
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
CODE is a tree code for a kind of division, one of
TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
or EXACT_DIV_EXPR.
It controls how the quotient is rounded to an integer.
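
Reviewer note: the two shift hunks above share the same tail; after the raw double-word shift, every bit from position PREC - COUNT upward must be replaced by a copy of the sign. A hedged model of just that fixup, assuming 64-bit words (so HOST_BITS_PER_DOUBLE_INT is 128) and illustrative names:

  #include <assert.h>
  #include <stdint.h>

  /* Re-extend the sign after a raw logical shift: LV/HV hold the low and
     high words, SIGNMASK is 0 or all-ones, and COUNT < PREC <= 128.  */
  static void
  fix_high_bits (uint64_t *lv, uint64_t *hv, uint64_t signmask,
                 unsigned int prec, unsigned int count)
  {
    if (prec - count >= 128)        /* models HOST_BITS_PER_DOUBLE_INT */
      ;                             /* every result bit is a value bit */
    else if (prec - count >= 64)    /* boundary falls in the high word */
      {
        *hv &= ~((uint64_t) -1 << (prec - count - 64));
        *hv |= signmask << (prec - count - 64);
      }
    else                            /* boundary falls in the low word */
      {
        *hv = signmask;
        *lv &= ~((uint64_t) -1 << (prec - count));
        *lv |= signmask << (prec - count);
      }
  }

  int
  main (void)
  {
    /* 16-bit -16 shifted right by 4: the raw logical shift leaves 0x0fff;
       the fixup restores the sign bits, giving -1 across both words.  */
    uint64_t lv = 0x0fff, hv = 0;
    fix_high_bits (&lv, &hv, (uint64_t) -1, 16, 4);
    assert (lv == (uint64_t) -1 && hv == (uint64_t) -1);
    return 0;
  }
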
===================================================================
@@ -98,21 +98,21 @@ reachable_at_most_once (basic_block va_a
}
}
stack.release ();
sbitmap_free (visited);
return ret;
}
/* For statement COUNTER = RHS, if RHS is COUNTER + constant,
- return constant, otherwise return (unsigned HOST_WIDE_INT) -1.
+ return constant, otherwise return HOST_WIDE_INT_M1U.
GPR_P is true if this is GPR counter. */
static unsigned HOST_WIDE_INT
va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs,
bool gpr_p)
{
tree lhs, orig_lhs;
gimple stmt;
unsigned HOST_WIDE_INT ret = 0, val, counter_val;
unsigned int max_size;
@@ -142,21 +142,21 @@ va_list_counter_bump (struct stdarg_info
break;
}
ret -= counter_val - si->offsets[SSA_NAME_VERSION (lhs)];
break;
}
stmt = SSA_NAME_DEF_STMT (lhs);
if (!is_gimple_assign (stmt) || gimple_assign_lhs (stmt) != lhs)
- return (unsigned HOST_WIDE_INT) -1;
+ return HOST_WIDE_INT_M1U;
rhs_code = gimple_assign_rhs_code (stmt);
rhs1 = gimple_assign_rhs1 (stmt);
if ((get_gimple_rhs_class (rhs_code) == GIMPLE_SINGLE_RHS
|| gimple_assign_cast_p (stmt))
&& TREE_CODE (rhs1) == SSA_NAME)
{
lhs = rhs1;
continue;
}
@@ -175,35 +175,35 @@ va_list_counter_bump (struct stdarg_info
&& TREE_CODE (TREE_OPERAND (rhs1, 0)) == MEM_REF
&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0)) == SSA_NAME
&& host_integerp (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1))
{
ret += tree_low_cst (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1);
lhs = TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0);
continue;
}
if (get_gimple_rhs_class (rhs_code) != GIMPLE_SINGLE_RHS)
- return (unsigned HOST_WIDE_INT) -1;
+ return HOST_WIDE_INT_M1U;
rhs = gimple_assign_rhs1 (stmt);
if (TREE_CODE (counter) != TREE_CODE (rhs))
- return (unsigned HOST_WIDE_INT) -1;
+ return HOST_WIDE_INT_M1U;
if (TREE_CODE (counter) == COMPONENT_REF)
{
if (get_base_address (counter) != get_base_address (rhs)
|| TREE_CODE (TREE_OPERAND (rhs, 1)) != FIELD_DECL
|| TREE_OPERAND (counter, 1) != TREE_OPERAND (rhs, 1))
- return (unsigned HOST_WIDE_INT) -1;
+ return HOST_WIDE_INT_M1U;
}
else if (counter != rhs)
- return (unsigned HOST_WIDE_INT) -1;
+ return HOST_WIDE_INT_M1U;
lhs = NULL;
}
lhs = orig_lhs;
val = ret + counter_val;
while (lhs)
{
enum tree_code rhs_code;
tree rhs1;
@@ -394,21 +394,21 @@ va_list_ptr_read (struct stdarg_info *si
"in bb%d\n", si->bb->index, si->compute_sizes ? "" : "not ",
si->va_start_bb->index);
}
/* For void * or char * va_list types, there is just one counter.
If va_arg is used in a loop, we don't know how many registers need
saving. */
if (! si->compute_sizes)
return false;
- if (va_list_counter_bump (si, ap, tem, true) == (unsigned HOST_WIDE_INT) -1)
+ if (va_list_counter_bump (si, ap, tem, true) == HOST_WIDE_INT_M1U)
return false;
/* Note the temporary, as we need to track whether it doesn't escape
the current function. */
bitmap_set_bit (si->va_list_escape_vars, SSA_NAME_VERSION (tem));
return true;
}
@@ -497,21 +497,21 @@ check_va_list_escapes (struct stdarg_inf
/* For void * or char * va_list types, there is just one counter.
If va_arg is used in a loop, we don't know how many registers need
saving. */
if (! si->compute_sizes)
{
si->va_list_escapes = true;
return;
}
if (va_list_counter_bump (si, si->va_start_ap, lhs, true)
- == (unsigned HOST_WIDE_INT) -1)
+ == HOST_WIDE_INT_M1U)
{
si->va_list_escapes = true;
return;
}
bitmap_set_bit (si->va_list_escape_vars, SSA_NAME_VERSION (lhs));
}
/* Check all uses of temporaries from si->va_list_escape_vars bitmap.
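
Reviewer note: throughout this file the unsigned all-ones value serves as a "could not analyze" sentinel, which is safe because a genuine bump is a small nonnegative count; HOST_WIDE_INT_M1U makes that convention explicit at each return. A toy model of the convention (illustrative names, uint64_t assumed for unsigned HOST_WIDE_INT):

  #include <assert.h>
  #include <stdint.h>

  #define UNKNOWN_BUMP ((uint64_t) -1)   /* models HOST_WIDE_INT_M1U */

  /* Return the counter increment if it could be analyzed, else the
     sentinel.  Callers compare against UNKNOWN_BUMP, never against -1
     of some signed type.  */
  static uint64_t
  counter_bump (int analyzable, uint64_t delta)
  {
    return analyzable ? delta : UNKNOWN_BUMP;
  }

  int
  main (void)
  {
    assert (counter_bump (0, 16) == UNKNOWN_BUMP);
    assert (counter_bump (1, 16) == 16);
    return 0;
  }
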
===================================================================
@@ -4024,21 +4024,21 @@ nonzero_bits1 (const_rtx x, enum machine
return nonzero_for_hook;
}
case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
/* If X is negative in MODE, sign-extend the value. */
if (INTVAL (x) > 0
&& mode_width < BITS_PER_WORD
&& (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
!= 0)
- return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
+ return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
#endif
return UINTVAL (x);
case MEM:
#ifdef LOAD_EXTEND_OP
/* In many, if not most, RISC machines, reading a byte from memory
zeros the rest of the register. Noticing that fact saves a lot
of extra zero-extends. */
if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
===================================================================
@@ -475,21 +475,21 @@ struct constraint_expr
/* Offset, in bits, of this constraint from the beginning of
variables it ends up referring to.
IOW, in a deref constraint, we would deref, get the result set,
then add OFFSET to each member. */
HOST_WIDE_INT offset;
};
/* Use 0x8000... as special unknown offset. */
-#define UNKNOWN_OFFSET ((HOST_WIDE_INT)-1 << (HOST_BITS_PER_WIDE_INT-1))
+#define UNKNOWN_OFFSET HOST_WIDE_INT_MIN
typedef struct constraint_expr ce_s;
static void get_constraint_for_1 (tree, vec<ce_s> *, bool, bool);
static void get_constraint_for (tree, vec<ce_s> *);
static void get_constraint_for_rhs (tree, vec<ce_s> *);
static void do_deref (vec<ce_s> *);
/* Our set constraints are made up of two constraint expressions, one
LHS, and one RHS.
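
Reviewer note on the UNKNOWN_OFFSET change above: the old expression shifted a signed -1 into the sign-bit position, which is undefined behavior, and the value it was trying to spell is simply the most negative HOST_WIDE_INT. A standalone check of that identity, assuming a 64-bit, two's-complement host word (int64_t and INT64_MIN standing in for HOST_WIDE_INT and HOST_WIDE_INT_MIN):

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    /* The sign bit alone, computed without shifting a negative value.  */
    assert ((uint64_t) INT64_MIN == (uint64_t) 1 << 63);
    return 0;
  }
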
===================================================================
@@ -8093,28 +8093,27 @@ fold_builtin_bitop (tree fndecl, tree ar
type = TREE_TYPE (arg);
width = TYPE_PRECISION (type);
lo = TREE_INT_CST_LOW (arg);
/* Clear all the bits that are beyond the type's precision. */
if (width > HOST_BITS_PER_WIDE_INT)
{
hi = TREE_INT_CST_HIGH (arg);
if (width < HOST_BITS_PER_DOUBLE_INT)
- hi &= ~((unsigned HOST_WIDE_INT) (-1)
- << (width - HOST_BITS_PER_WIDE_INT));
+ hi &= ~(HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT));
}
else
{
hi = 0;
if (width < HOST_BITS_PER_WIDE_INT)
- lo &= ~((unsigned HOST_WIDE_INT) (-1) << width);
+ lo &= ~(HOST_WIDE_INT_M1U << width);
}
switch (DECL_FUNCTION_CODE (fndecl))
{
CASE_INT_FN (BUILT_IN_FFS):
if (lo != 0)
result = ffs_hwi (lo);
else if (hi != 0)
result = HOST_BITS_PER_WIDE_INT + ffs_hwi (hi);
else
@@ -8137,27 +8136,27 @@ fold_builtin_bitop (tree fndecl, tree ar
result = HOST_BITS_PER_WIDE_INT + ctz_hwi (hi);
else if (! CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result))
result = width;
break;
CASE_INT_FN (BUILT_IN_CLRSB):
if (width > HOST_BITS_PER_WIDE_INT
&& (hi & ((unsigned HOST_WIDE_INT) 1
<< (width - HOST_BITS_PER_WIDE_INT - 1))) != 0)
{
- hi = ~hi & ~((unsigned HOST_WIDE_INT) (-1)
+ hi = ~hi & ~(HOST_WIDE_INT_M1U
<< (width - HOST_BITS_PER_WIDE_INT - 1));
lo = ~lo;
}
else if (width <= HOST_BITS_PER_WIDE_INT
&& (lo & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
- lo = ~lo & ~((unsigned HOST_WIDE_INT) (-1) << (width - 1));
+ lo = ~lo & ~(HOST_WIDE_INT_M1U << (width - 1));
if (hi != 0)
result = width - floor_log2 (hi) - 2 - HOST_BITS_PER_WIDE_INT;
else if (lo != 0)
result = width - floor_log2 (lo) - 2;
else
result = width - 1;
break;
CASE_INT_FN (BUILT_IN_POPCOUNT):
result = 0;
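
Reviewer note: the CLRSB case above is the subtlest of these folds; for a negative argument it complements the bits below the sign bit, then counts with floor_log2. A hedged single-word model for width <= 64 (uint64_t assumed for the host word; floor_log2_u64 is an illustrative stand-in for GCC's floor_log2, returning -1 for 0):

  #include <assert.h>
  #include <stdint.h>

  static int
  floor_log2_u64 (uint64_t x)   /* position of highest set bit; -1 for 0 */
  {
    int n = -1;
    while (x != 0)
      {
        n++;
        x >>= 1;
      }
    return n;
  }

  static int
  clrsb (uint64_t lo, unsigned int width)   /* 0 < width <= 64 */
  {
    if ((lo & ((uint64_t) 1 << (width - 1))) != 0)   /* negative: flip  */
      lo = ~lo & ~((uint64_t) -1 << (width - 1));    /* below sign bit  */
    return lo != 0 ? (int) width - floor_log2_u64 (lo) - 2
                   : (int) width - 1;
  }

  int
  main (void)
  {
    assert (clrsb (0, 32) == 31);           /* 0: all bits copy the sign */
    assert (clrsb (1, 32) == 30);
    assert (clrsb (0xffffffff, 32) == 31);  /* 32-bit -1: likewise */
    return 0;
  }
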
===================================================================
@@ -3690,39 +3690,39 @@ expand_smod_pow2 (enum machine_mode mode
}
/* Mask contains the mode's signbit and the significant bits of the
modulus. By including the signbit in the operation, many targets
can avoid an explicit compare operation in the following comparison
against zero. */
masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
- masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
+ masklow |= HOST_WIDE_INT_M1U << (GET_MODE_BITSIZE (mode) - 1);
maskhigh = -1;
}
else
- maskhigh = (HOST_WIDE_INT) -1
+ maskhigh = HOST_WIDE_INT_M1U
<< (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
temp = expand_binop (mode, and_optab, op0,
immed_double_const (masklow, maskhigh, mode),
result, 1, OPTAB_LIB_WIDEN);
if (temp != result)
emit_move_insn (result, temp);
label = gen_label_rtx ();
do_cmp_and_jump (result, const0_rtx, GE, mode, label);
temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
- masklow = (HOST_WIDE_INT) -1 << logd;
+ masklow = HOST_WIDE_INT_M1U << logd;
maskhigh = -1;
temp = expand_binop (mode, ior_optab, temp,
immed_double_const (masklow, maskhigh, mode),
result, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
if (temp != result)
emit_move_insn (result, temp);
emit_label (label);
return result;
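
Reviewer note on expand_smod_pow2 above: the mask deliberately contains both the mode's sign bit and the low LOGD bits, so a single AND yields a value whose sign says whether the fixup path is needed; for nonnegative operands the AND already produces the remainder, and a negative result branches to the correction sequence. A worked illustration of the mask for a 32-bit mode and d == 8, assuming a 64-bit host word into which narrow-mode constants are sign-extended:

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    unsigned int bits = 32, logd = 3;               /* mode width; d == 8 */
    uint64_t masklow = ((uint64_t) 1 << logd) - 1;  /* low bits: 0x7 */
    masklow |= (uint64_t) -1 << (bits - 1);         /* plus the sign bit,
                                                       sign-extended through
                                                       the host word */
    assert ((uint32_t) masklow == 0x80000007);
    return 0;
  }
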
===================================================================
@@ -103,21 +103,25 @@ extern char sizeof_long_long_must_be_8[s
# define HOST_HALF_WIDE_INT_PRINT_DEC "%" HOST_HALF_WIDE_INT_PRINT "d"
# define HOST_HALF_WIDE_INT_PRINT_DEC_C HOST_HALF_WIDE_INT_PRINT_DEC HOST_HALF_WIDE_INT_PRINT_C
# define HOST_HALF_WIDE_INT_PRINT_UNSIGNED "%" HOST_HALF_WIDE_INT_PRINT "u"
# define HOST_HALF_WIDE_INT_PRINT_HEX "%#" HOST_HALF_WIDE_INT_PRINT "x"
# define HOST_HALF_WIDE_INT_PRINT_HEX_PURE "%" HOST_HALF_WIDE_INT_PRINT "x"
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif
+#define HOST_WIDE_INT_UC(X) HOST_WIDE_INT_C(X ## U)
#define HOST_WIDE_INT_1 HOST_WIDE_INT_C(1)
+#define HOST_WIDE_INT_1U HOST_WIDE_INT_UC(1)
+#define HOST_WIDE_INT_M1 HOST_WIDE_INT_C(-1)
+#define HOST_WIDE_INT_M1U HOST_WIDE_INT_UC(-1)
/* This is a magic identifier which allows GCC to figure out the type
of HOST_WIDE_INT for %wd specifier checks. You must issue this
typedef before using the __asm_fprintf__ format attribute. */
typedef HOST_WIDE_INT __gcc_host_wide_int__;
/* Various printf format strings for HOST_WIDE_INT. */
#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_WIDE_INT_PRINT HOST_LONG_FORMAT
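
Reviewer note: a sketch of how the new macro family is meant to expand, under the assumption of a host where HOST_WIDE_INT is long long and HOST_WIDE_INT_C (X) appends an LL suffix (the real suffix is configuration-dependent). HOST_WIDE_INT_M1U goes through HOST_WIDE_INT_UC (-1), which token-pastes U onto the literal, giving -1ULL: an unsigned all-ones constant that can be shifted without undefined behavior. A standalone model:

  #include <assert.h>

  #define MY_C(X)  X ## LL        /* models HOST_WIDE_INT_C for an LL host */
  #define MY_UC(X) MY_C (X ## U)  /* models HOST_WIDE_INT_UC */
  #define MY_M1U   MY_UC (-1)     /* models HOST_WIDE_INT_M1U: -1ULL */

  int
  main (void)
  {
    assert (MY_M1U == 0xffffffffffffffffULL);
    /* Defined behavior, unlike (long long) -1 << 8.  */
    assert ((MY_M1U << 8) == 0xffffffffffffff00ULL);
    return 0;
  }
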