@@ -241,8 +241,10 @@ streamer_read_value_range (class lto_input_block *ib, data_in *data_in,
int_range<2> tmp (type, lb, ub);
r.union_ (tmp);
}
- wide_int nz = streamer_read_wide_int (ib);
- r.set_nonzero_bits (nz);
+ wide_int value = streamer_read_wide_int (ib);
+ wide_int mask = streamer_read_wide_int (ib);
+ irange_bitmask bm (value, mask);
+ r.update_bitmask (bm);
return;
}
if (is_a <frange> (vr))
@@ -423,7 +423,10 @@ streamer_write_vrange (struct output_block *ob, const vrange &v)
streamer_write_wide_int (ob, r.lower_bound (i));
streamer_write_wide_int (ob, r.upper_bound (i));
}
- streamer_write_wide_int (ob, r.get_nonzero_bits ());
+ // TODO: We could avoid streaming out the value if the mask is -1.
+ irange_bitmask bm = r.get_bitmask ();
+ streamer_write_wide_int (ob, bm.value ());
+ streamer_write_wide_int (ob, bm.mask ());
return;
}
if (is_a <frange> (v))
@@ -2876,16 +2876,20 @@ operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
return true;
}
- // Update the nonzero mask. Truncating casts are problematic unless
+ // Update the bitmask. Truncating casts are problematic unless
// the conversion fits in the resulting outer type.
- wide_int nz = inner.get_nonzero_bits ();
+ irange_bitmask bm = inner.get_bitmask ();
if (truncating_cast_p (inner, outer)
- && wi::rshift (nz, wi::uhwi (TYPE_PRECISION (outer.type ()),
- TYPE_PRECISION (inner.type ())),
+ && wi::rshift (bm.mask (),
+ wi::uhwi (TYPE_PRECISION (outer.type ()),
+ TYPE_PRECISION (inner.type ())),
TYPE_SIGN (inner.type ())) != 0)
return true;
- nz = wide_int::from (nz, TYPE_PRECISION (type), TYPE_SIGN (inner.type ()));
- r.set_nonzero_bits (nz);
+ unsigned prec = TYPE_PRECISION (type);
+ signop sign = TYPE_SIGN (inner.type ());
+ bm = irange_bitmask (wide_int::from (bm.value (), prec, sign),
+ wide_int::from (bm.mask (), prec, sign));
+ r.update_bitmask (bm);
return true;
}
@@ -12,4 +12,4 @@ void saxpy(size_t n)
foobar (n);
}
-// { dg-final { scan-tree-dump "NONZERO.*fff8" "dom2" } }
+// { dg-final { scan-tree-dump "fff8 VALUE 0x0" "dom2" } }
@@ -39,4 +39,4 @@ void func (unsigned n, unsigned m)
/* { dg-final { scan-tree-dump-not "dead" "vrp1" } } */
/* { dg-final { scan-tree-dump-times "builtin_unreachable" 1 "vrp1" } } */
/* { dg-final { scan-tree-dump-not "builtin_unreachable" "vrp2" } } */
-/* { dg-final { scan-tree-dump-times "fff8" 4 "vrp2" } } */
+/* { dg-final { scan-tree-dump-times "fff8 VALUE 0x0" 4 "vrp2" } } */
@@ -16,4 +16,4 @@ int f(unsigned t)
return 0;
}
-// { dg-final { scan-tree-dump "Global Exported: g_.* NONZERO 0x.*fff0" "evrp" } }
+// { dg-final { scan-tree-dump "Global Exported: g_.* MASK 0x1 VALUE 0x0" "evrp" } }
@@ -94,13 +94,16 @@ vrange_printer::print_irange_bound (const wide_int &bound, tree type) const
void
vrange_printer::print_irange_bitmasks (const irange &r) const
{
- wide_int nz = r.get_nonzero_bits ();
- if (nz == -1)
+ irange_bitmask bm = r.m_bitmask;
+ if (bm.unknown_p ())
return;
- pp_string (pp, " NONZERO ");
+ pp_string (pp, " MASK ");
char buf[WIDE_INT_PRINT_BUFFER_SIZE];
- print_hex (nz, buf);
+ print_hex (bm.mask (), buf);
+ pp_string (pp, buf);
+ pp_string (pp, " VALUE ");
+ print_hex (bm.value (), buf);
pp_string (pp, buf);
}
@@ -232,7 +232,7 @@ vrange_storage::equal_p (const vrange &r) const
unsigned char *
irange_storage::write_lengths_address ()
{
- return (unsigned char *)&m_val[(m_num_ranges * 2 + 1)
+ return (unsigned char *)&m_val[(m_num_ranges * 2 + 2)
* WIDE_INT_MAX_HWIS (m_precision)];
}
@@ -301,7 +301,11 @@ irange_storage::set_irange (const irange &r)
write_wide_int (val, len, r.lower_bound (i));
write_wide_int (val, len, r.upper_bound (i));
}
- write_wide_int (val, len, r.m_nonzero_mask);
+
+ // TODO: We could avoid streaming out the value if the mask is -1.
+ irange_bitmask bm = r.m_bitmask;
+ write_wide_int (val, len, bm.value ());
+ write_wide_int (val, len, bm.mask ());
if (flag_checking)
{
@@ -367,7 +371,12 @@ irange_storage::get_irange (irange &r, tree type) const
r.union_ (tmp);
}
}
- read_wide_int (r.m_nonzero_mask, val, *len, m_precision);
+
+ wide_int bits_value, bits_mask;
+ read_wide_int (bits_value, val, *len, m_precision);
+ val += *len++;
+ read_wide_int (bits_mask, val, *len, m_precision);
+ r.m_bitmask = irange_bitmask (bits_value, bits_mask);
if (r.m_kind == VR_VARYING)
r.m_kind = VR_RANGE;
@@ -399,7 +408,7 @@ irange_storage::size (const irange &r)
return sizeof (irange_storage);
unsigned prec = TYPE_PRECISION (r.type ());
- unsigned n = r.num_pairs () * 2 + 1;
+ unsigned n = r.num_pairs () * 2 + 2;
unsigned hwi_size = ((n * WIDE_INT_MAX_HWIS (prec) - 1)
* sizeof (HOST_WIDE_INT));
unsigned len_size = n;
@@ -428,7 +437,7 @@ irange_storage::dump () const
int i, j;
fprintf (stderr, " lengths = [ ");
- for (i = 0; i < m_num_ranges * 2 + 1; ++i)
+ for (i = 0; i < m_num_ranges * 2 + 2; ++i)
fprintf (stderr, "%d ", len[i]);
fprintf (stderr, "]\n");
@@ -443,8 +452,13 @@ irange_storage::dump () const
*val++);
++len;
}
+
+ // Dump value/mask pair.
+ for (j = 0; j < *len; ++j)
+ fprintf (stderr, " [VALUE] " HOST_WIDE_INT_PRINT_DEC "\n", *val++);
+ ++len;
for (j = 0; j < *len; ++j)
- fprintf (stderr, " [NZ] " HOST_WIDE_INT_PRINT_DEC "\n", *val++);
+ fprintf (stderr, " [MASK] " HOST_WIDE_INT_PRINT_DEC "\n", *val++);
}
DEBUG_FUNCTION void
@@ -91,7 +91,7 @@ private:
enum value_range_kind m_kind : 3;
- // The length of this is m_num_ranges * 2 + 1 to accomodate the nonzero bits.
+ // The length of this is m_num_ranges * 2 + 2 to accommodate the bitmask.
HOST_WIDE_INT m_val[1];
// Another variable-length part of the structure following the HWIs.
@@ -79,6 +79,13 @@ debug (const Value_Range &r)
fprintf (stderr, "\n");
}
+DEBUG_FUNCTION void
+debug (const irange_bitmask &bm)
+{
+ bm.dump (stderr);
+ fprintf (stderr, "\n");
+}
+
// Default vrange definitions.
bool
@@ -235,6 +242,23 @@ vrange::dump (FILE *file) const
pp_flush (&buffer);
}
+void
+irange_bitmask::dump (FILE *file) const
+{
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE];
+ pretty_printer buffer;
+
+ pp_needs_newline (&buffer) = true;
+ buffer.buffer->stream = file;
+ pp_string (&buffer, "MASK ");
+ print_hex (m_mask, buf);
+ pp_string (&buffer, buf);
+ pp_string (&buffer, " VALUE ");
+ print_hex (m_value, buf);
+ pp_string (&buffer, buf);
+ pp_flush (&buffer);
+}
+
namespace inchash
{
@@ -263,7 +287,9 @@ add_vrange (const vrange &v, inchash::hash &hstate,
hstate.add_wide_int (r.lower_bound (i));
hstate.add_wide_int (r.upper_bound (i));
}
- hstate.add_wide_int (r.get_nonzero_bits ());
+ irange_bitmask bm = r.get_bitmask ();
+ hstate.add_wide_int (bm.value ());
+ hstate.add_wide_int (bm.mask ());
return;
}
if (is_a <frange> (v))
@@ -908,7 +934,7 @@ irange::operator= (const irange &src)
m_num_ranges = lim;
m_type = src.m_type;
m_kind = src.m_kind;
- m_nonzero_mask = src.m_nonzero_mask;
+ m_bitmask = src.m_bitmask;
if (m_max_ranges == 1)
normalize_kind ();
if (flag_checking)
@@ -972,7 +998,7 @@ irange::set (tree type, const wide_int &min, const wide_int &max,
wide_int max_value = wi::max_value (prec, sign);
m_type = type;
- m_nonzero_mask = wi::minus_one (prec);
+ m_bitmask.set_unknown (prec);
if (kind == VR_RANGE)
{
@@ -1059,7 +1085,7 @@ irange::verify_range ()
unsigned prec = TYPE_PRECISION (m_type);
if (m_kind == VR_VARYING)
{
- gcc_checking_assert (m_nonzero_mask == -1);
+ gcc_checking_assert (m_bitmask.unknown_p ());
gcc_checking_assert (m_num_ranges == 1);
gcc_checking_assert (varying_compatible_p ());
gcc_checking_assert (lower_bound ().get_precision () == prec);
@@ -1077,7 +1103,7 @@ irange::verify_range ()
int c = wi::cmp (lb, ub, TYPE_SIGN (m_type));
gcc_checking_assert (c == 0 || c == -1);
}
- gcc_checking_assert (m_nonzero_mask.get_precision () == prec);
+ m_bitmask.verify_mask ();
}
bool
@@ -1101,9 +1127,18 @@ irange::operator== (const irange &other) const
if (lb != lb_other || ub != ub_other)
return false;
}
- widest_int nz1 = widest_int::from (get_nonzero_bits (), sign1);
- widest_int nz2 = widest_int::from (other.get_nonzero_bits (), sign2);
- return nz1 == nz2;
+
+ irange_bitmask bm1 = get_bitmask ();
+ irange_bitmask bm2 = other.get_bitmask ();
+ widest_int tmp1 = widest_int::from (bm1.mask (), sign1);
+ widest_int tmp2 = widest_int::from (bm2.mask (), sign2);
+ if (tmp1 != tmp2)
+ return false;
+ if (bm1.unknown_p ())
+ return true;
+ tmp1 = widest_int::from (bm1.value (), sign1);
+ tmp2 = widest_int::from (bm2.value (), sign2);
+ return tmp1 == tmp2;
}
/* If range is a singleton, place it in RESULT and return TRUE. */
@@ -1143,10 +1178,10 @@ irange::contains_p (const wide_int &cst) const
if (undefined_p ())
return false;
- // See if we can exclude CST based on the nonzero bits.
- if (m_nonzero_mask != -1
+ // See if we can exclude CST based on the known 0 bits.
+ if (!m_bitmask.unknown_p ()
&& cst != 0
- && wi::bit_and (m_nonzero_mask, cst) == 0)
+ && wi::bit_and (m_bitmask.get_nonzero_bits (), cst) == 0)
return false;
signop sign = TYPE_SIGN (type ());
@@ -1176,7 +1211,7 @@ irange::irange_single_pair_union (const irange &r)
{
// If current upper bound is new upper bound, we're done.
if (wi::le_p (r.m_base[1], m_base[1], sign))
- return union_nonzero_bits (r);
+ return union_bitmask (r);
// Otherwise R has the new upper bound.
// Check for overlap/touching ranges, or single target range.
if (m_max_ranges == 1
@@ -1192,7 +1227,7 @@ irange::irange_single_pair_union (const irange &r)
}
// The range has been altered, so normalize it even if nothing
// changed in the mask.
- if (!union_nonzero_bits (r))
+ if (!union_bitmask (r))
normalize_kind ();
if (flag_checking)
verify_range ();
@@ -1221,7 +1256,7 @@ irange::irange_single_pair_union (const irange &r)
}
// The range has been altered, so normalize it even if nothing
// changed in the mask.
- if (!union_nonzero_bits (r))
+ if (!union_bitmask (r))
normalize_kind ();
if (flag_checking)
verify_range ();
@@ -1261,7 +1296,7 @@ irange::union_ (const vrange &v)
// If this ranges fully contains R, then we need do nothing.
if (irange_contains_p (r))
- return union_nonzero_bits (r);
+ return union_bitmask (r);
// Do not worry about merging and such by reserving twice as many
// pairs as needed, and then simply sort the 2 ranges into this
@@ -1356,7 +1391,7 @@ irange::union_ (const vrange &v)
m_kind = VR_RANGE;
// The range has been altered, so normalize it even if nothing
// changed in the mask.
- if (!union_nonzero_bits (r))
+ if (!union_bitmask (r))
normalize_kind ();
if (flag_checking)
verify_range ();
@@ -1439,13 +1474,13 @@ irange::intersect (const vrange &v)
if (undefined_p ())
return true;
- res |= intersect_nonzero_bits (r);
+ res |= intersect_bitmask (r);
return res;
}
// If R fully contains this, then intersection will change nothing.
if (r.irange_contains_p (*this))
- return intersect_nonzero_bits (r);
+ return intersect_bitmask (r);
// ?? We could probably come up with something smarter than the
// worst case scenario here.
@@ -1528,7 +1563,7 @@ irange::intersect (const vrange &v)
m_kind = VR_RANGE;
// The range has been altered, so normalize it even if nothing
// changed in the mask.
- if (!intersect_nonzero_bits (r))
+ if (!intersect_bitmask (r))
normalize_kind ();
if (flag_checking)
verify_range ();
@@ -1652,7 +1687,7 @@ irange::invert ()
signop sign = TYPE_SIGN (ttype);
wide_int type_min = wi::min_value (prec, sign);
wide_int type_max = wi::max_value (prec, sign);
- m_nonzero_mask = wi::minus_one (prec);
+ m_bitmask.set_unknown (prec);
// At this point, we need one extra sub-range to represent the
// inverse.
@@ -1723,45 +1758,45 @@ irange::invert ()
verify_range ();
}
-// Return the nonzero bits inherent in the range.
+// Return the bitmask inherent in the range.
-wide_int
-irange::get_nonzero_bits_from_range () const
+irange_bitmask
+irange::get_bitmask_from_range () const
{
wide_int min = lower_bound ();
wide_int max = upper_bound ();
wide_int xorv = min ^ max;
+ unsigned prec = TYPE_PRECISION (type ());
+
if (xorv != 0)
- {
- unsigned prec = TYPE_PRECISION (type ());
- xorv = wi::mask (prec - wi::clz (xorv), false, prec);
- }
- return min | xorv;
+ xorv = wi::mask (prec - wi::clz (xorv), false, prec);
+
+ return irange_bitmask (wi::zero (prec), min | xorv);
}
-// If the the nonzero mask can be trivially converted to a range, do
-// so and return TRUE.
+// If the mask can be trivially converted to a range, do so and
+// return TRUE.
bool
-irange::set_range_from_nonzero_bits ()
+irange::set_range_from_bitmask ()
{
gcc_checking_assert (!undefined_p ());
- if (m_nonzero_mask == -1)
+ if (m_bitmask.unknown_p ())
return false;
- unsigned popcount = wi::popcount (m_nonzero_mask);
+ unsigned popcount = wi::popcount (m_bitmask.get_nonzero_bits ());
// If we have only one bit set in the mask, we can figure out the
// range immediately.
if (popcount == 1)
{
// Make sure we don't pessimize the range.
- if (!contains_p (m_nonzero_mask))
+ if (!contains_p (m_bitmask.get_nonzero_bits ()))
return false;
bool has_zero = contains_zero_p (*this);
- wide_int nz = m_nonzero_mask;
+ wide_int nz = m_bitmask.get_nonzero_bits ();
set (m_type, nz, nz);
- m_nonzero_mask = nz;
+ m_bitmask.set_nonzero_bits (nz);
if (has_zero)
{
int_range<2> zero;
@@ -1781,95 +1816,126 @@ irange::set_range_from_nonzero_bits ()
}
void
-irange::set_nonzero_bits (const wide_int &bits)
+irange::update_bitmask (const irange_bitmask &bm)
{
gcc_checking_assert (!undefined_p ());
- // Drop VARYINGs with a nonzero mask to a plain range.
- if (m_kind == VR_VARYING && bits != -1)
+ // Drop VARYINGs with known bits to a plain range.
+ if (m_kind == VR_VARYING && !bm.unknown_p ())
m_kind = VR_RANGE;
- m_nonzero_mask = bits;
- if (!set_range_from_nonzero_bits ())
+ m_bitmask = bm;
+ if (!set_range_from_bitmask ())
normalize_kind ();
if (flag_checking)
verify_range ();
}
-// Return the nonzero bitmask. This will return the nonzero bits plus
-// the nonzero bits inherent in the range.
+// Return the bitmask of known bits that includes the bitmask inherent
+// in the range.
+
+irange_bitmask
+irange::get_bitmask () const
+{
+ gcc_checking_assert (!undefined_p ());
+
+ // The mask inherent in the range is calculated on-demand. For
+ // example, [0,255] does not have known bits set by default. This
+ // saves us considerable time, because setting it at creation incurs
+ // a large penalty for irange::set. At the time of writing there
+ // was a 5% slowdown in VRP if we kept the mask precisely up to date
+ // at all times. Instead, we default to -1 and set it when
+ // explicitly requested. However, this function will always return
+ // the correct mask.
+ //
+ // This also means that the mask may have a finer granularity than
+ // the range and thus contradict it. Think of the mask as an
+ // enhancement to the range. For example:
+ //
+ // [3, 1000] MASK 0xfffffffe VALUE 0x0
+ //
+ // 3 is in the range endpoints, but is excluded per the known 0 bits
+ // in the mask.
+ irange_bitmask bm = get_bitmask_from_range ();
+ if (!m_bitmask.unknown_p ())
+ bm.intersect (m_bitmask);
+ return bm;
+}
+
+// Set the bitmask in THIS from the nonzero bits in BITS, and
+// normalize the range.
+
+void
+irange::set_nonzero_bits (const wide_int &bits)
+{
+ gcc_checking_assert (!undefined_p ());
+ irange_bitmask bm (wi::zero (TYPE_PRECISION (type ())), bits);
+ update_bitmask (bm);
+}
+
+// Return the nonzero bits in THIS.
wide_int
irange::get_nonzero_bits () const
{
gcc_checking_assert (!undefined_p ());
- // The nonzero mask inherent in the range is calculated on-demand.
- // For example, [0,255] does not have a 0xff nonzero mask by default
- // (unless manually set). This saves us considerable time, because
- // setting it at creation incurs a large penalty for irange::set.
- // At the time of writing there was a 5% slowdown in VRP if we kept
- // the mask precisely up to date at all times. Instead, we default
- // to -1 and set it when explicitly requested. However, this
- // function will always return the correct mask.
- if (m_nonzero_mask == -1)
- return get_nonzero_bits_from_range ();
- else
- return m_nonzero_mask & get_nonzero_bits_from_range ();
+ irange_bitmask bm = get_bitmask ();
+ return bm.value () | bm.mask ();
}
-// Intersect the nonzero bits in R into THIS. Return TRUE and
-// normalize the range if anything changed.
+// Intersect the bitmask in R into THIS and normalize the range.
+// Return TRUE if the intersection changed anything.
bool
-irange::intersect_nonzero_bits (const irange &r)
+irange::intersect_bitmask (const irange &r)
{
gcc_checking_assert (!undefined_p () && !r.undefined_p ());
- if (m_nonzero_mask == -1 && r.m_nonzero_mask == -1)
+ if (m_bitmask == r.m_bitmask)
return false;
- if (m_nonzero_mask != r.m_nonzero_mask)
- {
- wide_int nz = get_nonzero_bits () & r.get_nonzero_bits ();
- // If the nonzero bits did not change, return false.
- if (nz == get_nonzero_bits ())
- return false;
+ irange_bitmask bm = get_bitmask ();
+ if (!bm.intersect (r.get_bitmask ()))
+ return false;
- m_nonzero_mask = nz;
- if (!set_range_from_nonzero_bits ())
- normalize_kind ();
- if (flag_checking)
- verify_range ();
- return true;
- }
- return false;
+ m_bitmask = bm;
+ if (!set_range_from_bitmask ())
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+ return true;
}
-// Union the nonzero bits in R into THIS. Return TRUE and normalize
-// the range if anything changed.
+// Union the bitmask in R into THIS. Return TRUE and normalize the
+// range if anything changed.
bool
-irange::union_nonzero_bits (const irange &r)
+irange::union_bitmask (const irange &r)
{
gcc_checking_assert (!undefined_p () && !r.undefined_p ());
- if (m_nonzero_mask == -1 && r.m_nonzero_mask == -1)
+ if (m_bitmask == r.m_bitmask)
return false;
- if (m_nonzero_mask != r.m_nonzero_mask)
- {
- wide_int save = get_nonzero_bits ();
- m_nonzero_mask = save | r.get_nonzero_bits ();
- if (m_nonzero_mask == save)
- return false;
- // No need to call set_range_from_nonzero_bits, because we'll
- // never narrow the range. Besides, it would cause endless
- // recursion because of the union_ in
- // set_range_from_nonzero_bits.
- normalize_kind ();
- return true;
- }
- return false;
+ irange_bitmask bm = get_bitmask ();
+ if (!bm.union_ (r.get_bitmask ()))
+ return false;
+
+ m_bitmask = bm;
+ // No need to call set_range_from_bitmask, because we'll never
+ // narrow the range. Besides, it would cause endless recursion
+ // because of the union_ in set_range_from_bitmask.
+ normalize_kind ();
+ return true;
+}
+
+void
+irange_bitmask::verify_mask () const
+{
+ gcc_assert (m_value.get_precision () == m_mask.get_precision ());
+ // Unknown bits must have their corresponding value bits cleared as
+ // it simplifies union and intersect.
+ gcc_assert (wi::bit_and (m_mask, m_value) == 0);
}
void
@@ -113,12 +113,146 @@ namespace inchash
extern void add_vrange (const vrange &, hash &, unsigned flags = 0);
}
+// A pair of values representing the known bits in a range. Zero bits
+// in MASK cover constant values. Set bits in MASK cover unknown
+// values. VALUE are the known bits.
+//
+// Set bits in MASK (no meaningful information) must have their
+// corresponding bits in VALUE cleared, as this speeds up union and
+// intersect.
+
+class irange_bitmask
+{
+public:
+ irange_bitmask () { /* uninitialized */ }
+ irange_bitmask (unsigned prec) { set_unknown (prec); }
+ irange_bitmask (const wide_int &value, const wide_int &mask);
+ wide_int value () const { return m_value; }
+ wide_int mask () const { return m_mask; }
+ void set_unknown (unsigned prec);
+ bool unknown_p () const;
+ unsigned get_precision () const;
+ bool union_ (const irange_bitmask &src);
+ bool intersect (const irange_bitmask &src);
+ bool operator== (const irange_bitmask &src) const;
+ bool operator!= (const irange_bitmask &src) const { return !(*this == src); }
+ void verify_mask () const;
+ void dump (FILE *) const;
+
+ // Convenience functions for nonzero bitmask compatibility.
+ wide_int get_nonzero_bits () const;
+ void set_nonzero_bits (const wide_int &bits);
+private:
+ wide_int m_value;
+ wide_int m_mask;
+};
+
+inline void
+irange_bitmask::set_unknown (unsigned prec)
+{
+ m_value = wi::zero (prec);
+ m_mask = wi::minus_one (prec);
+ if (flag_checking)
+ verify_mask ();
+}
+
+// Return TRUE if THIS does not have any meaningful information.
+
+inline bool
+irange_bitmask::unknown_p () const
+{
+ return m_mask == -1;
+}
+
+inline
+irange_bitmask::irange_bitmask (const wide_int &value, const wide_int &mask)
+{
+ m_value = value;
+ m_mask = mask;
+ if (flag_checking)
+ verify_mask ();
+}
+
+inline unsigned
+irange_bitmask::get_precision () const
+{
+ return m_mask.get_precision ();
+}
+
+// The following two functions are meant for backward compatibility
+// with the nonzero bitmask. A cleared bit means the value must be 0.
+// A set bit means we have no information for the bit.
+
+// Return the nonzero bits.
+inline wide_int
+irange_bitmask::get_nonzero_bits () const
+{
+ return m_value | m_mask;
+}
+
+// Set the bitmask to the nonzero bits in BITS.
+inline void
+irange_bitmask::set_nonzero_bits (const wide_int &bits)
+{
+ m_value = wi::zero (bits.get_precision ());
+ m_mask = bits;
+ if (flag_checking)
+ verify_mask ();
+}
+
+inline bool
+irange_bitmask::operator== (const irange_bitmask &src) const
+{
+ bool unknown1 = unknown_p ();
+ bool unknown2 = src.unknown_p ();
+ if (unknown1 || unknown2)
+ return unknown1 == unknown2;
+ return m_value == src.m_value && m_mask == src.m_mask;
+}
+
+inline bool
+irange_bitmask::union_ (const irange_bitmask &src)
+{
+ irange_bitmask save (*this);
+ m_mask = (m_mask | src.m_mask) | (m_value ^ src.m_value);
+ m_value = m_value & src.m_value;
+ if (flag_checking)
+ verify_mask ();
+ return *this != save;
+}
+
+inline bool
+irange_bitmask::intersect (const irange_bitmask &src)
+{
+ irange_bitmask save (*this);
+ // If we have two known bits that are incompatible, the resulting
+ // bit is undefined. It is unclear whether we should set the entire
+ // range to UNDEFINED, or just a subset of it. For now, set the
+ // entire bitmask to unknown (VARYING).
+ if (wi::bit_and (~(m_mask | src.m_mask),
+ m_value ^ src.m_value) != 0)
+ {
+ unsigned prec = m_mask.get_precision ();
+ m_mask = wi::minus_one (prec);
+ m_value = wi::zero (prec);
+ }
+ else
+ {
+ m_mask = m_mask & src.m_mask;
+ m_value = m_value | src.m_value;
+ }
+ if (flag_checking)
+ verify_mask ();
+ return *this != save;
+}
+
// An integer range without any storage.
class GTY((user)) irange : public vrange
{
friend value_range_kind get_legacy_range (const irange &, tree &, tree &);
friend class irange_storage;
+ friend class vrange_printer;
public:
// In-place setters.
void set (tree type, const wide_int &, const wide_int &,
@@ -161,6 +295,8 @@ public:
virtual bool fits_p (const vrange &r) const override;
virtual void accept (const vrange_visitor &v) const override;
+ void update_bitmask (const irange_bitmask &);
+ irange_bitmask get_bitmask () const;
// Nonzero masks.
wide_int get_nonzero_bits () const;
void set_nonzero_bits (const wide_int &bits);
@@ -187,17 +323,17 @@ private:
friend void gt_pch_nx (irange *, gt_pointer_operator, void *);
bool varying_compatible_p () const;
- bool intersect_nonzero_bits (const irange &r);
- bool union_nonzero_bits (const irange &r);
- wide_int get_nonzero_bits_from_range () const;
- bool set_range_from_nonzero_bits ();
+ bool intersect_bitmask (const irange &r);
+ bool union_bitmask (const irange &r);
+ irange_bitmask get_bitmask_from_range () const;
+ bool set_range_from_bitmask ();
bool intersect (const wide_int& lb, const wide_int& ub);
unsigned char m_num_ranges;
bool m_resizable;
unsigned char m_max_ranges;
tree m_type;
- wide_int m_nonzero_mask;
+ irange_bitmask m_bitmask;
protected:
wide_int *m_base;
};
@@ -741,7 +877,7 @@ irange::varying_compatible_p () const
if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
return (l == wi::min_value (prec, sign)
&& u == wi::max_value (prec, sign)
- && m_nonzero_mask == -1);
+ && m_bitmask.unknown_p ());
return true;
}
@@ -908,7 +1044,7 @@ irange::set_varying (tree type)
{
m_kind = VR_VARYING;
m_num_ranges = 1;
- m_nonzero_mask = wi::minus_one (TYPE_PRECISION (type));
+ m_bitmask.set_unknown (TYPE_PRECISION (type));
if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
{
@@ -966,7 +1102,8 @@ irange::set_nonzero (tree type)
m_type = type;
m_kind = VR_RANGE;
m_base[0] = wi::one (prec);
- m_base[1] = m_nonzero_mask = wi::minus_one (prec);
+ m_base[1] = wi::minus_one (prec);
+ m_bitmask.set_unknown (prec);
m_num_ranges = 1;
if (flag_checking)