@@ -92,6 +92,7 @@ along with GCC; see the file COPYING3. If not see
#include "i386-options.h"
#include "i386-builtins.h"
#include "i386-expand.h"
+#include "asan.h"
/* Split one or more double-mode RTL references into pairs of half-mode
references. The RTL can be REG, offsettable MEM, integer constant, or
@@ -9438,6 +9439,17 @@ ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
fnaddr = gen_rtx_MEM (QImode, copy_to_mode_reg (word_mode, fnaddr));
}
+ /* PR100665: HWASAN may tag a code pointer, but LAM does not support
+ tagged code pointers, so mask off the tag here.
+ TODO: indirect jumps also need this handling. */
+ if (ix86_memtag_can_tag_addresses () && !fndecl
+ && sanitize_flags_p (SANITIZE_HWADDRESS))
+ {
+ rtx untagged_addr = ix86_memtag_untagged_pointer (XEXP (fnaddr, 0),
+ NULL_RTX);
+ fnaddr = gen_rtx_MEM (QImode, untagged_addr);
+ }
+
call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
if (retval)
@@ -2033,6 +2033,9 @@ ix86_option_override_internal (bool main_args_p,
if (TARGET_UINTR && !TARGET_64BIT)
error ("%<-muintr%> not supported for 32-bit code");
+ if (ix86_lam_type && !TARGET_LP64)
+ error ("%<-mlam=%> option: [u48|u57] not supported for 32-bit code");
+
if (!opts->x_ix86_arch_string)
opts->x_ix86_arch_string
= TARGET_64BIT_P (opts->x_ix86_isa_flags)
@@ -128,4 +128,10 @@ enum harden_sls {
harden_sls_all = harden_sls_return | harden_sls_indirect_jmp
};
+enum lam_type {
+ lam_none = 0, /* LAM disabled (default).  */
+ lam_u48 = 1, /* Metadata in pointer bits 62:48.  */
+ lam_u57 /* Metadata in pointer bits 62:57.  */
+};
+
#endif
@@ -228,6 +228,8 @@ extern void ix86_expand_atomic_fetch_op_loop (rtx, rtx, rtx, enum rtx_code,
extern void ix86_expand_cmpxchg_loop (rtx *, rtx, rtx, rtx, rtx, rtx,
bool, rtx_code_label *);
extern rtx ix86_expand_fast_convert_bf_to_sf (rtx);
+extern rtx ix86_memtag_untagged_pointer (rtx, rtx);
+extern bool ix86_memtag_can_tag_addresses (void);
#ifdef TREE_CODE
extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree, int);
@@ -24274,6 +24274,111 @@ ix86_push_rounding (poly_int64 bytes)
return ROUND_UP (bytes, UNITS_PER_WORD);
}
+/* Use 8 bits of metadata starting from bit 48 for LAM_U48,
+ 6 bits of metadata starting from bit 57 for LAM_U57. */
+#define IX86_HWASAN_SHIFT (ix86_lam_type == lam_u48 \
+ ? 48 \
+ : (ix86_lam_type == lam_u57 ? 57 : 0))
+#define IX86_HWASAN_TAG_SIZE (ix86_lam_type == lam_u48 \
+ ? 8 \
+ : (ix86_lam_type == lam_u57 ? 6 : 0))
+
+/* Implement TARGET_MEMTAG_CAN_TAG_ADDRESSES: true iff -mlam= is set and LP64. */
+bool
+ix86_memtag_can_tag_addresses ()
+{
+ return ix86_lam_type != lam_none && TARGET_LP64;
+}
+
+/* Implement TARGET_MEMTAG_TAG_SIZE: 8 bits for LAM_U48, 6 bits for LAM_U57. */
+unsigned char
+ix86_memtag_tag_size ()
+{
+ return IX86_HWASAN_TAG_SIZE;
+}
+
+/* Implement TARGET_MEMTAG_SET_TAG: OR TAG, shifted into position, into UNTAGGED. */
+rtx
+ix86_memtag_set_tag (rtx untagged, rtx tag, rtx target)
+{
+ /* default_memtag_insert_random_tag may generate a tag wider than
+ 6 bits; for LAM_U57 mask it down to IX86_HWASAN_TAG_SIZE bits. */
+ if (ix86_lam_type == lam_u57)
+ {
+ unsigned HOST_WIDE_INT and_imm
+ = (HOST_WIDE_INT_1U << IX86_HWASAN_TAG_SIZE) - 1;
+
+ emit_insn (gen_andqi3 (tag, tag, GEN_INT (and_imm)));
+ }
+ tag = expand_simple_binop (Pmode, ASHIFT, tag,
+ GEN_INT (IX86_HWASAN_SHIFT), NULL_RTX,
+ /* unsignedp = */1, OPTAB_WIDEN);
+ rtx ret = expand_simple_binop (Pmode, IOR, untagged, tag, target,
+ /* unsignedp = */1, OPTAB_DIRECT);
+ return ret;
+}
+
+/* Implement TARGET_MEMTAG_EXTRACT_TAG: return TAGGED_POINTER's tag in QImode. */
+rtx
+ix86_memtag_extract_tag (rtx tagged_pointer, rtx target)
+{
+ rtx tag = expand_simple_binop (Pmode, LSHIFTRT, tagged_pointer,
+ GEN_INT (IX86_HWASAN_SHIFT), target,
+ /* unsignedp = */0,
+ OPTAB_DIRECT);
+ rtx ret = gen_reg_rtx (QImode);
+ /* For LAM_U57 the shift leaves bit 63 in bit 6; mask it off. */
+ if (ix86_lam_type == lam_u57)
+ {
+ unsigned HOST_WIDE_INT and_imm
+ = (HOST_WIDE_INT_1U << IX86_HWASAN_TAG_SIZE) - 1;
+ emit_insn (gen_andqi3 (ret, gen_lowpart (QImode, tag),
+ gen_int_mode (and_imm, QImode)));
+ }
+ else
+ emit_move_insn (ret, gen_lowpart (QImode, tag));
+ return ret;
+}
+
+/* Implement TARGET_MEMTAG_UNTAGGED_POINTER: clear the tag bits. */
+rtx
+ix86_memtag_untagged_pointer (rtx tagged_pointer, rtx target)
+{
+ /* Mask keeps the low address bits and bit 63; only the tag field is cleared. */
+ rtx tag_mask = gen_int_mode (((HOST_WIDE_INT_1U << IX86_HWASAN_SHIFT)
+ + (HOST_WIDE_INT_1U << 63) - 1),
+ Pmode);
+ rtx untagged_base = expand_simple_binop (Pmode, AND, tagged_pointer,
+ tag_mask, target, true,
+ OPTAB_DIRECT);
+ gcc_assert (untagged_base);
+ return untagged_base;
+}
+
+/* Implement TARGET_MEMTAG_ADD_TAG: return BASE + OFFSET with its tag
+   incremented by TAG_OFFSET, wrapping within the tag field. */
+rtx
+ix86_memtag_add_tag (rtx base, poly_int64 offset, unsigned char tag_offset)
+{
+ rtx base_tag = gen_reg_rtx (QImode);
+ rtx base_addr = gen_reg_rtx (Pmode);
+ rtx tagged_addr = gen_reg_rtx (Pmode);
+ rtx new_tag = gen_reg_rtx (QImode);
+ unsigned HOST_WIDE_INT and_imm
+ = (HOST_WIDE_INT_1U << IX86_HWASAN_TAG_SIZE) - 1;
+
+ /* If the tag addition "overflows" past IX86_HWASAN_TAG_SIZE bits,
+ mask the excess bits off so the tag stays in range. */
+ emit_move_insn (base_tag, ix86_memtag_extract_tag (base, NULL_RTX));
+ emit_move_insn (base_addr,
+		  ix86_memtag_untagged_pointer (base, NULL_RTX));
+ emit_insn (gen_add2_insn (base_tag, gen_int_mode (tag_offset, QImode)));
+ emit_move_insn (new_tag, base_tag);
+ emit_insn (gen_andqi3 (new_tag, new_tag, gen_int_mode (and_imm, QImode)));
+ emit_move_insn (tagged_addr,
+		  ix86_memtag_set_tag (base_addr, new_tag, NULL_RTX));
+ return plus_constant (Pmode, tagged_addr, offset);
+}
+
/* Target-specific selftests. */
#if CHECKING_P
@@ -25068,6 +25173,24 @@ ix86_libgcc_floating_mode_supported_p
# define TARGET_ASM_RELOC_RW_MASK ix86_reloc_rw_mask
#endif
+#undef TARGET_MEMTAG_CAN_TAG_ADDRESSES
+#define TARGET_MEMTAG_CAN_TAG_ADDRESSES ix86_memtag_can_tag_addresses
+
+#undef TARGET_MEMTAG_ADD_TAG
+#define TARGET_MEMTAG_ADD_TAG ix86_memtag_add_tag
+
+#undef TARGET_MEMTAG_SET_TAG
+#define TARGET_MEMTAG_SET_TAG ix86_memtag_set_tag
+
+#undef TARGET_MEMTAG_EXTRACT_TAG
+#define TARGET_MEMTAG_EXTRACT_TAG ix86_memtag_extract_tag
+
+#undef TARGET_MEMTAG_UNTAGGED_POINTER
+#define TARGET_MEMTAG_UNTAGGED_POINTER ix86_memtag_untagged_pointer
+
+#undef TARGET_MEMTAG_TAG_SIZE
+#define TARGET_MEMTAG_TAG_SIZE ix86_memtag_tag_size
+
static bool ix86_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
{
#ifdef OPTION_GLIBC
@@ -1250,3 +1250,19 @@ Support RAOINT built-in functions and code generation.
munroll-only-small-loops
Target Var(ix86_unroll_only_small_loops) Init(0) Save
Enable conservative small loop unrolling.
+
+mlam=
+Target RejectNegative Joined Enum(lam_type) Var(ix86_lam_type) Init(lam_none)
+-mlam=[none|u48|u57] Instrument metadata position in user data pointers.
+
+Enum
+Name(lam_type) Type(enum lam_type) UnknownError(unknown lam type %qs)
+
+EnumValue
+Enum(lam_type) String(none) Value(lam_none)
+
+EnumValue
+Enum(lam_type) String(u48) Value(lam_u48)
+
+EnumValue
+Enum(lam_type) String(u57) Value(lam_u57)
@@ -1448,7 +1448,7 @@ See RS/6000 and PowerPC Options.
-mindirect-branch=@var{choice} -mfunction-return=@var{choice} @gol
-mindirect-branch-register -mharden-sls=@var{choice} @gol
-mindirect-branch-cs-prefix -mneeded -mno-direct-extern-access @gol
--munroll-only-small-loops}
+-munroll-only-small-loops -mlam=@var{choice}}
@emph{x86 Windows Options}
@gccoptlist{-mconsole -mcygwin -mno-cygwin -mdll @gol
@@ -33857,6 +33857,13 @@ Controls conservative small loop unrolling. It is default enabled by
O2, and unrolls loop with less than 4 insns by 1 time. Explicit
-f[no-]unroll-[all-]loops would disable this flag to avoid any
unintended unrolling behavior that user does not want.
+
+@item -mlam=@var{choice}
+@opindex mlam
+LAM (Linear Address Masking) allows special bits in the pointer to be used
+for metadata.  The default is @samp{none}.  With @samp{u48}, pointer bits in
+positions 62:48 can be used for metadata; with @samp{u57}, pointer bits in
+positions 62:57 can be used for metadata.
@end table
@node x86 Windows Options