===================================================================
@@ -132,11 +132,15 @@ const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
-// AArch64/SANITIZER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
+// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__sparc__)
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
===================================================================
@@ -99,6 +99,13 @@
// || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow ||
// || `[0x00000000000000, 0x0fffffffffffff]` || LowMem ||
//
+// Default Linux/SPARC64 (52-bit VMA) mapping:
+// || `[0x8000000000000, 0xfffffffffffff]` || HighMem ||
+// || `[0x1080000000000, 0x207ffffffffff]` || HighShadow ||
+// || `[0x0090000000000, 0x107ffffffffff]` || ShadowGap ||
+// || `[0x0080000000000, 0x008ffffffffff]` || LowShadow ||
+// || `[0x0000000000000, 0x007ffffffffff]` || LowMem ||
+//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@@ -161,6 +168,7 @@ static const u64 kMIPS32_ShadowOffset32
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
+static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
@@ -223,6 +231,8 @@ static const u64 kMyriadCacheBitMask32 =
# define SHADOW_OFFSET kDefaultShadowOffset64
# elif defined(__mips64)
# define SHADOW_OFFSET kMIPS64_ShadowOffset64
+# elif defined(__sparc__)
+# define SHADOW_OFFSET kSPARC64_ShadowOffset64
# elif SANITIZER_WINDOWS64
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# else
@@ -269,6 +279,8 @@ extern uptr kHighMemEnd, kMidMemBeg, kMi
#if SANITIZER_MYRIAD2
#include "asan_mapping_myriad.h"
+#elif defined(__sparc__) && SANITIZER_WORDSIZE == 64
+#include "asan_mapping_sparc64.h"
#else
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
===================================================================
@@ -0,0 +1,100 @@
+//===-- asan_mapping_sparc64.h ----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// SPARC64-specific definitions for ASan memory mapping.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_MAPPING_SPARC64_H
+#define ASAN_MAPPING_SPARC64_H
+
+// This is tailored to the 52-bit VM layout on SPARC-T4 and later.
+// The VM space is split into two 51-bit halves at both ends of the 64-bit
+// range: the low half has bits 51-63 clear, while the high half has them set:
+// 0x0000000000000000 - 0x0007ffffffffffff
+// 0xfff8000000000000 - 0xffffffffffffffff
+
+#define VMA_BITS 52
+#define HIGH_BITS (64 - VMA_BITS)
+
+// The idea is to chop the high bits before doing the scaling, so the two
+// parts become contiguous again and the usual scheme can be applied.
+
+#define MEM_TO_SHADOW(mem) \
+ ((((mem) << HIGH_BITS) >> (HIGH_BITS + (SHADOW_SCALE))) + (SHADOW_OFFSET))
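+// For illustration (assuming the default SHADOW_SCALE of 3 and
+// SHADOW_OFFSET == kSPARC64_ShadowOffset64 == 1ULL << 43 == 0x80000000000):
+//   low half:   MEM_TO_SHADOW(0x0000000000001000) == 0x0080000000200
+//   high half:  MEM_TO_SHADOW(0xfff8000000000000) == 0x1080000000000
+// Dropping bits 52-63 first makes the two halves map onto one contiguous
+// shadow range starting at SHADOW_OFFSET.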
+
+#define kLowMemBeg 0
+#define kLowMemEnd (SHADOW_OFFSET - 1)
+
+#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
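+// With SHADOW_OFFSET == 1ULL << 43 (kSPARC64_ShadowOffset64) this yields
+// LowMem == [0, 0x007ffffffffff] and LowShadow == [0x0080000000000,
+// 0x008ffffffffff], matching the mapping table added earlier in this patch.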
+
+// Note that this leaves a huge hole between the high shadow memory, which
+// ends up in the low half of the address space, and the beginning of the
+// high half.
+
+#define kHighMemBeg (-(1ULL << (VMA_BITS - 1)))
+
+#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
+#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
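+// With the 52-bit layout above, kHighMemBeg == 0xfff8000000000000, and
+// (assuming kHighMemEnd covers the whole high half) the shadow works out to
+// HighShadow == [0x1080000000000, 0x207ffffffffff], as in the table above.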
+
+#define kMidShadowBeg 0
+#define kMidShadowEnd 0
+
+// With the zero shadow base we cannot actually map pages starting from 0.
+// The kZeroBaseMaxShadowStart value below is somewhat arbitrary.
+#define kZeroBaseShadowStart 0
+#define kZeroBaseMaxShadowStart (1 << 18)
+
+#define kShadowGapBeg (kLowShadowEnd + 1)
+#define kShadowGapEnd (kHighShadowBeg - 1)
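+// With the default SHADOW_OFFSET this is [0x0090000000000, 0x107ffffffffff],
+// i.e. the ShadowGap range from the mapping table added earlier in this patch.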
+
+#define kShadowGap2Beg 0
+#define kShadowGap2End 0
+
+#define kShadowGap3Beg 0
+#define kShadowGap3End 0
+
+namespace __asan {
+
+static inline bool AddrIsInLowMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return a <= kLowMemEnd;
+}
+
+static inline bool AddrIsInLowShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return a >= kLowShadowBeg && a <= kLowShadowEnd;
+}
+
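+// There is no mid memory region in this layout (kMidShadowBeg/End are 0
+// above), so the mid-range predicates are trivially false.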
+static inline bool AddrIsInMidMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return false;
+}
+
+static inline bool AddrIsInMidShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return false;
+}
+
+static inline bool AddrIsInHighMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd;
+}
+
+static inline bool AddrIsInHighShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd;
+}
+
+static inline bool AddrIsInShadowGap(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
+} // namespace __asan
+
+#endif // ASAN_MAPPING_SPARC64_H