@@ -1,4 +1,4 @@
-291d9f1baf75
+a070de932857
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
@@ -773,7 +773,6 @@
go/net/lookup_unix.go \
go/net/mac.go \
go/net/net.go \
- go/net/net_posix.go \
go/net/parse.go \
go/net/pipe.go \
go/net/port.go \
@@ -1117,6 +1116,7 @@
go/crypto/x509/pkcs8.go \
go/crypto/x509/root.go \
go/crypto/x509/root_unix.go \
+ go/crypto/x509/sec1.go \
go/crypto/x509/verify.go \
go/crypto/x509/x509.go
@@ -1245,10 +1245,17 @@
go/exp/terminal/terminal.go \
go/exp/terminal/util.go
go_exp_types_files = \
+ go/exp/types/builtins.go \
go/exp/types/check.go \
go/exp/types/const.go \
+ go/exp/types/conversions.go \
+ go/exp/types/errors.go \
go/exp/types/exportdata.go \
+ go/exp/types/expr.go \
go/exp/types/gcimporter.go \
+ go/exp/types/operand.go \
+ go/exp/types/predicates.go \
+ go/exp/types/stmt.go \
go/exp/types/types.go \
go/exp/types/universe.go
go_exp_utf8string_files = \
@@ -1329,6 +1336,7 @@
go/image/jpeg/huffman.go \
go/image/jpeg/idct.go \
go/image/jpeg/reader.go \
+ go/image/jpeg/scan.go \
go/image/jpeg/writer.go
go_image_png_files = \
@@ -1332,7 +1332,7 @@
// selectWatch and the selectWatcher are a watchdog mechanism for running Select.
// If the selectWatcher notices that the select has been blocked for >1 second, it prints
-// an error describing the select and panics the entire test binary.
+// an error describing the select and panics the entire test binary.
var selectWatch struct {
sync.Mutex
once sync.Once
@@ -1700,6 +1700,20 @@
S8
}
+// The X in S15.S11.S1 and S16.S11.S1 annihilate.
+type S14 struct {
+ S15
+ S16
+}
+
+type S15 struct {
+ S11
+}
+
+type S16 struct {
+ S11
+}
+
var fieldTests = []FTest{
{struct{}{}, "", nil, 0},
{struct{}{}, "Foo", nil, 0},
@@ -1725,6 +1739,7 @@
{S5{}, "Y", []int{2, 0, 1}, 0},
{S10{}, "X", nil, 0},
{S10{}, "Y", []int{2, 0, 0, 1}, 0},
+ {S14{}, "X", nil, 0},
}
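Aside: a minimal sketch of the rule the new S14 entry exercises (uses the
test's own types; not code from the patch). Both promoted X fields sit at
the same embedding depth, so the lookup is ambiguous and finds nothing:

	_, ok := reflect.TypeOf(S14{}).FieldByName("X")
	fmt.Println(ok) // false: S15.S11.S1.X and S16.S11.S1.X cancel out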
func TestFieldByIndex(t *testing.T) {
@@ -2046,6 +2061,24 @@
}
}
+func TestIndex(t *testing.T) {
+ xs := []byte{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Index(3).Interface().(byte)
+ if v != xs[3] {
+ t.Errorf("xs.Index(3) = %v; expected %v", v, xs[3])
+ }
+ xa := [8]byte{10, 20, 30, 40, 50, 60, 70, 80}
+ v = ValueOf(xa).Index(2).Interface().(byte)
+ if v != xa[2] {
+ t.Errorf("xa.Index(2) = %v; expected %v", v, xa[2])
+ }
+ s := "0123456789"
+ v = ValueOf(s).Index(3).Interface().(byte)
+ if v != s[3] {
+ t.Errorf("s.Index(3) = %v; expected %v", v, s[3])
+ }
+}
+
func TestSlice(t *testing.T) {
xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
v := ValueOf(xs).Slice(3, 5).Interface().([]int)
@@ -2058,7 +2091,6 @@
if !DeepEqual(v[0:5], xs[3:]) {
t.Errorf("xs.Slice(3, 5)[0:5] = %v", v[0:5])
}
-
xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
v = ValueOf(&xa).Elem().Slice(2, 5).Interface().([]int)
if len(v) != 3 {
@@ -2070,6 +2102,11 @@
if !DeepEqual(v[0:6], xa[2:]) {
t.Errorf("xs.Slice(2, 5)[0:6] = %v", v[0:6])
}
+ s := "0123456789"
+ vs := ValueOf(s).Slice(3, 5).Interface().(string)
+ if vs != s[3:5] {
+ t.Errorf("s.Slice(3, 5) = %q; expected %q", vs, s[3:5])
+ }
}
func TestVariadic(t *testing.T) {
@@ -2652,6 +2689,127 @@
}
}
+func TestOverflow(t *testing.T) {
+ if ovf := V(float64(0)).OverflowFloat(1e300); ovf {
+ t.Errorf("%v wrongly overflows float64", 1e300)
+ }
+
+ maxFloat32 := float64((1<<24 - 1) << (127 - 23))
+ if ovf := V(float32(0)).OverflowFloat(maxFloat32); ovf {
+ t.Errorf("%v wrongly overflows float32", maxFloat32)
+ }
+ ovfFloat32 := float64((1<<24-1)<<(127-23) + 1<<(127-52))
+ if ovf := V(float32(0)).OverflowFloat(ovfFloat32); !ovf {
+ t.Errorf("%v should overflow float32", ovfFloat32)
+ }
+ if ovf := V(float32(0)).OverflowFloat(-ovfFloat32); !ovf {
+ t.Errorf("%v should overflow float32", -ovfFloat32)
+ }
+
+ maxInt32 := int64(0x7fffffff)
+ if ovf := V(int32(0)).OverflowInt(maxInt32); ovf {
+ t.Errorf("%v wrongly overflows int32", maxInt32)
+ }
+ if ovf := V(int32(0)).OverflowInt(-1 << 31); ovf {
+ t.Errorf("%v wrongly overflows int32", -int64(1)<<31)
+ }
+ ovfInt32 := int64(1 << 31)
+ if ovf := V(int32(0)).OverflowInt(ovfInt32); !ovf {
+ t.Errorf("%v should overflow int32", ovfInt32)
+ }
+
+ maxUint32 := uint64(0xffffffff)
+ if ovf := V(uint32(0)).OverflowUint(maxUint32); ovf {
+ t.Errorf("%v wrongly overflows uint32", maxUint32)
+ }
+ ovfUint32 := uint64(1 << 32)
+ if ovf := V(uint32(0)).OverflowUint(ovfUint32); !ovf {
+ t.Errorf("%v should overflow uint32", ovfUint32)
+ }
+}
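Aside: a quick check of the constants used above (fmt and math assumed
imported; not part of the patch):

	// maxFloat32 = (1<<24 - 1) << (127 - 23) = (2^24-1) * 2^104, which is
	// exactly the largest finite float32 value.
	fmt.Println(float64((1<<24-1)<<(127-23)) == math.MaxFloat32) // true
	// 1<<(127-52) is one float64 ulp at that magnitude, so adding it is the
	// smallest change that pushes the value past what float32 can represent.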
+
+func checkSameType(t *testing.T, x, y interface{}) {
+ if TypeOf(x) != TypeOf(y) {
+ t.Errorf("did not find preexisting type for %s (vs %s)", TypeOf(x), TypeOf(y))
+ }
+}
+
+func TestArrayOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T int
+ at := ArrayOf(10, TypeOf(T(1)))
+ v := New(at).Elem()
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(T(i)))
+ }
+ s := fmt.Sprint(v.Interface())
+ want := "[0 1 2 3 4 5 6 7 8 9]"
+ if s != want {
+ t.Errorf("constructed array = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ checkSameType(t, Zero(ArrayOf(5, TypeOf(T(1)))).Interface(), [5]T{})
+}
+
+func TestSliceOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T int
+ st := SliceOf(TypeOf(T(1)))
+ v := MakeSlice(st, 10, 10)
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(T(i)))
+ }
+ s := fmt.Sprint(v.Interface())
+ want := "[0 1 2 3 4 5 6 7 8 9]"
+ if s != want {
+ t.Errorf("constructed slice = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, Zero(SliceOf(TypeOf(T1(1)))).Interface(), []T1{})
+}
+
+func TestChanOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T string
+ ct := ChanOf(BothDir, TypeOf(T("")))
+ v := MakeChan(ct, 2)
+ v.Send(ValueOf(T("hello")))
+ v.Send(ValueOf(T("world")))
+
+ sv1, _ := v.Recv()
+ sv2, _ := v.Recv()
+ s1 := sv1.String()
+ s2 := sv2.String()
+ if s1 != "hello" || s2 != "world" {
+ t.Errorf("constructed chan: have %q, %q, want %q, %q", s1, s2, "hello", "world")
+ }
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, Zero(ChanOf(BothDir, TypeOf(T1(1)))).Interface(), (chan T1)(nil))
+}
+
+func TestMapOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type K string
+ type V float64
+
+ v := MakeMap(MapOf(TypeOf(K("")), TypeOf(V(0))))
+ v.SetMapIndex(ValueOf(K("a")), ValueOf(V(1)))
+
+ s := fmt.Sprint(v.Interface())
+ want := "map[a:1]"
+ if s != want {
+ t.Errorf("constructed map = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ checkSameType(t, Zero(MapOf(TypeOf(V(0)), TypeOf(K("")))).Interface(), map[V]K(nil))
+}
+
type B1 struct {
X int
Y int
@@ -122,9 +122,11 @@
panic("Not reached")
}
-// DeepEqual tests for deep equality. It uses normal == equality where possible
-// but will scan members of arrays, slices, maps, and fields of structs. It correctly
-// handles recursive types. Functions are equal only if they are both nil.
+// DeepEqual tests for deep equality. It uses normal == equality where
+// possible but will scan elements of arrays, slices, maps, and fields of
+// structs. In maps, keys are compared with == but elements use deep
+// equality. DeepEqual correctly handles recursive types. Functions are equal
+// only if they are both nil.
// An empty slice is not equal to a nil slice.
func DeepEqual(a1, a2 interface{}) bool {
if a1 == nil || a2 == nil {
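Aside: a hedged illustration of the semantics documented above (not part of
the patch):

	m1 := map[string][]int{"a": {1, 2}}
	m2 := map[string][]int{"a": {1, 2}}
	fmt.Println(reflect.DeepEqual(m1, m2))              // true: keys via ==, elements deeply
	fmt.Println(reflect.DeepEqual([]int{}, []int(nil))) // false: empty slice != nil slice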
@@ -14,3 +14,5 @@
func IsRO(v Value) bool {
return v.flag&flagRO != 0
}
+
+var ArrayOf = arrayOf
@@ -85,7 +85,7 @@
}
}
{
- // convert channel direction
+ // convert channel direction
m := make(map[<-chan int]chan int)
mv := ValueOf(m)
c1 := make(chan int)
@@ -187,8 +187,7 @@
// It panics if i is not in the range [0, NumOut()).
Out(i int) Type
- runtimeType() *runtimeType
- common() *commonType
+ common() *rtype
uncommon() *uncommonType
}
@@ -232,13 +231,11 @@
UnsafePointer
)
-type runtimeType commonType
-
-// commonType is the common implementation of most values.
+// rtype is the common implementation of most values.
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
-type commonType struct {
+type rtype struct {
kind uint8 // enumeration for C
align int8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type
@@ -249,17 +246,17 @@
hashfn func(unsafe.Pointer, uintptr) // hash function
equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) // equality function
- string *string // string form; unnecessary but undeniably useful
- *uncommonType // (relatively) uncommon fields
- ptrToThis *runtimeType // pointer to this type, if used in binary or has methods
+ string *string // string form; unnecessary but undeniably useful
+ *uncommonType // (relatively) uncommon fields
+ ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
}
// Method on non-interface type
type method struct {
name *string // name of method
pkgPath *string // nil for exported Names; otherwise import path
- mtyp *runtimeType // method type (without receiver)
- typ *runtimeType // .(*FuncType) underneath (with receiver)
+ mtyp *rtype // method type (without receiver)
+ typ *rtype // .(*FuncType) underneath (with receiver)
tfn unsafe.Pointer // fn used for normal method call
}
@@ -284,72 +281,72 @@
// arrayType represents a fixed array type.
type arrayType struct {
- commonType `reflect:"array"`
- elem *runtimeType // array element type
- slice *runtimeType // slice type
- len uintptr
+ rtype `reflect:"array"`
+ elem *rtype // array element type
+ slice *rtype // slice type
+ len uintptr
}
// chanType represents a channel type.
type chanType struct {
- commonType `reflect:"chan"`
- elem *runtimeType // channel element type
- dir uintptr // channel direction (ChanDir)
+ rtype `reflect:"chan"`
+ elem *rtype // channel element type
+ dir uintptr // channel direction (ChanDir)
}
// funcType represents a function type.
type funcType struct {
- commonType `reflect:"func"`
- dotdotdot bool // last input parameter is ...
- in []*runtimeType // input parameter types
- out []*runtimeType // output parameter types
+ rtype `reflect:"func"`
+ dotdotdot bool // last input parameter is ...
+ in []*rtype // input parameter types
+ out []*rtype // output parameter types
}
// imethod represents a method on an interface type
type imethod struct {
- name *string // name of method
- pkgPath *string // nil for exported Names; otherwise import path
- typ *runtimeType // .(*FuncType) underneath
+ name *string // name of method
+ pkgPath *string // nil for exported Names; otherwise import path
+ typ *rtype // .(*FuncType) underneath
}
// interfaceType represents an interface type.
type interfaceType struct {
- commonType `reflect:"interface"`
- methods []imethod // sorted by hash
+ rtype `reflect:"interface"`
+ methods []imethod // sorted by hash
}
// mapType represents a map type.
type mapType struct {
- commonType `reflect:"map"`
- key *runtimeType // map key type
- elem *runtimeType // map element (value) type
+ rtype `reflect:"map"`
+ key *rtype // map key type
+ elem *rtype // map element (value) type
}
// ptrType represents a pointer type.
type ptrType struct {
- commonType `reflect:"ptr"`
- elem *runtimeType // pointer element (pointed at) type
+ rtype `reflect:"ptr"`
+ elem *rtype // pointer element (pointed at) type
}
// sliceType represents a slice type.
type sliceType struct {
- commonType `reflect:"slice"`
- elem *runtimeType // slice element type
+ rtype `reflect:"slice"`
+ elem *rtype // slice element type
}
// Struct field
type structField struct {
- name *string // nil for embedded fields
- pkgPath *string // nil for exported Names; otherwise import path
- typ *runtimeType // type of field
- tag *string // nil if no tag
- offset uintptr // byte offset of field within struct
+ name *string // nil for embedded fields
+ pkgPath *string // nil for exported Names; otherwise import path
+ typ *rtype // type of field
+ tag *string // nil if no tag
+ offset uintptr // byte offset of field within struct
}
// structType represents a struct type.
type structType struct {
- commonType `reflect:"struct"`
- fields []structField // sorted by offset
+ rtype `reflect:"struct"`
+ fields []structField // sorted by offset
}
/*
@@ -432,16 +429,9 @@
return *t.name
}
-func (t *commonType) toType() Type {
- if t == nil {
- return nil
- }
- return canonicalize(t)
-}
+func (t *rtype) rawString() string { return *t.string }
-func (t *commonType) rawString() string { return *t.string }
-
-func (t *commonType) String() string {
+func (t *rtype) String() string {
// For gccgo, strip out quoted strings.
s := *t.string
var q bool
@@ -458,9 +448,9 @@
return string(r[:j])
}
-func (t *commonType) Size() uintptr { return t.size }
+func (t *rtype) Size() uintptr { return t.size }
-func (t *commonType) Bits() int {
+func (t *rtype) Bits() int {
if t == nil {
panic("reflect: Bits of nil Type")
}
@@ -471,13 +461,13 @@
return int(t.size) * 8
}
-func (t *commonType) Align() int { return int(t.align) }
+func (t *rtype) Align() int { return int(t.align) }
-func (t *commonType) FieldAlign() int { return int(t.fieldAlign) }
+func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
-func (t *commonType) Kind() Kind { return Kind(t.kind & kindMask) }
+func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
-func (t *commonType) common() *commonType { return t }
+func (t *rtype) common() *rtype { return t }
func (t *uncommonType) Method(i int) (m Method) {
if t == nil || i < 0 || i >= len(t.methods) {
@@ -492,8 +482,8 @@
m.PkgPath = *p.pkgPath
fl |= flagRO
}
- mt := toCommonType(p.typ)
- m.Type = mt.toType()
+ mt := p.typ
+ m.Type = toType(mt)
x := new(unsafe.Pointer)
*x = p.tfn
m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir}
@@ -524,8 +514,8 @@
// TODO(rsc): 6g supplies these, but they are not
// as efficient as they could be: they have commonType
-// as the receiver instead of *commonType.
-func (t *commonType) NumMethod() int {
+// as the receiver instead of *rtype.
+func (t *rtype) NumMethod() int {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.NumMethod()
@@ -533,7 +523,7 @@
return t.uncommonType.NumMethod()
}
-func (t *commonType) Method(i int) (m Method) {
+func (t *rtype) Method(i int) (m Method) {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.Method(i)
@@ -541,7 +531,7 @@
return t.uncommonType.Method(i)
}
-func (t *commonType) MethodByName(name string) (m Method, ok bool) {
+func (t *rtype) MethodByName(name string) (m Method, ok bool) {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.MethodByName(name)
@@ -549,15 +539,15 @@
return t.uncommonType.MethodByName(name)
}
-func (t *commonType) PkgPath() string {
+func (t *rtype) PkgPath() string {
return t.uncommonType.PkgPath()
}
-func (t *commonType) Name() string {
+func (t *rtype) Name() string {
return t.uncommonType.Name()
}
-func (t *commonType) ChanDir() ChanDir {
+func (t *rtype) ChanDir() ChanDir {
if t.Kind() != Chan {
panic("reflect: ChanDir of non-chan type")
}
@@ -565,7 +555,7 @@
return ChanDir(tt.dir)
}
-func (t *commonType) IsVariadic() bool {
+func (t *rtype) IsVariadic() bool {
if t.Kind() != Func {
panic("reflect: IsVariadic of non-func type")
}
@@ -573,7 +563,7 @@
return tt.dotdotdot
}
-func (t *commonType) Elem() Type {
+func (t *rtype) Elem() Type {
switch t.Kind() {
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
@@ -594,7 +584,7 @@
panic("reflect: Elem of invalid type")
}
-func (t *commonType) Field(i int) StructField {
+func (t *rtype) Field(i int) StructField {
if t.Kind() != Struct {
panic("reflect: Field of non-struct type")
}
@@ -602,7 +592,7 @@
return tt.Field(i)
}
-func (t *commonType) FieldByIndex(index []int) StructField {
+func (t *rtype) FieldByIndex(index []int) StructField {
if t.Kind() != Struct {
panic("reflect: FieldByIndex of non-struct type")
}
@@ -610,7 +600,7 @@
return tt.FieldByIndex(index)
}
-func (t *commonType) FieldByName(name string) (StructField, bool) {
+func (t *rtype) FieldByName(name string) (StructField, bool) {
if t.Kind() != Struct {
panic("reflect: FieldByName of non-struct type")
}
@@ -618,7 +608,7 @@
return tt.FieldByName(name)
}
-func (t *commonType) FieldByNameFunc(match func(string) bool) (StructField, bool) {
+func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
if t.Kind() != Struct {
panic("reflect: FieldByNameFunc of non-struct type")
}
@@ -626,7 +616,7 @@
return tt.FieldByNameFunc(match)
}
-func (t *commonType) In(i int) Type {
+func (t *rtype) In(i int) Type {
if t.Kind() != Func {
panic("reflect: In of non-func type")
}
@@ -634,7 +624,7 @@
return toType(tt.in[i])
}
-func (t *commonType) Key() Type {
+func (t *rtype) Key() Type {
if t.Kind() != Map {
panic("reflect: Key of non-map type")
}
@@ -642,7 +632,7 @@
return toType(tt.key)
}
-func (t *commonType) Len() int {
+func (t *rtype) Len() int {
if t.Kind() != Array {
panic("reflect: Len of non-array type")
}
@@ -650,7 +640,7 @@
return int(tt.len)
}
-func (t *commonType) NumField() int {
+func (t *rtype) NumField() int {
if t.Kind() != Struct {
panic("reflect: NumField of non-struct type")
}
@@ -658,7 +648,7 @@
return len(tt.fields)
}
-func (t *commonType) NumIn() int {
+func (t *rtype) NumIn() int {
if t.Kind() != Func {
panic("reflect: NumIn of non-func type")
}
@@ -666,7 +656,7 @@
return len(tt.in)
}
-func (t *commonType) NumOut() int {
+func (t *rtype) NumOut() int {
if t.Kind() != Func {
panic("reflect: NumOut of non-func type")
}
@@ -674,7 +664,7 @@
return len(tt.out)
}
-func (t *commonType) Out(i int) Type {
+func (t *rtype) Out(i int) Type {
if t.Kind() != Func {
panic("reflect: Out of non-func type")
}
@@ -844,7 +834,7 @@
// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
- f.Type = Type(t.toType())
+ f.Type = toType(&t.rtype)
for i, x := range index {
if i > 0 {
ft := f.Type
@@ -915,13 +905,13 @@
f := &t.fields[i]
// Find name and type for field f.
var fname string
- var ntyp *commonType
+ var ntyp *rtype
if f.name != nil {
fname = *f.name
} else {
// Anonymous field of type T or *T.
// Name taken from type.
- ntyp = toCommonType(f.typ)
+ ntyp = f.typ
if ntyp.Kind() == Ptr {
ntyp = ntyp.Elem().common()
}
@@ -945,19 +935,23 @@
// Queue embedded struct fields for processing with next level,
// but only if we haven't seen a match yet at this level and only
- // if the embedded types haven't alredy been queued.
+ // if the embedded types haven't already been queued.
if ok || ntyp == nil || ntyp.Kind() != Struct {
continue
}
+ ntyp = toType(ntyp).common()
styp := (*structType)(unsafe.Pointer(ntyp))
if nextCount[styp] > 0 {
- nextCount[styp]++
+ nextCount[styp] = 2 // exact multiple doesn't matter
continue
}
if nextCount == nil {
nextCount = map[*structType]int{}
}
nextCount[styp] = 1
+ if count[t] > 1 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ }
var index []int
index = append(index, scan.index...)
index = append(index, i)
@@ -994,53 +988,6 @@
return t.FieldByNameFunc(func(s string) bool { return s == name })
}
-// Convert runtime type to reflect type.
-func toCommonType(p *runtimeType) *commonType {
- if p == nil {
- return nil
- }
- return (*commonType)(unsafe.Pointer(p))
-}
-
-// Canonicalize a Type.
-var canonicalType = make(map[string]Type)
-
-var canonicalTypeLock sync.RWMutex
-
-func canonicalize(t Type) Type {
- if t == nil {
- return nil
- }
- u := t.uncommon()
- var s string
- if u == nil || u.PkgPath() == "" {
- s = t.rawString()
- } else {
- s = u.PkgPath() + "." + u.Name()
- }
- canonicalTypeLock.RLock()
- if r, ok := canonicalType[s]; ok {
- canonicalTypeLock.RUnlock()
- return r
- }
- canonicalTypeLock.RUnlock()
- canonicalTypeLock.Lock()
- if r, ok := canonicalType[s]; ok {
- canonicalTypeLock.Unlock()
- return r
- }
- canonicalType[s] = t
- canonicalTypeLock.Unlock()
- return t
-}
-
-func toType(p *runtimeType) Type {
- if p == nil {
- return nil
- }
- return (*commonType)(unsafe.Pointer(p))
-}
-
// TypeOf returns the reflection Type of the value in the interface{}.
// TypeOf(nil) returns nil.
func TypeOf(i interface{}) Type {
@@ -1051,22 +998,18 @@
// ptrMap is the cache for PtrTo.
var ptrMap struct {
sync.RWMutex
- m map[*commonType]*ptrType
-}
-
-func (t *commonType) runtimeType() *runtimeType {
- return (*runtimeType)(unsafe.Pointer(t))
+ m map[*rtype]*ptrType
}
// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
- return t.(*commonType).ptrTo()
+ return t.(*rtype).ptrTo()
}
-func (ct *commonType) ptrTo() *commonType {
- if p := ct.ptrToThis; p != nil {
- return toCommonType(p)
+func (t *rtype) ptrTo() *rtype {
+ if p := t.ptrToThis; p != nil {
+ return p
}
// Otherwise, synthesize one.
@@ -1076,39 +1019,39 @@
// the type structures in read-only memory.
ptrMap.RLock()
if m := ptrMap.m; m != nil {
- if p := m[ct]; p != nil {
+ if p := m[t]; p != nil {
ptrMap.RUnlock()
- return &p.commonType
+ return &p.rtype
}
}
ptrMap.RUnlock()
ptrMap.Lock()
if ptrMap.m == nil {
- ptrMap.m = make(map[*commonType]*ptrType)
+ ptrMap.m = make(map[*rtype]*ptrType)
}
- p := ptrMap.m[ct]
+ p := ptrMap.m[t]
if p != nil {
// some other goroutine won the race and created it
ptrMap.Unlock()
- return &p.commonType
+ return &p.rtype
}
- s := "*" + *ct.string
+ s := "*" + *t.string
canonicalTypeLock.RLock()
r, ok := canonicalType[s]
canonicalTypeLock.RUnlock()
if ok {
- ptrMap.m[ct] = (*ptrType)(unsafe.Pointer(r.(*commonType)))
+ ptrMap.m[t] = (*ptrType)(unsafe.Pointer(r.(*rtype)))
ptrMap.Unlock()
- return r.(*commonType)
+ return r.(*rtype)
}
// initialize p using *byte's ptrType as a prototype.
p = new(ptrType)
- var ibyte interface{} = (*byte)(nil)
- bp := (*ptrType)(unsafe.Pointer(*(**runtimeType)(unsafe.Pointer(&ibyte))))
- *p = *bp
+ var iptr interface{} = (*unsafe.Pointer)(nil)
+ prototype := *(**ptrType)(unsafe.Pointer(&iptr))
+ *p = *prototype
p.string = &s
@@ -1117,50 +1060,58 @@
// Create a good hash for the new string by using
// the FNV-1 hash's mixing function to combine the
// old hash and the new "*".
- // p.hash = ct.hash*16777619 ^ '*'
+ // p.hash = fnv1(t.hash, '*')
// This is the gccgo version.
- p.hash = (ct.hash << 4) + 9
+ p.hash = (t.hash << 4) + 9
p.uncommonType = nil
p.ptrToThis = nil
- p.elem = (*runtimeType)(unsafe.Pointer(ct))
+ p.elem = t
- q := canonicalize(&p.commonType)
- p = (*ptrType)(unsafe.Pointer(q.(*commonType)))
+ q := canonicalize(&p.rtype)
+ p = (*ptrType)(unsafe.Pointer(q.(*rtype)))
- ptrMap.m[ct] = p
+ ptrMap.m[t] = p
ptrMap.Unlock()
- return &p.commonType
+ return &p.rtype
}
-func (t *commonType) Implements(u Type) bool {
+// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
+func fnv1(x uint32, list ...byte) uint32 {
+ for _, b := range list {
+ x = x*16777619 ^ uint32(b)
+ }
+ return x
+}
+
+func (t *rtype) Implements(u Type) bool {
if u == nil {
panic("reflect: nil type passed to Type.Implements")
}
if u.Kind() != Interface {
panic("reflect: non-interface type passed to Type.Implements")
}
- return implements(u.(*commonType), t)
+ return implements(u.(*rtype), t)
}
-func (t *commonType) AssignableTo(u Type) bool {
+func (t *rtype) AssignableTo(u Type) bool {
if u == nil {
panic("reflect: nil type passed to Type.AssignableTo")
}
- uu := u.(*commonType)
+ uu := u.(*rtype)
return directlyAssignable(uu, t) || implements(uu, t)
}
-func (t *commonType) ConvertibleTo(u Type) bool {
+func (t *rtype) ConvertibleTo(u Type) bool {
if u == nil {
panic("reflect: nil type passed to Type.ConvertibleTo")
}
- uu := u.(*commonType)
+ uu := u.(*rtype)
return convertOp(uu, t) != nil
}
// implements returns true if the type V implements the interface type T.
-func implements(T, V *commonType) bool {
+func implements(T, V *rtype) bool {
if T.Kind() != Interface {
return false
}
@@ -1218,7 +1169,7 @@
// http://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
-func directlyAssignable(T, V *commonType) bool {
+func directlyAssignable(T, V *rtype) bool {
// x's type V is identical to T?
if T == V {
return true
@@ -1234,7 +1185,7 @@
return haveIdenticalUnderlyingType(T, V)
}
-func haveIdenticalUnderlyingType(T, V *commonType) bool {
+func haveIdenticalUnderlyingType(T, V *rtype) bool {
if T == V {
return true
}
@@ -1330,3 +1281,286 @@
return false
}
+
+// The lookupCache caches ChanOf, MapOf, and SliceOf lookups.
+var lookupCache struct {
+ sync.RWMutex
+ m map[cacheKey]*rtype
+}
+
+// A cacheKey is the key for use in the lookupCache.
+// Four values describe any of the types we are looking for:
+// type kind, one or two subtypes, and an extra integer.
+type cacheKey struct {
+ kind Kind
+ t1 *rtype
+ t2 *rtype
+ extra uintptr
+}
+
+// cacheGet looks for a type under the key k in the lookupCache.
+// If it finds one, it returns that type.
+// If not, it returns nil with the cache locked.
+// The caller is expected to use cachePut to unlock the cache.
+func cacheGet(k cacheKey) Type {
+ lookupCache.RLock()
+ t := lookupCache.m[k]
+ lookupCache.RUnlock()
+ if t != nil {
+ return t
+ }
+
+ lookupCache.Lock()
+ t = lookupCache.m[k]
+ if t != nil {
+ lookupCache.Unlock()
+ return t
+ }
+
+ if lookupCache.m == nil {
+ lookupCache.m = make(map[cacheKey]*rtype)
+ }
+
+ return nil
+}
+
+// cachePut stores the given type in the cache, unlocks the cache,
+// and returns the type. It is expected that the cache is locked
+// because cacheGet returned nil.
+func cachePut(k cacheKey, t *rtype) Type {
+ t = toType(t).common()
+ lookupCache.m[k] = t
+ lookupCache.Unlock()
+ return t
+}
+
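Aside: cacheGet's contract (return nil with the lock still held) pairs with
cachePut, which stores and unlocks. A sketch of the intended calling pattern,
using a hypothetical helper; the real callers are ChanOf, MapOf, and SliceOf
below:

	func lookupOrBuild(k cacheKey, build func() *rtype) Type {
		if t := cacheGet(k); t != nil {
			return t // hit: cacheGet already released the lock
		}
		// miss: lookupCache is still locked; cachePut stores and unlocks
		return cachePut(k, build())
	}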
+// ChanOf returns the channel type with the given direction and element type.
+// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
+//
+// The gc runtime imposes a limit of 64 kB on channel element types.
+// If t's size is equal to or exceeds this limit, ChanOf panics.
+func ChanOf(dir ChanDir, t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
+ if ch := cacheGet(ckey); ch != nil {
+ return ch
+ }
+
+ // This restriction is imposed by the gc compiler and the runtime.
+ if typ.size >= 1<<16 {
+ lookupCache.Unlock()
+ panic("reflect.ChanOf: element size too large")
+ }
+
+ // Look in known types.
+ // TODO: Precedence when constructing string.
+ var s string
+ switch dir {
+ default:
+ lookupCache.Unlock()
+ panic("reflect.ChanOf: invalid dir")
+ case SendDir:
+ s = "chan<- " + *typ.string
+ case RecvDir:
+ s = "<-chan " + *typ.string
+ case BothDir:
+ s = "chan " + *typ.string
+ }
+
+ // Make a channel type.
+ var ichan interface{} = (chan unsafe.Pointer)(nil)
+ prototype := *(**chanType)(unsafe.Pointer(&ichan))
+ ch := new(chanType)
+ *ch = *prototype
+ ch.string = &s
+
+ // gccgo uses a different hash.
+ // ch.hash = fnv1(typ.hash, 'c', byte(dir))
+ ch.hash = 0
+ if dir&SendDir != 0 {
+ ch.hash += 1
+ }
+ if dir&RecvDir != 0 {
+ ch.hash += 2
+ }
+ ch.hash += typ.hash << 2
+ ch.hash <<= 3
+ ch.hash += 15
+
+ ch.elem = typ
+ ch.uncommonType = nil
+ ch.ptrToThis = nil
+
+ return cachePut(ckey, &ch.rtype)
+}
+
+// MapOf returns the map type with the given key and element types.
+// For example, if k represents int and e represents string,
+// MapOf(k, e) represents map[int]string.
+//
+// If the key type is not a valid map key type (that is, if it does
+// not implement Go's == operator), MapOf panics. TODO(rsc).
+func MapOf(key, elem Type) Type {
+ ktyp := key.(*rtype)
+ etyp := elem.(*rtype)
+
+ // TODO: Check for invalid key types.
+
+ // Look in cache.
+ ckey := cacheKey{Map, ktyp, etyp, 0}
+ if mt := cacheGet(ckey); mt != nil {
+ return mt
+ }
+
+ // Look in known types.
+ s := "map[" + *ktyp.string + "]" + *etyp.string
+
+ // Make a map type.
+ var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
+ prototype := *(**mapType)(unsafe.Pointer(&imap))
+ mt := new(mapType)
+ *mt = *prototype
+ mt.string = &s
+
+ // gccgo uses a different hash
+ // mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
+ mt.hash = ktyp.hash + etyp.hash + 2 + 14
+
+ mt.key = ktyp
+ mt.elem = etyp
+ mt.uncommonType = nil
+ mt.ptrToThis = nil
+
+ return cachePut(ckey, &mt.rtype)
+}
+
+// SliceOf returns the slice type with element type t.
+// For example, if t represents int, SliceOf(t) represents []int.
+func SliceOf(t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Slice, typ, nil, 0}
+ if slice := cacheGet(ckey); slice != nil {
+ return slice
+ }
+
+ // Look in known types.
+ s := "[]" + *typ.string
+
+ // Make a slice type.
+ var islice interface{} = ([]unsafe.Pointer)(nil)
+ prototype := *(**sliceType)(unsafe.Pointer(&islice))
+ slice := new(sliceType)
+ *slice = *prototype
+ slice.string = &s
+
+ // gccgo uses a different hash.
+ // slice.hash = fnv1(typ.hash, '[')
+ slice.hash = typ.hash + 1 + 13
+
+ slice.elem = typ
+ slice.uncommonType = nil
+ slice.ptrToThis = nil
+
+ return cachePut(ckey, &slice.rtype)
+}
+
+// ArrayOf returns the array type with the given count and element type.
+// For example, if t represents int, ArrayOf(5, t) represents [5]int.
+//
+// If the resulting type would be larger than the available address space,
+// ArrayOf panics.
+//
+// TODO(rsc): Unexported for now. Export once the alg field is set correctly
+// for the type. This may require significant work.
+func arrayOf(count int, elem Type) Type {
+ typ := elem.(*rtype)
+ slice := SliceOf(elem)
+
+ // Look in cache.
+ ckey := cacheKey{Array, typ, nil, uintptr(count)}
+ if slice := cacheGet(ckey); slice != nil {
+ return slice
+ }
+
+ // Look in known types.
+ s := "[" + strconv.Itoa(count) + "]" + *typ.string
+
+ // Make an array type.
+ var iarray interface{} = [1]unsafe.Pointer{}
+ prototype := *(**arrayType)(unsafe.Pointer(&iarray))
+ array := new(arrayType)
+ *array = *prototype
+ array.string = &s
+
+ // gccgo uses a different hash.
+ // array.hash = fnv1(typ.hash, '[')
+ // for n := uint32(count); n > 0; n >>= 8 {
+ // array.hash = fnv1(array.hash, byte(n))
+ // }
+ // array.hash = fnv1(array.hash, ']')
+ array.hash = typ.hash + 1 + 13
+
+ array.elem = typ
+ max := ^uintptr(0) / typ.size
+ if uintptr(count) > max {
+ panic("reflect.ArrayOf: array size would exceed virtual address space")
+ }
+ array.size = typ.size * uintptr(count)
+ array.align = typ.align
+ array.fieldAlign = typ.fieldAlign
+ // TODO: array.alg
+ // TODO: array.gc
+ array.uncommonType = nil
+ array.ptrToThis = nil
+ array.len = uintptr(count)
+ array.slice = slice.(*rtype)
+
+ return cachePut(ckey, &array.rtype)
+}
+
+// toType converts from a *rtype to a Type that can be returned
+// to the client of package reflect. In gc, the only concern is that
+// a nil *rtype must be replaced by a nil Type, but in gccgo this
+// function takes care of ensuring that multiple *rtype for the same
+// type are coalesced into a single Type.
+var canonicalType = make(map[string]Type)
+
+var canonicalTypeLock sync.RWMutex
+
+func canonicalize(t Type) Type {
+ if t == nil {
+ return nil
+ }
+ u := t.uncommon()
+ var s string
+ if u == nil || u.PkgPath() == "" {
+ s = t.rawString()
+ } else {
+ s = u.PkgPath() + "." + u.Name()
+ }
+ canonicalTypeLock.RLock()
+ if r, ok := canonicalType[s]; ok {
+ canonicalTypeLock.RUnlock()
+ return r
+ }
+ canonicalTypeLock.RUnlock()
+ canonicalTypeLock.Lock()
+ if r, ok := canonicalType[s]; ok {
+ canonicalTypeLock.Unlock()
+ return r
+ }
+ canonicalType[s] = t
+ canonicalTypeLock.Unlock()
+ return t
+}
+
+func toType(p *rtype) Type {
+ if p == nil {
+ return nil
+ }
+ return canonicalize(p)
+}
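Aside: the practical payoff of canonicalization is that Type values compare
with ==, which is what checkSameType in the tests relies on. A sketch (not
part of the patch):

	t1 := reflect.TypeOf([]int{})
	t2 := reflect.TypeOf([]int(nil))
	fmt.Println(t1 == t2) // true, even if gccgo built two *rtype for []int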
@@ -60,7 +60,7 @@
// direct operations.
type Value struct {
// typ holds the type of the value represented by a Value.
- typ *commonType
+ typ *rtype
// val holds the 1-word representation of the value.
// If flag's flagIndir bit is set, then val is a pointer to the data.
@@ -211,7 +211,7 @@
// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
- typ *runtimeType
+ typ *rtype
word iword
}
@@ -219,7 +219,7 @@
type nonEmptyInterface struct {
// see ../runtime/iface.c:/Itab
itab *struct {
- typ *runtimeType // dynamic concrete type
+ typ *rtype // dynamic concrete type
fun [100000]unsafe.Pointer // method table
}
word iword
@@ -372,7 +372,7 @@
if m.pkgPath != nil {
panic(method + " of unexported method")
}
- t = toCommonType(m.typ)
+ t = m.typ
iface := (*nonEmptyInterface)(v.val)
if iface.itab == nil {
panic(method + " of method on nil interface value")
@@ -389,7 +389,7 @@
panic(method + " of unexported method")
}
fn = m.tfn
- t = toCommonType(m.mtyp)
+ t = m.mtyp
rcvr = v.iword()
}
} else if v.flag&flagIndir != 0 {
@@ -474,7 +474,7 @@
first_pointer := false
for i, pv := range in {
pv.mustBeExported()
- targ := t.In(i).(*commonType)
+ targ := t.In(i).(*rtype)
pv = pv.assignTo("reflect.Value.Call", targ, nil)
if pv.flag&flagIndir == 0 {
p := new(unsafe.Pointer)
@@ -517,7 +517,7 @@
// gccgo specific test to see if typ is a method. We can tell by
// looking at the string to see if there is a receiver. We need this
// because for gccgo all methods take pointer receivers.
-func isMethod(t *commonType) bool {
+func isMethod(t *rtype) bool {
if Kind(t.kind) != Func {
return false
}
@@ -553,7 +553,7 @@
off := uintptr(0)
in := make([]Value, 0, len(ftyp.in))
for _, arg := range ftyp.in {
- typ := toCommonType(arg)
+ typ := arg
off += -off & uintptr(typ.align-1)
v := Value{typ, nil, flag(typ.Kind()) << flagKindShift}
if typ.size <= ptrSize {
@@ -582,7 +582,7 @@
if len(ftyp.out) > 0 {
off += -off & (ptrSize - 1)
for i, arg := range ftyp.out {
- typ := toCommonType(arg)
+ typ := arg
v := out[i]
if v.typ != typ {
panic("reflect: function created by MakeFunc using " + funcName(f) +
@@ -665,7 +665,7 @@
switch k {
case Interface:
var (
- typ *commonType
+ typ *rtype
val unsafe.Pointer
)
if v.typ.NumMethod() == 0 {
@@ -674,7 +674,7 @@
// nil interface value
return Value{}
}
- typ = toCommonType(eface.typ)
+ typ = eface.typ
val = unsafe.Pointer(eface.word)
} else {
iface := (*nonEmptyInterface)(v.val)
@@ -682,7 +682,7 @@
// nil interface value
return Value{}
}
- typ = toCommonType(iface.itab.typ)
+ typ = iface.itab.typ
val = unsafe.Pointer(iface.word)
}
fl := v.flag & flagRO
@@ -702,7 +702,7 @@
return Value{}
}
tt := (*ptrType)(unsafe.Pointer(v.typ))
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl := v.flag&flagRO | flagIndir | flagAddr
fl |= flag(typ.Kind() << flagKindShift)
return Value{typ, val, fl}
@@ -719,7 +719,7 @@
panic("reflect: Field index out of range")
}
field := &tt.fields[i]
- typ := toCommonType(field.typ)
+ typ := field.typ
// Inherit permission bits from v.
fl := v.flag & (flagRO | flagIndir | flagAddr)
@@ -802,8 +802,10 @@
panic(&ValueError{"reflect.Value.Float", k})
}
+var uint8Type = TypeOf(uint8(0)).(*rtype)
+
// Index returns v's i'th element.
-// It panics if v's Kind is not Array or Slice or i is out of range.
+// It panics if v's Kind is not Array, Slice, or String or i is out of range.
func (v Value) Index(i int) Value {
k := v.kind()
switch k {
@@ -812,7 +814,7 @@
if i < 0 || i > int(tt.len) {
panic("reflect: array index out of range")
}
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl := v.flag & (flagRO | flagIndir | flagAddr) // bits same as overall array
fl |= flag(typ.Kind()) << flagKindShift
offset := uintptr(i) * typ.size
@@ -840,10 +842,19 @@
panic("reflect: slice index out of range")
}
tt := (*sliceType)(unsafe.Pointer(v.typ))
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl |= flag(typ.Kind()) << flagKindShift
val := unsafe.Pointer(s.Data + uintptr(i)*typ.size)
return Value{typ, val, fl}
+
+ case String:
+ fl := v.flag&flagRO | flag(Uint8<<flagKindShift) | flagIndir
+ s := (*StringHeader)(v.val)
+ if i < 0 || i >= s.Len {
+ panic("reflect: string index out of range")
+ }
+ val := *(*byte)(unsafe.Pointer(s.Data + uintptr(i)))
+ return Value{uint8Type, unsafe.Pointer(&val), fl}
}
panic(&ValueError{"reflect.Value.Index", k})
}
@@ -925,7 +936,7 @@
// Non-interface value.
var eface emptyInterface
- eface.typ = v.typ.runtimeType()
+ eface.typ = toType(v.typ).common()
eface.word = v.iword()
if v.flag&flagIndir != 0 && v.kind() != Ptr && v.kind() != UnsafePointer {
@@ -937,6 +948,10 @@
eface.word = iword(ptr)
}
+ if v.flag&flagIndir == 0 && v.kind() != Ptr && v.kind() != UnsafePointer {
+ panic("missing flagIndir")
+ }
+
return *(*interface{})(unsafe.Pointer(&eface))
}
@@ -1026,13 +1041,13 @@
// considered unexported. This is consistent with the
// behavior for structs, which allow read but not write
// of unexported fields.
- key = key.assignTo("reflect.Value.MapIndex", toCommonType(tt.key), nil)
+ key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
- word, ok := mapaccess(v.typ.runtimeType(), *(*iword)(v.iword()), key.iword())
+ word, ok := mapaccess(v.typ, *(*iword)(v.iword()), key.iword())
if !ok {
return Value{}
}
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl := (v.flag | key.flag) & flagRO
if typ.Kind() != Ptr && typ.Kind() != UnsafePointer {
fl |= flagIndir
@@ -1048,7 +1063,7 @@
func (v Value) MapKeys() []Value {
v.mustBe(Map)
tt := (*mapType)(unsafe.Pointer(v.typ))
- keyType := toCommonType(tt.key)
+ keyType := tt.key
fl := v.flag & flagRO
fl |= flag(keyType.Kind()) << flagKindShift
@@ -1061,7 +1076,7 @@
if m != nil {
mlen = maplen(m)
}
- it := mapiterinit(v.typ.runtimeType(), m)
+ it := mapiterinit(v.typ, m)
a := make([]Value, mlen)
var i int
for i = 0; i < len(a); i++ {
@@ -1160,7 +1175,7 @@
if x < 0 {
x = -x
}
- return math.MaxFloat32 <= x && x <= math.MaxFloat64
+ return math.MaxFloat32 < x && x <= math.MaxFloat64
}
// OverflowInt returns true if the int64 x cannot be represented by v's type.
@@ -1230,9 +1245,9 @@
if ChanDir(tt.dir)&RecvDir == 0 {
panic("recv on send-only channel")
}
- word, selected, ok := chanrecv(v.typ.runtimeType(), *(*iword)(v.iword()), nb)
+ word, selected, ok := chanrecv(v.typ, *(*iword)(v.iword()), nb)
if selected {
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl := flag(typ.Kind()) << flagKindShift
if typ.Kind() != Ptr && typ.Kind() != UnsafePointer {
fl |= flagIndir
@@ -1259,8 +1274,8 @@
panic("send on recv-only channel")
}
x.mustBeExported()
- x = x.assignTo("reflect.Value.Send", toCommonType(tt.elem), nil)
- return chansend(v.typ.runtimeType(), *(*iword)(v.iword()), x.iword(), nb)
+ x = x.assignTo("reflect.Value.Send", tt.elem, nil)
+ return chansend(v.typ, *(*iword)(v.iword()), x.iword(), nb)
}
// Set assigns x to the value v.
@@ -1382,12 +1397,12 @@
v.mustBeExported()
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ))
- key = key.assignTo("reflect.Value.SetMapIndex", toCommonType(tt.key), nil)
+ key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
if val.typ != nil {
val.mustBeExported()
- val = val.assignTo("reflect.Value.SetMapIndex", toCommonType(tt.elem), nil)
+ val = val.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
}
- mapassign(v.typ.runtimeType(), *(*iword)(v.iword()), key.iword(), val.iword(), val.typ != nil)
+ mapassign(v.typ, *(*iword)(v.iword()), key.iword(), val.iword(), val.typ != nil)
}
// SetUint sets v's underlying value to x.
@@ -1429,7 +1444,7 @@
}
// Slice returns a slice of v.
-// It panics if v's Kind is not Array or Slice.
+// It panics if v's Kind is not Array, Slice, or String.
func (v Value) Slice(beg, end int) Value {
var (
cap int
@@ -1439,21 +1454,34 @@
switch k := v.kind(); k {
default:
panic(&ValueError{"reflect.Value.Slice", k})
+
case Array:
if v.flag&flagAddr == 0 {
panic("reflect.Value.Slice: slice of unaddressable array")
}
tt := (*arrayType)(unsafe.Pointer(v.typ))
cap = int(tt.len)
- typ = (*sliceType)(unsafe.Pointer(toCommonType(tt.slice)))
+ typ = (*sliceType)(unsafe.Pointer(tt.slice))
base = v.val
+
case Slice:
typ = (*sliceType)(unsafe.Pointer(v.typ))
s := (*SliceHeader)(v.val)
base = unsafe.Pointer(s.Data)
cap = s.Cap
+ case String:
+ s := (*StringHeader)(v.val)
+ if beg < 0 || end < beg || end > s.Len {
+ panic("reflect.Value.Slice: string slice index out of bounds")
+ }
+ var x string
+ val := (*StringHeader)(unsafe.Pointer(&x))
+ val.Data = s.Data + uintptr(beg)
+ val.Len = end - beg
+ return Value{v.typ, unsafe.Pointer(&x), v.flag}
}
+
if beg < 0 || end < beg || end > cap {
panic("reflect.Value.Slice: slice index out of bounds")
}
@@ -1463,7 +1491,7 @@
// Reinterpret as *SliceHeader to edit.
s := (*SliceHeader)(unsafe.Pointer(&x))
- s.Data = uintptr(base) + uintptr(beg)*toCommonType(typ.elem).Size()
+ s.Data = uintptr(base) + uintptr(beg)*typ.elem.Size()
s.Len = end - beg
s.Cap = cap - beg
@@ -1516,7 +1544,7 @@
}
if f&flagMethod == 0 {
// Easy case
- return v.typ.toType()
+ return toType(v.typ)
}
// Method value.
@@ -1529,7 +1557,7 @@
panic("reflect: broken Value")
}
m := &tt.methods[i]
- return toCommonType(m.typ).toType()
+ return toType(m.typ)
}
// Method on concrete type.
ut := v.typ.uncommon()
@@ -1537,7 +1565,7 @@
panic("reflect: broken Value")
}
m := &ut.methods[i]
- return toCommonType(m.mtyp).toType()
+ return toType(m.mtyp)
}
// Uint returns v's underlying value, as a uint64.
@@ -1711,10 +1739,10 @@
// A runtimeSelect is a single case passed to rselect.
// This must match ../runtime/chan.c:/runtimeSelect
type runtimeSelect struct {
- dir uintptr // 0, SendDir, or RecvDir
- typ *runtimeType // channel type
- ch iword // interface word for channel
- val iword // interface word for value (for SendDir)
+ dir uintptr // 0, SendDir, or RecvDir
+ typ *rtype // channel type
+ ch iword // interface word for channel
+ val iword // interface word for value (for SendDir)
}
// rselect runs a select. It returns the index of the chosen case,
@@ -1801,13 +1829,13 @@
panic("reflect.Select: SendDir case using recv-only channel")
}
rc.ch = *(*iword)(ch.iword())
- rc.typ = tt.runtimeType()
+ rc.typ = &tt.rtype
v := c.Send
if !v.IsValid() {
panic("reflect.Select: SendDir case missing Send value")
}
v.mustBeExported()
- v = v.assignTo("reflect.Select", toCommonType(tt.elem), nil)
+ v = v.assignTo("reflect.Select", tt.elem, nil)
rc.val = v.iword()
case SelectRecv:
@@ -1821,7 +1849,7 @@
ch.mustBe(Chan)
ch.mustBeExported()
tt := (*chanType)(unsafe.Pointer(ch.typ))
- rc.typ = tt.runtimeType()
+ rc.typ = &tt.rtype
if ChanDir(tt.dir)&RecvDir == 0 {
panic("reflect.Select: RecvDir case using send-only channel")
}
@@ -1831,8 +1859,8 @@
chosen, word, recvOK := rselect(runcases)
if runcases[chosen].dir == uintptr(SelectRecv) {
- tt := (*chanType)(unsafe.Pointer(toCommonType(runcases[chosen].typ)))
- typ := toCommonType(tt.elem)
+ tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
+ typ := tt.elem
fl := flag(typ.Kind()) << flagKindShift
if typ.Kind() != Ptr && typ.Kind() != UnsafePointer {
fl |= flagIndir
@@ -1847,8 +1875,8 @@
*/
// implemented in package runtime
-func unsafe_New(Type) unsafe.Pointer
-func unsafe_NewArray(Type, int) unsafe.Pointer
+func unsafe_New(*rtype) unsafe.Pointer
+func unsafe_NewArray(*rtype, int) unsafe.Pointer
// MakeSlice creates a new zero-initialized slice value
// for the specified slice type, length, and capacity.
@@ -1871,7 +1899,7 @@
// Reinterpret as *SliceHeader to edit.
s := (*SliceHeader)(unsafe.Pointer(&x))
- s.Data = uintptr(unsafe_NewArray(typ.Elem(), cap))
+ s.Data = uintptr(unsafe_NewArray(typ.Elem().(*rtype), cap))
s.Len = len
s.Cap = cap
@@ -1889,7 +1917,7 @@
if typ.ChanDir() != BothDir {
panic("reflect.MakeChan: unidirectional channel type")
}
- ch := makechan(typ.runtimeType(), uint64(buffer))
+ ch := makechan(typ.(*rtype), uint64(buffer))
return Value{typ.common(), unsafe.Pointer(ch), flagIndir | (flag(Chan) << flagKindShift)}
}
@@ -1898,7 +1926,7 @@
if typ.Kind() != Map {
panic("reflect.MakeMap of non-map type")
}
- m := makemap(typ.runtimeType())
+ m := makemap(typ.(*rtype))
return Value{typ.common(), unsafe.Pointer(m), flagIndir | (flag(Map) << flagKindShift)}
}
@@ -1929,7 +1957,7 @@
// For an interface value with the noAddr bit set,
// the representation is identical to an empty interface.
eface := *(*emptyInterface)(unsafe.Pointer(&i))
- typ := toCommonType(eface.typ)
+ typ := eface.typ
fl := flag(typ.Kind()) << flagKindShift
if typ.Kind() != Ptr && typ.Kind() != UnsafePointer {
fl |= flagIndir
@@ -1951,7 +1979,7 @@
if t.Kind() == Ptr || t.Kind() == UnsafePointer {
return Value{t, nil, fl}
}
- return Value{t, unsafe_New(typ), fl | flagIndir}
+ return Value{t, unsafe_New(typ.(*rtype)), fl | flagIndir}
}
// New returns a Value representing a pointer to a new zero value
@@ -1960,7 +1988,7 @@
if typ == nil {
panic("reflect: New(nil)")
}
- ptr := unsafe_New(typ)
+ ptr := unsafe_New(typ.(*rtype))
fl := flag(Ptr) << flagKindShift
return Value{typ.common().ptrTo(), ptr, fl}
}
@@ -1975,7 +2003,7 @@
// assignTo returns a value v that can be assigned directly to typ.
// It panics if v is not assignable to typ.
// For a conversion to an interface type, target is a suggested scratch space to use.
-func (v Value) assignTo(context string, dst *commonType, target *interface{}) Value {
+func (v Value) assignTo(context string, dst *rtype, target *interface{}) Value {
if v.flag&flagMethod != 0 {
panic(context + ": cannot assign method value to type " + dst.String())
}
@@ -1997,7 +2025,7 @@
if dst.NumMethod() == 0 {
*target = x
} else {
- ifaceE2I(dst.runtimeType(), x, unsafe.Pointer(target))
+ ifaceE2I(dst, x, unsafe.Pointer(target))
}
return Value{dst, unsafe.Pointer(target), flagIndir | flag(Interface)<<flagKindShift}
}
@@ -2022,7 +2050,7 @@
// convertOp returns the function to convert a value of type src
// to a value of type dst. If the conversion is illegal, convertOp returns nil.
-func convertOp(dst, src *commonType) func(Value, Type) Value {
+func convertOp(dst, src *rtype) func(Value, Type) Value {
switch src.Kind() {
case Int, Int8, Int16, Int32, Int64:
switch dst.Kind() {
@@ -2109,9 +2137,9 @@
typ := t.common()
if typ.size > ptrSize {
// Assume ptrSize >= 4, so this must be uint64.
- ptr := unsafe_New(t)
+ ptr := unsafe_New(typ)
*(*uint64)(unsafe.Pointer(ptr)) = bits
- return Value{typ, ptr, f | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
}
var w iword
switch typ.size {
@@ -2133,9 +2161,9 @@
typ := t.common()
if typ.size > ptrSize {
// Assume ptrSize >= 4, so this must be float64.
- ptr := unsafe_New(t)
+ ptr := unsafe_New(typ)
*(*float64)(unsafe.Pointer(ptr)) = v
- return Value{typ, ptr, f | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
}
var w iword
@@ -2153,14 +2181,14 @@
func makeComplex(f flag, v complex128, t Type) Value {
typ := t.common()
if typ.size > ptrSize {
- ptr := unsafe_New(t)
+ ptr := unsafe_New(typ)
switch typ.size {
case 8:
*(*complex64)(unsafe.Pointer(ptr)) = complex64(v)
case 16:
*(*complex128)(unsafe.Pointer(ptr)) = v
}
- return Value{typ, ptr, f | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
}
// Assume ptrSize <= 8 so this must be complex64.
@@ -2172,21 +2200,21 @@
func makeString(f flag, v string, t Type) Value {
ret := New(t).Elem()
ret.SetString(v)
- ret.flag = ret.flag&^flagAddr | f
+ ret.flag = ret.flag&^flagAddr | f | flagIndir
return ret
}
func makeBytes(f flag, v []byte, t Type) Value {
ret := New(t).Elem()
ret.SetBytes(v)
- ret.flag = ret.flag&^flagAddr | f
+ ret.flag = ret.flag&^flagAddr | f | flagIndir
return ret
}
func makeRunes(f flag, v []rune, t Type) Value {
ret := New(t).Elem()
ret.setRunes(v)
- ret.flag = ret.flag&^flagAddr | f
+ ret.flag = ret.flag&^flagAddr | f | flagIndir
return ret
}
@@ -2287,7 +2315,7 @@
if typ.NumMethod() == 0 {
*target = x
} else {
- ifaceE2I(typ.runtimeType(), x, unsafe.Pointer(target))
+ ifaceE2I(typ.(*rtype), x, unsafe.Pointer(target))
}
return Value{typ.common(), unsafe.Pointer(target), v.flag&flagRO | flagIndir | flag(Interface)<<flagKindShift}
}
@@ -2306,20 +2334,20 @@
func chancap(ch iword) int
func chanclose(ch iword)
func chanlen(ch iword) int
-func chanrecv(t *runtimeType, ch iword, nb bool) (val iword, selected, received bool)
-func chansend(t *runtimeType, ch iword, val iword, nb bool) bool
+func chanrecv(t *rtype, ch iword, nb bool) (val iword, selected, received bool)
+func chansend(t *rtype, ch iword, val iword, nb bool) bool
-func makechan(typ *runtimeType, size uint64) (ch iword)
-func makemap(t *runtimeType) (m iword)
-func mapaccess(t *runtimeType, m iword, key iword) (val iword, ok bool)
-func mapassign(t *runtimeType, m iword, key, val iword, ok bool)
-func mapiterinit(t *runtimeType, m iword) *byte
+func makechan(typ *rtype, size uint64) (ch iword)
+func makemap(t *rtype) (m iword)
+func mapaccess(t *rtype, m iword, key iword) (val iword, ok bool)
+func mapassign(t *rtype, m iword, key, val iword, ok bool)
+func mapiterinit(t *rtype, m iword) *byte
func mapiterkey(it *byte) (key iword, ok bool)
func mapiternext(it *byte)
func maplen(m iword) int
-func call(typ *commonType, fnaddr unsafe.Pointer, isInterface bool, isMethod bool, params *unsafe.Pointer, results *unsafe.Pointer)
-func ifaceE2I(t *runtimeType, src interface{}, dst unsafe.Pointer)
+func call(typ *rtype, fnaddr unsafe.Pointer, isInterface bool, isMethod bool, params *unsafe.Pointer, results *unsafe.Pointer)
+func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that
@@ -19,8 +19,9 @@
Ptrace bool // Enable tracing.
Setsid bool // Create session.
Setpgid bool // Set process group ID to new pid (SYSV setpgrp)
- Setctty bool // Set controlling terminal to fd 0
+ Setctty bool // Set controlling terminal to fd Ctty (only meaningful if Setsid is set)
Noctty bool // Detach fd 0 from controlling terminal
+ Ctty int // Controlling TTY fd (Linux only)
Pdeathsig Signal // Signal that the process will get when its parent dies (Linux only)
}
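Aside: a heavily hedged usage sketch for the new Ctty field (Linux only, and
the field's exact semantics shifted between versions, so treat this as
illustrative rather than normative). Assumes ptySlave is the slave side of an
already-open pty pair:

	cmd := exec.Command("/bin/sh")
	cmd.Stdin = ptySlave // becomes fd 0 in the child
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Setsid:  true, // new session, so the child may acquire a ctty
		Setctty: true, // take the terminal as the controlling tty
		Ctty:    0,    // the child's fd 0, i.e. the pty slave
	}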
@@ -227,8 +228,8 @@
}
// Make fd 0 the tty
- if sys.Setctty {
- _, err1 = raw_ioctl(0, TIOCSCTTY, 0)
+ if sys.Setctty && sys.Ctty >= 0 {
+ _, err1 = raw_ioctl(0, TIOCSCTTY, sys.Ctty)
if err1 != 0 {
goto childerror
}
@@ -280,6 +280,9 @@
//sys sendfile(outfd int, infd int, offset *Offset_t, count int) (written int, err error)
//sendfile64(outfd int, infd int, offset *Offset_t, count Size_t) Ssize_t
func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
var soff Offset_t
var psoff *Offset_t
if offset != nil {
@@ -9,16 +9,6 @@
#include "interface.h"
#include "go-panic.h"
-/* Go memory allocated by code not written in Go. We keep a linked
- list of these allocations so that the garbage collector can see
- them. */
-
-struct cgoalloc
-{
- struct cgoalloc *next;
- void *alloc;
-};
-
/* Prepare to call from code written in Go to code written in C or
C++. This takes the current goroutine out of the Go scheduler, as
though it were making a system call. Otherwise the program can
@@ -67,7 +57,7 @@
/* We are going back to Go, and we are not in a recursive call.
Let the garbage collector clean up any unreferenced
memory. */
- g->cgoalloc = NULL;
+ g->cgomal = NULL;
}
/* If we are invoked because the C function called _cgo_panic, then
@@ -100,15 +90,15 @@
{
void *ret;
G *g;
- struct cgoalloc *c;
+ CgoMal *c;
ret = __go_alloc (n);
g = runtime_g ();
- c = (struct cgoalloc *) __go_alloc (sizeof (struct cgoalloc));
- c->next = g->cgoalloc;
+ c = (CgoMal *) __go_alloc (sizeof (CgoMal));
+ c->next = g->cgomal;
c->alloc = ret;
- g->cgoalloc = c;
+ g->cgomal = c;
return ret;
}
@@ -15,6 +15,11 @@
#include "arch.h"
#include "malloc.h"
+/* Dummy word to use as base pointer for make([]T, 0).
+ Since you cannot take the address of such a slice,
+ you can't tell that they all have the same base pointer. */
+uintptr runtime_zerobase;
+
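Aside: the observable effect, sketched from the Go side (assumes this gccgo
runtime; uses unsafe to peek at the slice headers):

	a := make([]int, 0)
	b := make([]byte, 0)
	ha := (*reflect.SliceHeader)(unsafe.Pointer(&a))
	hb := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	fmt.Println(ha.Data == hb.Data) // true: both point at runtime_zerobase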
struct __go_open_array
__go_make_slice2 (const struct __go_type_descriptor *td, uintptr_t len,
uintptr_t cap)
@@ -24,7 +29,6 @@
intgo icap;
uintptr_t size;
struct __go_open_array ret;
- unsigned int flag;
__go_assert (td->__code == GO_SLICE);
std = (const struct __go_slice_type *) td;
@@ -44,10 +48,19 @@
ret.__capacity = icap;
size = cap * std->__element_type->__size;
- flag = ((std->__element_type->__code & GO_NO_POINTERS) != 0
- ? FlagNoPointers
- : 0);
- ret.__values = runtime_mallocgc (size, flag, 1, 1);
+
+ if (size == 0)
+ ret.__values = &runtime_zerobase;
+ else if ((std->__element_type->__code & GO_NO_POINTERS) != 0)
+ ret.__values = runtime_mallocgc (size, FlagNoPointers, 1, 1);
+ else
+ {
+ ret.__values = runtime_mallocgc (size, 0, 1, 1);
+
+ if (UseSpanType)
+ runtime_settype (ret.__values,
+ (uintptr) std->__element_type | TypeInfo_Array);
+ }
return ret;
}
@@ -32,7 +32,7 @@
intgo line;
if (__go_file_line (pcbuf[i], &fn, &file, &line)
- && runtime_showframe (fn.str))
+ && runtime_showframe (fn))
{
runtime_printf ("%S\n", fn);
runtime_printf ("\t%S:%D\n", file, (int64) line);
@@ -5,28 +5,30 @@
license that can be found in the LICENSE file. */
#include "runtime.h"
-#include "go-alloc.h"
+#include "arch.h"
+#include "malloc.h"
#include "go-type.h"
#include "interface.h"
/* Implement unsafe_New, called from the reflect package. */
-void *unsafe_New (struct __go_empty_interface type)
+void *unsafe_New (const struct __go_type_descriptor *)
asm ("reflect.unsafe_New");
/* The dynamic type of the argument will be a pointer to a type
descriptor. */
void *
-unsafe_New (struct __go_empty_interface type)
+unsafe_New (const struct __go_type_descriptor *descriptor)
{
- const struct __go_type_descriptor *descriptor;
+ uint32 flag;
+ void *ret;
- if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0)
- runtime_panicstring ("invalid interface value");
+ flag = (descriptor->__code & GO_NO_POINTERS) != 0 ? FlagNoPointers : 0;
+ ret = runtime_mallocgc (descriptor->__size, flag, 1, 1);
- /* FIXME: We should check __type_descriptor to verify that this is
- really a type descriptor. */
- descriptor = (const struct __go_type_descriptor *) type.__object;
- return __go_alloc (descriptor->__size);
+ if (UseSpanType && flag == 0)
+ runtime_settype (ret, (uintptr) descriptor | TypeInfo_SingleObject);
+
+ return ret;
}
@@ -5,28 +5,37 @@
license that can be found in the LICENSE file. */
#include "runtime.h"
-#include "go-alloc.h"
+#include "arch.h"
+#include "malloc.h"
#include "go-type.h"
#include "interface.h"
/* Implement unsafe_NewArray, called from the reflect package. */
-void *unsafe_NewArray (struct __go_empty_interface type, int n)
+void *unsafe_NewArray (const struct __go_type_descriptor *, intgo)
asm ("reflect.unsafe_NewArray");
/* The dynamic type of the argument will be a pointer to a type
descriptor. */
void *
-unsafe_NewArray (struct __go_empty_interface type, int n)
+unsafe_NewArray (const struct __go_type_descriptor *descriptor, intgo n)
{
- const struct __go_type_descriptor *descriptor;
+ uint64 size;
+ void *ret;
- if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0)
- runtime_panicstring ("invalid interface value");
+ size = n * descriptor->__size;
+ if (size == 0)
+ ret = &runtime_zerobase;
+ else if ((descriptor->__code & GO_NO_POINTERS) != 0)
+ ret = runtime_mallocgc (size, FlagNoPointers, 1, 1);
+ else
+ {
+ ret = runtime_mallocgc (size, 0, 1, 1);
- /* FIXME: We should check __type_descriptor to verify that this is
- really a type descriptor. */
- descriptor = (const struct __go_type_descriptor *) type.__object;
- return __go_alloc (descriptor->__size * n);
+ if (UseSpanType)
+ runtime_settype (ret, (uintptr) descriptor | TypeInfo_Array);
+ }
+
+ return ret;
}
@@ -20,7 +20,7 @@
MHeap runtime_mheap;
-extern MStats mstats; // defined in extern.go
+extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
extern volatile intgo runtime_MemProfileRate
__asm__ ("runtime.MemProfileRate");
@@ -341,32 +341,30 @@
// enough to hold 4 bits per allocated word.
if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
// On a 64-bit machine, allocate from a single contiguous reservation.
- // 16 GB should be big enough for now.
+ // 128 GB (MaxMem) should be big enough for now.
//
// The code will work with the reservation at any address, but ask
- // SysReserve to use 0x000000f800000000 if possible.
- // Allocating a 16 GB region takes away 36 bits, and the amd64
+ // SysReserve to use 0x000000c000000000 if possible.
+ // Allocating a 128 GB region takes away 37 bits, and the amd64
// doesn't let us choose the top 17 bits, so that leaves the 11 bits
- // in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means
- // that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
- // None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
- // they are otherwise as far from ff (likely a common byte) as possible.
- // Choosing 0x00 for the leading 6 bits was more arbitrary, but it
- // is not a common ASCII code point either. Using 0x11f8 instead
+ // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
+ // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
+ // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
+ // UTF-8 sequences, and they are otherwise as far away from
+ // ff (likely a common byte) as possible. An earlier attempt to use 0x11f8
// caused out of memory errors on OS X during thread allocations.
// These choices are both for debuggability and to reduce the
// odds of the conservative garbage collector not collecting memory
// because some non-pointer block of memory had a bit pattern
// that matched a memory address.
//
- // Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
- // but it hardly matters: fc is not valid UTF-8 either, and we have to
- // allocate 15 GB before we get that far.
+ // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
+ // but it hardly matters: e0 00 is not valid UTF-8 either.
//
// If this fails we fall back to the 32 bit memory mechanism
- arena_size = (uintptr)(16LL<<30);
+ arena_size = MaxMem;
bitmap_size = arena_size / (sizeof(void*)*8/4);
- p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
+ p = runtime_SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
}
if (p == nil) {
// On a 32-bit machine, we can't typically get away
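
The numbers in this hunk follow from the bitmap encoding: 4 bitmap bits per
heap word means bitmap_size = arena_size / (sizeof(void*)*8/4), i.e. one
sixteenth of the arena on 64-bit, so the 128 GB arena carries the 8 GB bitmap
mentioned above and the whole reservation is 136 GB starting at 0x00c0<<32. A
quick standalone check of that arithmetic (assuming 8-byte pointers):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint64_t arena_size  = 1ULL << 37;                /* 128 GB (MaxMem) */
      uint64_t bitmap_size = arena_size / (8 * 8 / 4);  /* 4 bits per 8-byte word */
      printf("arena  = %llu GB\n", (unsigned long long)(arena_size >> 30));   /* 128 */
      printf("bitmap = %llu GB\n", (unsigned long long)(bitmap_size >> 30));  /* 8 */
      printf("base   = %#llx\n", 0x00c0ULL << 32);      /* 0xc000000000 */
      return 0;
    }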
@@ -455,6 +453,8 @@
runtime_SysMap(p, n);
h->arena_used += n;
runtime_MHeap_MapBits(h);
+ if(raceenabled)
+ runtime_racemapshadow(p, n);
return p;
}
@@ -481,6 +481,8 @@
if(h->arena_used > h->arena_end)
h->arena_end = h->arena_used;
runtime_MHeap_MapBits(h);
+ if(raceenabled)
+ runtime_racemapshadow(p, n);
}
return p;
@@ -709,12 +711,13 @@
}
void *
-runtime_new(Type *typ)
+runtime_new(const Type *typ)
{
void *ret;
uint32 flag;
- runtime_m()->racepc = runtime_getcallerpc(&typ);
+ if(raceenabled)
+ runtime_m()->racepc = runtime_getcallerpc(&typ);
flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
ret = runtime_mallocgc(typ->__size, flag, 1, 1);
@@ -114,12 +114,12 @@
HeapAllocChunk = 1<<20, // Chunk size for heap growth
// Number of bits in page to span calculations (4k pages).
- // On 64-bit, we limit the arena to 16G, so 22 bits suffices.
- // On 32-bit, we don't bother limiting anything: 20 bits for 4G.
+ // On 64-bit, we limit the arena to 128GB, or 37 bits.
+ // On 32-bit, we don't bother limiting anything, so we use the full 32-bit address space.
#if __SIZEOF_POINTER__ == 8
- MHeapMap_Bits = 22,
+ MHeapMap_Bits = 37 - PageShift,
#else
- MHeapMap_Bits = 20,
+ MHeapMap_Bits = 32 - PageShift,
#endif
// Max number of threads to run garbage collection.
@@ -133,7 +133,7 @@
// This must be a #define instead of an enum because it
// is so large.
#if __SIZEOF_POINTER__ == 8
-#define MaxMem (16ULL<<30) /* 16 GB */
+#define MaxMem (1ULL<<(MHeapMap_Bits+PageShift)) /* 128 GB */
#else
#define MaxMem ((uintptr)-1)
#endif
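
With the 4 KB pages the comment above assumes, PageShift is 12, so
MHeapMap_Bits works out to 25 and MaxMem to 1<<37, the 128 GB the other hunks
reference. A compile-time restatement of that derivation:

    #include <stdio.h>

    enum { PageShift = 12 };                  /* 4 KB pages */
    enum { MHeapMap_Bits = 37 - PageShift };  /* 25 bits of page numbers */

    int main(void) {
      unsigned long long maxmem = 1ULL << (MHeapMap_Bits + PageShift);
      printf("MaxMem = %llu GB, heap map entries = %llu M\n",
             maxmem >> 30, (1ULL << MHeapMap_Bits) >> 20);  /* 128 GB, 32 M */
      return 0;
    }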
@@ -198,7 +198,7 @@
// Statistics.
-// Shared with Go: if you edit this structure, also edit extern.go.
+// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
struct MStats
{
// General statistics.
@@ -358,7 +358,7 @@
uintptr npages; // number of pages in span
MLink *freelist; // list of free objects
uint32 ref; // number of allocated objects in this span
- uint32 sizeclass; // size class
+ int32 sizeclass; // size class
uintptr elemsize; // computed from sizeclass or from npages
uint32 state; // MSpanInUse etc
int64 unusedsince; // First time spotted by GC in MSpanFree state
@@ -452,6 +452,8 @@
bool runtime_blockspecial(void*);
void runtime_setblockspecial(void*, bool);
void runtime_purgecachedstats(MCache*);
+void* runtime_new(const Type *);
+#define runtime_cnew(T) runtime_new(T)
void runtime_settype(void*, uintptr);
void runtime_settype_flush(M*, bool);
@@ -487,3 +489,8 @@
// Enables type information at the end of blocks allocated from heap
DebugTypeAtBlockEnd = 0,
};
+
+// defined in mgc0.go
+void runtime_gc_m_ptr(Eface*);
+
+void runtime_memorydump(void);
@@ -874,6 +874,81 @@
}
}
+static void
+dumpspan(uint32 idx)
+{
+ int32 sizeclass, n, npages, i, column;
+ uintptr size;
+ byte *p;
+ byte *arena_start;
+ MSpan *s;
+ bool allocated, special;
+
+ s = runtime_mheap.allspans[idx];
+ if(s->state != MSpanInUse)
+ return;
+ arena_start = runtime_mheap.arena_start;
+ p = (byte*)(s->start << PageShift);
+ sizeclass = s->sizeclass;
+ size = s->elemsize;
+ if(sizeclass == 0) {
+ n = 1;
+ } else {
+ npages = runtime_class_to_allocnpages[sizeclass];
+ n = (npages << PageShift) / size;
+ }
+
+ runtime_printf("%p .. %p:\n", p, p+n*size);
+ column = 0;
+ for(; n>0; n--, p+=size) {
+ uintptr off, *bitp, shift, bits;
+
+ off = (uintptr*)p - (uintptr*)arena_start;
+ bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ bits = *bitp>>shift;
+
+ allocated = ((bits & bitAllocated) != 0);
+ special = ((bits & bitSpecial) != 0);
+
+ for(i=0; (uint32)i<size; i+=sizeof(void*)) {
+ if(column == 0) {
+ runtime_printf("\t");
+ }
+ if(i == 0) {
+ runtime_printf(allocated ? "(" : "[");
+ runtime_printf(special ? "@" : "");
+ runtime_printf("%p: ", p+i);
+ } else {
+ runtime_printf(" ");
+ }
+
+ runtime_printf("%p", *(void**)(p+i));
+
+ if(i+sizeof(void*) >= size) {
+ runtime_printf(allocated ? ") " : "] ");
+ }
+
+ column++;
+ if(column == 8) {
+ runtime_printf("\n");
+ column = 0;
+ }
+ }
+ }
+ runtime_printf("\n");
+}
+
+// A debugging function to dump the contents of memory
+void
+runtime_memorydump(void)
+{
+ uint32 spanidx;
+
+ for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
+ dumpspan(spanidx);
+ }
+}
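
dumpspan recomputes the GC bitmap position from a heap pointer alone: the
bitmap sits immediately below arena_start and grows downward, with
wordsPerBitmapWord heap words described per bitmap word, hence
bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1. A standalone sketch
of that downward-growing layout (the toy arena sizes are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    enum { wordsPerBitmapWord = sizeof(void*) * 8 / 4 };  /* 4 bits per heap word */

    int main(void) {
      /* Toy arena: the bitmap occupies the words just below arena_start. */
      static uintptr_t block[64];
      uintptr_t *arena_start = block + 32;

      uintptr_t *p = arena_start + 20;   /* some word in the arena */
      uintptr_t off = p - arena_start;   /* word offset into the arena */
      uintptr_t *bitp = arena_start - off/wordsPerBitmapWord - 1;
      uintptr_t shift = off % wordsPerBitmapWord;

      printf("word %lu -> bitmap word %ld below base, shift %lu\n",
             (unsigned long)off, (long)(arena_start - bitp), (unsigned long)shift);
      return 0;
    }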
void
runtime_gchelper(void)
{
@@ -1141,9 +1216,6 @@
FinBlock *fb, *next;
uint32 i;
- if(raceenabled)
- runtime_racefingo();
-
for(;;) {
// There's no need for a lock in this section
// because it only conflicts with the garbage
@@ -1158,6 +1230,8 @@
runtime_park(nil, nil, "finalizer wait");
continue;
}
+ if(raceenabled)
+ runtime_racefingo();
for(; fb; fb=next) {
next = fb->next;
for(i=0; i<(uint32)fb->cnt; i++) {
@@ -343,6 +343,15 @@
runtime_MSpanList_Insert(&h->large, s);
}
+static void
+forcegchelper(void *vnote)
+{
+ Note *note = (Note*)vnote;
+
+ runtime_gc(1);
+ runtime_notewakeup(note);
+}
+
// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
@@ -356,7 +365,7 @@
uintptr released, sumreleased;
const byte *env;
bool trace;
- Note note;
+ Note note, *notep;
USED(dummy);
@@ -387,7 +396,15 @@
now = runtime_nanotime();
if(now - mstats.last_gc > forcegc) {
runtime_unlock(h);
- runtime_gc(1);
+ // The scavenger must not block other goroutines;
+ // otherwise the deadlock detector can fire spuriously.
+ // GC blocks other goroutines via the runtime_worldsema.
+ runtime_noteclear(&note);
+ notep = &note;
+ __go_go(forcegchelper, (void*)notep);
+ runtime_entersyscall();
+ runtime_notesleep(&note);
+ runtime_exitsyscall();
runtime_lock(h);
now = runtime_nanotime();
if (trace)
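
The shape of this fix: the scavenger clears a Note, hands the actual GC off to
forcegchelper (which wakes the Note when done), and sleeps on the Note inside
an entersyscall/exitsyscall pair, so the deadlock detector sees a goroutine
blocked in a syscall rather than one waiting forever. A hedged sketch of the
same handoff using POSIX threads in place of __go_go and the runtime's Note
(the pthread-based Note here is an analogy, not the runtime's implementation;
build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    /* A Note-like one-shot event built from a mutex and condvar. */
    typedef struct { pthread_mutex_t mu; pthread_cond_t cv; int done; } Note;

    static void note_clear(Note *n)  { n->done = 0; }
    static void note_wakeup(Note *n) {
      pthread_mutex_lock(&n->mu); n->done = 1;
      pthread_cond_signal(&n->cv); pthread_mutex_unlock(&n->mu);
    }
    static void note_sleep(Note *n) {
      pthread_mutex_lock(&n->mu);
      while (!n->done) pthread_cond_wait(&n->cv, &n->mu);
      pthread_mutex_unlock(&n->mu);
    }

    static void *helper(void *vnote) {   /* stands in for forcegchelper */
      puts("helper: running the blocking work (the GC)");
      note_wakeup((Note *)vnote);
      return NULL;
    }

    int main(void) {
      Note note = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
      pthread_t tid;
      note_clear(&note);
      pthread_create(&tid, NULL, helper, &note);  /* stands in for __go_go */
      note_sleep(&note);                          /* the scavenger parks here */
      pthread_join(&tid, NULL);
      puts("scavenger: resumed after helper finished");
      return 0;
    }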
@@ -315,8 +315,7 @@
runtime_unlock(&proflock);
}
-// Go interface to profile data. (Declared in extern.go)
-// Assumes Go sizeof(int) == sizeof(int32)
+// Go interface to profile data. (Declared in debug.go)
// Must match MemProfileRecord in debug.go.
typedef struct Record Record;
@@ -18,6 +18,7 @@
#include "defs.h"
#include "malloc.h"
#include "race.h"
+#include "go-type.h"
#include "go-defer.h"
#ifdef USING_SPLIT_STACK
@@ -237,7 +238,7 @@
Lock;
G *gfree; // available g's (status == Gdead)
- int32 goidgen;
+ int64 goidgen;
G *ghead; // g's waiting to run
G *gtail;
@@ -601,7 +602,7 @@
status = "???";
break;
}
- runtime_printf("goroutine %d [%s]:\n", gp->goid, status);
+ runtime_printf("goroutine %D [%s]:\n", gp->goid, status);
}
void
@@ -745,7 +746,7 @@
// If g is the idle goroutine for an m, hand it off.
if(gp->idlem != nil) {
if(gp->idlem->idleg != nil) {
- runtime_printf("m%d idle out of sync: g%d g%d\n",
+ runtime_printf("m%d idle out of sync: g%D g%D\n",
gp->idlem->id,
gp->idlem->idleg->goid, gp->goid);
runtime_throw("runtime: double idle");
@@ -847,7 +848,7 @@
// Mark runnable.
if(gp->status == Grunnable || gp->status == Grunning) {
- runtime_printf("goroutine %d has status %d\n", gp->goid, gp->status);
+ runtime_printf("goroutine %D has status %d\n", gp->goid, gp->status);
runtime_throw("bad g->status in ready");
}
gp->status = Grunnable;
@@ -1204,7 +1205,16 @@
pthread_t tid;
size_t stacksize;
- mp = runtime_malloc(sizeof(M));
+#if 0
+ static const Type *mtype; // The Go type M
+ if(mtype == nil) {
+ Eface e;
+ runtime_gc_m_ptr(&e);
+ mtype = ((const PtrType*)e.__type_descriptor)->__element_type;
+ }
+#endif
+
+ mp = runtime_mal(sizeof *mp);
mcommoninit(mp);
mp->g0 = runtime_malg(-1, nil, nil);
@@ -1513,9 +1523,9 @@
byte *sp;
size_t spsize;
G *newg;
- int32 goid;
+ int64 goid;
- goid = runtime_xadd((uint32*)&runtime_sched.goidgen, 1);
+ goid = runtime_xadd64((uint64*)&runtime_sched.goidgen, 1);
if(raceenabled)
runtime_racegostart(goid, runtime_getcallerpc(&fn));
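
Widening goidgen to int64 keeps goroutine IDs from ever wrapping, which in
turn requires the 64-bit fetch-and-add runtime_xadd64 in place of
runtime_xadd. The same pattern with the GCC atomic builtins (using
__atomic_add_fetch as a stand-in for the runtime's own atomics is my
assumption here):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t goidgen;  /* shared counter; 64 bits never wrap in practice */

    /* xadd64 analogue: atomically add and return the new value. */
    static uint64_t xadd64(uint64_t *addr, uint64_t delta) {
      return __atomic_add_fetch(addr, delta, __ATOMIC_SEQ_CST);
    }

    int main(void) {
      printf("goid %llu\n", (unsigned long long)xadd64(&goidgen, 1));  /* 1 */
      printf("goid %llu\n", (unsigned long long)xadd64(&goidgen, 1));  /* 2 */
      return 0;
    }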
@@ -15,6 +15,7 @@
// Finalize race detection subsystem, does not return.
void runtime_racefini(void);
+void runtime_racemapshadow(void *addr, uintptr size);
void runtime_racemalloc(void *p, uintptr sz, void *pc);
void runtime_racefree(void *p);
void runtime_racegostart(int32 goid, void *pc);
@@ -159,13 +159,13 @@
}
bool
-runtime_showframe(const unsigned char *s)
+runtime_showframe(String s)
{
static int32 traceback = -1;
if(traceback < 0)
traceback = runtime_gotraceback();
- return traceback > 1 || (s != nil && __builtin_strchr((const char*)s, '.') != nil && __builtin_memcmp(s, "runtime.", 7) != 0);
+ return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
}
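
Passing a String instead of a C string matters because Go strings carry an
explicit length and are not NUL-terminated, so the unbounded strchr the old
code used is unsafe on them; the bounded memchr over s.len is the correct
form. A sketch with an illustrative String layout mirroring the s.str/s.len
accesses above:

    #include <stdio.h>
    #include <string.h>

    /* Go-style string header: pointer plus length, no NUL terminator. */
    typedef struct { const unsigned char *str; int len; } String;

    static int showframe(String s, int traceback) {
      return traceback > 1 ||
             (memchr(s.str, '.', s.len) != NULL &&
              memcmp(s.str, "runtime.", 7) != 0);
    }

    int main(void) {
      String a = { (const unsigned char *)"main.work", 9 };
      String b = { (const unsigned char *)"runtime.mallocgc", 16 };
      printf("main.work: %d, runtime.mallocgc: %d\n",
             showframe(a, 1), showframe(b, 1));  /* 1, 0 */
      return 0;
    }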
static Lock ticksLock;
@@ -63,6 +63,7 @@
typedef struct LFNode LFNode;
typedef struct ParFor ParFor;
typedef struct ParForThread ParForThread;
+typedef struct CgoMal CgoMal;
typedef struct __go_open_array Slice;
typedef struct String String;
@@ -72,13 +73,14 @@
typedef struct __go_defer_stack Defer;
typedef struct __go_panic_stack Panic;
+typedef struct __go_ptr_type PtrType;
typedef struct __go_func_type FuncType;
typedef struct __go_map_type MapType;
typedef struct Traceback Traceback;
/*
- * per-cpu declaration.
+ * Per-CPU declaration.
*/
extern M* runtime_m(void);
extern G* runtime_g(void);
@@ -159,7 +161,7 @@
void* param; // passed parameter on wakeup
bool fromgogo; // reached from gogo
int16 status;
- int32 goid;
+ int64 goid;
uint32 selgen; // valid sudog pointer
const char* waitreason; // if status==Gwaiting
G* schedlink;
@@ -178,7 +180,7 @@
uintptr gopc; // pc of go statement that created this goroutine
int32 ncgo;
- struct cgoalloc *cgoalloc;
+ CgoMal* cgomal;
Traceback* traceback;
@@ -201,7 +203,7 @@
int32 profilehz;
int32 helpgc;
uint32 fastrand;
- uint64 ncgocall;
+ uint64 ncgocall; // number of cgo calls in total
Note havenextg;
G* nextg;
M* alllink; // on allm
@@ -316,6 +318,14 @@
uint64 nsleep;
};
+// Track memory allocated by code not written in Go during a cgo call,
+// so that the garbage collector can see those allocations.
+struct CgoMal
+{
+ CgoMal *next;
+ byte *alloc;
+};
+
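
CgoMal hangs off the goroutine's cgomal field above as a singly linked list,
one node per C-side allocation made during a cgo call, so that scanning the
list keeps those blocks visible to the collector. A sketch of the push
operation (the cgomal_record helper name is hypothetical):

    #include <stdlib.h>

    typedef unsigned char byte;

    typedef struct CgoMal CgoMal;
    struct CgoMal {
      CgoMal *next;
      byte   *alloc;
    };

    /* Hypothetical helper: prepend a C allocation to the list so the
       collector can treat the block as reachable. */
    static void cgomal_record(CgoMal **head, byte *p) {
      CgoMal *cm = malloc(sizeof *cm);
      cm->alloc = p;
      cm->next = *head;   /* push onto the front */
      *head = cm;
    }

    int main(void) {
      CgoMal *list = NULL;
      cgomal_record(&list, malloc(16));  /* block stays reachable via the list */
      cgomal_record(&list, malloc(32));
      return 0;
    }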
/*
* defined macros
* you need super-gopher-guru privilege
@@ -329,6 +339,7 @@
/*
* external data
*/
+extern uintptr runtime_zerobase;
G* runtime_allg;
G* runtime_lastg;
M* runtime_allm;
@@ -569,7 +580,7 @@
void runtime_LockOSThread(void) __asm__("runtime.LockOSThread");
void runtime_UnlockOSThread(void) __asm__("runtime.UnlockOSThread");
-bool runtime_showframe(const unsigned char*);
+bool runtime_showframe(String);
uintptr runtime_memlimit(void);