From c253d8566aef2074b98f5054de3d2c9fd6cc6a05 Mon Sep 17 00:00:00 2001
From: Chewxy
Date: Sat, 12 Dec 2020 09:40:52 +1100
Subject: [PATCH] Cuda11 (#99)

* Fixed #90
* Removed everything that used unsafe.Pointer, in favour of uintptr. Anything that requires an unsafe.Pointer to remain will have to use a refcounter.
* genlib'd the RC stuff into scalarHeaders
* Fixed it so that -race will not complain
* Updated travis to make sure things are tested with -race as well
* Added some tests for Float64Engine and Float32Engine
* Moved to using raw byte slices as per Bryan C Mills' suggestion
* More fixes from moving to raw byte slices
* Fixed more things for array
* Fixed tests
* Fixed all syntax errors
* Removed .v from array
* Fixed some of that scalar business
* Fixed the slice bits
* Tests pass
* Added benchmark script
* Fixed eng_arith_manual
* Fixed inplace transpose as well
---
 .travis/test.sh                          |    3 +-
 api_arith_test.go                        | 1148 +++++++++++-----------
 api_matop.go                             |    2 +-
 array.go                                 |  316 +++---
 array_getset.go                          |  164 ++--
 bench.sh                                 |   23 +
 consopt.go                               |   40 +-
 defaultengine.go                         |  145 ++-
 defaultengine_arith.go                   |   84 +-
 defaultengine_cmp.go                     |   96 +-
 defaultengine_matop_misc.go              |    4 +-
 defaultengine_matop_stack.go             |    6 +-
 defaultengine_matop_transpose.go         |    3 +-
 defaultengine_matop_transpose_inplace.go |    3 +-
 defaultengine_prep.go                    |    8 +-
 defaultenginefloat32.go                  |   11 +-
 defaultenginefloat32_test.go             |   42 +
 defaultenginefloat64.go                  |    8 +-
 defaultenginefloat64_test.go             |   42 +
 dense.go                                 |   65 +-
 dense_assign.go                          |   10 +-
 dense_io.go                              |    2 +-
 dense_matop.go                           |    1 -
 dense_matop_test.go                      |    2 +
 engine.go                                |    5 -
 genlib2/agg1_body.go                     |   66 +-
 genlib2/agg2_body.go                     |   36 +-
 genlib2/array_getset.go                  |   81 +-
 genlib2/dense_io.go                      |    2 +-
 genlib2/main.go                          |    1 +
 internal/execution/e.go                  |    2 +-
 internal/execution/eng_arith.go          |  144 +--
 internal/execution/eng_arith_manual.go   |   22 +-
 internal/execution/eng_cmp.go            |  144 +--
 internal/execution/eng_map.go            |    2 +-
 internal/storage/consts.go               |   29 +
 internal/storage/getset.go               |   72 +-
 internal/storage/header.go               |   67 +-
 known_race_test.go                       |    1 +
 perf.go                                  |    9 +-
 sparse.go                                |   14 +-
 tensor.go                                |    5 +-
 testutils_test.go                        |   13 +-
 43 files changed, 1534 insertions(+), 1409 deletions(-)
 create mode 100755 bench.sh
 create mode 100644 defaultenginefloat32_test.go
 create mode 100644 defaultenginefloat64_test.go
 create mode 100644 internal/storage/consts.go

diff --git a/.travis/test.sh b/.travis/test.sh
index 37fdd87..381a409 100644
--- a/.travis/test.sh
+++ b/.travis/test.sh
@@ -6,6 +6,7 @@ go test -v -a -covermode=atomic -coverprofile=test.cover .
 go test -tags='avx' -a -covermode=atomic -coverprofile=avx.cover .
 go test -tags='sse' -a -covermode=atomic -coverprofile=sse.cover .
 go test -tags='inplacetranspose' -a -covermode=atomic -coverprofile=inplacetranspose.cover .
+go test -race -a .
 go test -a -covermode=atomic -coverprofile=native.cover ./native/.
 # because coveralls only accepts one coverage file at one time...
we combine them into one gigantic one @@ -14,4 +15,4 @@ echo "mode: set" > ./final.cover tail -q -n +2 "${covers[@]}" >> ./final.cover goveralls -coverprofile=./final.cover -service=travis-ci -set +ex \ No newline at end of file +set +ex diff --git a/api_arith_test.go b/api_arith_test.go index ca45f8f..75a4838 100644 --- a/api_arith_test.go +++ b/api_arith_test.go @@ -1,574 +1,574 @@ -package tensor - -import ( - "log" - "math/rand" - "testing" - "testing/quick" - "time" - - "github.com/stretchr/testify/assert" -) - -// This file contains the tests for API functions that aren't generated by genlib - -func TestMod(t *testing.T) { - a := New(WithBacking([]float64{1, 2, 3, 4})) - b := New(WithBacking([]float64{1, 1, 1, 1})) - var correct interface{} = []float64{0, 0, 0, 0} - - // vec-vec - res, err := Mod(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar - if res, err = Mod(a, 1.0); err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) -} - -func TestFMA(t *testing.T) { - same := func(q *Dense) bool { - a := q.Clone().(*Dense) - x := q.Clone().(*Dense) - y := New(Of(q.Dtype()), WithShape(q.Shape().Clone()...)) - y.Memset(identityVal(100, q.Dtype())) - WithEngine(q.Engine())(y) - y2 := y.Clone().(*Dense) - - we, willFailEq := willerr(a, numberTypes, nil) - _, ok1 := q.Engine().(FMAer) - _, ok2 := q.Engine().(Muler) - _, ok3 := q.Engine().(Adder) - we = we || (!ok1 && (!ok2 || !ok3)) - - f, err := FMA(a, x, y) - if err, retEarly := qcErrCheck(t, "FMA#1", a, x, we, err); retEarly { - if err != nil { - log.Printf("q.Engine() %T", q.Engine()) - return false - } - return true - } - - we, _ = willerr(a, numberTypes, nil) - _, ok := a.Engine().(Muler) - we = we || !ok - wi, err := Mul(a, x, WithIncr(y2)) - if err, retEarly := qcErrCheck(t, "FMA#2", a, x, we, err); retEarly { - if err != nil { - return false - } - return true - } - return qcEqCheck(t, q.Dtype(), willFailEq, wi, f) - } - r := rand.New(rand.NewSource(time.Now().UnixNano())) - if err := quick.Check(same, &quick.Config{Rand: r}); err != nil { - t.Error(err) - } - - // specific engines - var eng Engine - - // FLOAT64 ENGINE - - // vec-vec - eng = Float64Engine{} - a := New(WithBacking(Range(Float64, 0, 100)), WithEngine(eng)) - x := New(WithBacking(Range(Float64, 1, 101)), WithEngine(eng)) - y := New(Of(Float64), WithShape(100), WithEngine(eng)) - - f, err := FMA(a, x, y) - if err != nil { - t.Fatal(err) - } - - a2 := New(WithBacking(Range(Float64, 0, 100))) - x2 := New(WithBacking(Range(Float64, 1, 101))) - y2 := New(Of(Float64), WithShape(100)) - f2, err := Mul(a2, x2, WithIncr(y2)) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, f.Data(), f2.Data()) - - // vec-scalar - a = New(WithBacking(Range(Float64, 0, 100)), WithEngine(eng)) - y = New(Of(Float64), WithShape(100)) - - if f, err = FMA(a, 2.0, y); err != nil { - t.Fatal(err) - } - - a2 = New(WithBacking(Range(Float64, 0, 100))) - y2 = New(Of(Float64), WithShape(100)) - if f2, err = Mul(a2, 2.0, WithIncr(y2)); err != nil { - t.Fatal(err) - } - - assert.Equal(t, f.Data(), f2.Data()) - - // FLOAT32 engine - eng = Float32Engine{} - a = New(WithBacking(Range(Float32, 0, 100)), WithEngine(eng)) - x = New(WithBacking(Range(Float32, 1, 101)), WithEngine(eng)) - y = New(Of(Float32), WithShape(100), WithEngine(eng)) - - f, err = FMA(a, x, y) - if err != nil { - t.Fatal(err) - } - - a2 = New(WithBacking(Range(Float32, 0, 100))) - x2 = New(WithBacking(Range(Float32, 1, 101))) - y2 = 
New(Of(Float32), WithShape(100)) - f2, err = Mul(a2, x2, WithIncr(y2)) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, f.Data(), f2.Data()) - - // vec-scalar - a = New(WithBacking(Range(Float32, 0, 100)), WithEngine(eng)) - y = New(Of(Float32), WithShape(100)) - - if f, err = FMA(a, float32(2), y); err != nil { - t.Fatal(err) - } - - a2 = New(WithBacking(Range(Float32, 0, 100))) - y2 = New(Of(Float32), WithShape(100)) - if f2, err = Mul(a2, float32(2), WithIncr(y2)); err != nil { - t.Fatal(err) - } - - assert.Equal(t, f.Data(), f2.Data()) - -} - -func TestMulScalarScalar(t *testing.T) { - // scalar-scalar - a := New(WithBacking([]float64{2})) - b := New(WithBacking([]float64{3})) - var correct interface{} = 6.0 - - res, err := Mul(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Test commutativity - res, err = Mul(b, a) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-tensor - a = New(WithBacking([]float64{3, 2})) - b = New(WithBacking([]float64{2})) - correct = []float64{6, 4} - - res, err = Mul(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Test commutativity - res, err = Mul(b, a) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor - tensor - a = New(WithBacking([]float64{3, 5})) - b = New(WithBacking([]float64{7, 2})) - correct = []float64{21, 10} - - res, err = Mul(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Test commutativity - res, err = Mul(b, a) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Interface - tensor - ai := 2.0 - b = NewDense(Float64, Shape{1, 1}, WithBacking([]float64{3})) - correct = []float64{6.0} - - res, err = Mul(ai, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Commutativity - res, err = Mul(b, ai) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) -} - -func TestDivScalarScalar(t *testing.T) { - // scalar-scalar - a := New(WithBacking([]float64{6})) - b := New(WithBacking([]float64{2})) - var correct interface{} = 3.0 - - res, err := Div(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-tensor - a = New(WithBacking([]float64{6, 4})) - b = New(WithBacking([]float64{2})) - correct = []float64{3, 2} - - res, err = Div(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor-scalar - a = New(WithBacking([]float64{6})) - b = New(WithBacking([]float64{3, 2})) - correct = []float64{2, 3} - - res, err = Div(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor - tensor - a = New(WithBacking([]float64{21, 10})) - b = New(WithBacking([]float64{7, 2})) - correct = []float64{3, 5} - - res, err = Div(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // interface-scalar - ai := 6.0 - b = New(WithBacking([]float64{2})) - correct = 3.0 - - res, err = Div(ai, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-interface - a = New(WithBacking([]float64{6})) - bi := 2.0 - correct = 3.0 - - res, err = Div(a, bi) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, 
res.Data()) -} - -func TestAddScalarScalar(t *testing.T) { - // scalar-scalar - a := New(WithBacking([]float64{2})) - b := New(WithBacking([]float64{3})) - var correct interface{} = 5.0 - - res, err := Add(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Test commutativity - res, err = Add(b, a) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-tensor - a = New(WithBacking([]float64{3, 2})) - b = New(WithBacking([]float64{2})) - correct = []float64{5, 4} - - res, err = Add(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Test commutativity - res, err = Add(b, a) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor - tensor - a = New(WithBacking([]float64{3, 5})) - b = New(WithBacking([]float64{7, 2})) - correct = []float64{10, 7} - - res, err = Add(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Test commutativity - res, err = Add(b, a) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // interface-scalar - ai := 2.0 - b = New(WithBacking([]float64{3})) - correct = 5.0 - - res, err = Add(ai, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // Test commutativity - res, err = Add(b, ai) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) -} - -func TestSubScalarScalar(t *testing.T) { - // scalar-scalar - a := New(WithBacking([]float64{6})) - b := New(WithBacking([]float64{2})) - var correct interface{} = 4.0 - - res, err := Sub(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-tensor - a = New(WithBacking([]float64{6, 4})) - b = New(WithBacking([]float64{2})) - correct = []float64{4, 2} - - res, err = Sub(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor-scalar - a = New(WithBacking([]float64{6})) - b = New(WithBacking([]float64{3, 2})) - correct = []float64{3, 4} - - res, err = Sub(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor - tensor - a = New(WithBacking([]float64{21, 10})) - b = New(WithBacking([]float64{7, 2})) - correct = []float64{14, 8} - - res, err = Sub(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // interface-scalar - ai := 6.0 - b = New(WithBacking([]float64{2})) - correct = 4.0 - - res, err = Sub(ai, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-interface - a = New(WithBacking([]float64{6})) - bi := 2.0 - correct = 4.0 - - res, err = Sub(a, bi) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) -} - -func TestModScalarScalar(t *testing.T) { - // scalar-scalar - a := New(WithBacking([]float64{5})) - b := New(WithBacking([]float64{2})) - var correct interface{} = 1.0 - - res, err := Mod(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-tensor - a = New(WithBacking([]float64{5, 4})) - b = New(WithBacking([]float64{2})) - correct = []float64{1, 0} - - res, err = Mod(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor-scalar - a = 
New(WithBacking([]float64{5})) - b = New(WithBacking([]float64{3, 2})) - correct = []float64{2, 1} - - res, err = Mod(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor - tensor - a = New(WithBacking([]float64{22, 10})) - b = New(WithBacking([]float64{7, 2})) - correct = []float64{1, 0} - - res, err = Mod(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // interface-scalar - ai := 5.0 - b = New(WithBacking([]float64{2})) - correct = 1.0 - - res, err = Mod(ai, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-interface - a = New(WithBacking([]float64{5})) - bi := 2.0 - correct = 1.0 - - res, err = Mod(a, bi) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) -} - -func TestPowScalarScalar(t *testing.T) { - // scalar-scalar - a := New(WithBacking([]float64{6})) - b := New(WithBacking([]float64{2})) - var correct interface{} = 36.0 - - res, err := Pow(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-tensor - a = New(WithBacking([]float64{6, 4})) - b = New(WithBacking([]float64{2})) - correct = []float64{36, 16} - - res, err = Pow(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor-scalar - a = New(WithBacking([]float64{6})) - b = New(WithBacking([]float64{3, 2})) - correct = []float64{216, 36} - - res, err = Pow(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // tensor - tensor - a = New(WithBacking([]float64{3, 10})) - b = New(WithBacking([]float64{7, 2})) - correct = []float64{2187, 100} - - res, err = Pow(a, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // interface-scalar - ai := 6.0 - b = New(WithBacking([]float64{2})) - correct = 36.0 - - res, err = Pow(ai, b) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) - - // scalar-interface - a = New(WithBacking([]float64{6})) - bi := 2.0 - correct = 36.0 - - res, err = Pow(a, bi) - if err != nil { - t.Fatalf("Error: %v", err) - } - assert.Equal(t, correct, res.Data()) -} +package tensor + +import ( + "log" + "math/rand" + "testing" + "testing/quick" + "time" + + "github.com/stretchr/testify/assert" +) + +// This file contains the tests for API functions that aren't generated by genlib + +func TestMod(t *testing.T) { + a := New(WithBacking([]float64{1, 2, 3, 4})) + b := New(WithBacking([]float64{1, 1, 1, 1})) + var correct interface{} = []float64{0, 0, 0, 0} + + // vec-vec + res, err := Mod(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar + if res, err = Mod(a, 1.0); err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) +} + +func TestFMA(t *testing.T) { + same := func(q *Dense) bool { + a := q.Clone().(*Dense) + x := q.Clone().(*Dense) + y := New(Of(q.Dtype()), WithShape(q.Shape().Clone()...)) + y.Memset(identityVal(100, q.Dtype())) + WithEngine(q.Engine())(y) + y2 := y.Clone().(*Dense) + + we, willFailEq := willerr(a, numberTypes, nil) + _, ok1 := q.Engine().(FMAer) + _, ok2 := q.Engine().(Muler) + _, ok3 := q.Engine().(Adder) + we = we || (!ok1 && (!ok2 || !ok3)) + + f, err := FMA(a, x, y) + if err, retEarly := qcErrCheck(t, "FMA#1", a, x, we, err); retEarly { + if err != nil { + 
log.Printf("q.Engine() %T", q.Engine()) + return false + } + return true + } + + we, _ = willerr(a, numberTypes, nil) + _, ok := a.Engine().(Muler) + we = we || !ok + wi, err := Mul(a, x, WithIncr(y2)) + if err, retEarly := qcErrCheck(t, "FMA#2", a, x, we, err); retEarly { + if err != nil { + return false + } + return true + } + return qcEqCheck(t, q.Dtype(), willFailEq, wi, f) + } + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if err := quick.Check(same, &quick.Config{Rand: r}); err != nil { + t.Error(err) + } + + // specific engines + var eng Engine + + // FLOAT64 ENGINE + + // vec-vec + eng = Float64Engine{} + a := New(WithBacking(Range(Float64, 0, 100)), WithEngine(eng)) + x := New(WithBacking(Range(Float64, 1, 101)), WithEngine(eng)) + y := New(Of(Float64), WithShape(100), WithEngine(eng)) + + f, err := FMA(a, x, y) + if err != nil { + t.Fatal(err) + } + + a2 := New(WithBacking(Range(Float64, 0, 100))) + x2 := New(WithBacking(Range(Float64, 1, 101))) + y2 := New(Of(Float64), WithShape(100)) + f2, err := Mul(a2, x2, WithIncr(y2)) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, f.Data(), f2.Data()) + + // vec-scalar + a = New(WithBacking(Range(Float64, 0, 100)), WithEngine(eng)) + y = New(Of(Float64), WithShape(100)) + + if f, err = FMA(a, 2.0, y); err != nil { + t.Fatal(err) + } + + a2 = New(WithBacking(Range(Float64, 0, 100))) + y2 = New(Of(Float64), WithShape(100)) + if f2, err = Mul(a2, 2.0, WithIncr(y2)); err != nil { + t.Fatal(err) + } + + assert.Equal(t, f.Data(), f2.Data()) + + // FLOAT32 engine + eng = Float32Engine{} + a = New(WithBacking(Range(Float32, 0, 100)), WithEngine(eng)) + x = New(WithBacking(Range(Float32, 1, 101)), WithEngine(eng)) + y = New(Of(Float32), WithShape(100), WithEngine(eng)) + + f, err = FMA(a, x, y) + if err != nil { + t.Fatal(err) + } + + a2 = New(WithBacking(Range(Float32, 0, 100))) + x2 = New(WithBacking(Range(Float32, 1, 101))) + y2 = New(Of(Float32), WithShape(100)) + f2, err = Mul(a2, x2, WithIncr(y2)) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, f.Data(), f2.Data()) + + // vec-scalar + a = New(WithBacking(Range(Float32, 0, 100)), WithEngine(eng)) + y = New(Of(Float32), WithShape(100)) + + if f, err = FMA(a, float32(2), y); err != nil { + t.Fatal(err) + } + + a2 = New(WithBacking(Range(Float32, 0, 100))) + y2 = New(Of(Float32), WithShape(100)) + if f2, err = Mul(a2, float32(2), WithIncr(y2)); err != nil { + t.Fatal(err) + } + + assert.Equal(t, f.Data(), f2.Data()) + +} + +func TestMulScalarScalar(t *testing.T) { + // scalar-scalar + a := New(WithBacking([]float64{2})) + b := New(WithBacking([]float64{3})) + var correct interface{} = 6.0 + + res, err := Mul(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Test commutativity + res, err = Mul(b, a) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-tensor + a = New(WithBacking([]float64{3, 2})) + b = New(WithBacking([]float64{2})) + correct = []float64{6, 4} + + res, err = Mul(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Test commutativity + res, err = Mul(b, a) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor - tensor + a = New(WithBacking([]float64{3, 5})) + b = New(WithBacking([]float64{7, 2})) + correct = []float64{21, 10} + + res, err = Mul(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Test 
commutativity + res, err = Mul(b, a) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Interface - tensor + ai := 2.0 + b = NewDense(Float64, Shape{1, 1}, WithBacking([]float64{3})) + correct = []float64{6.0} + + res, err = Mul(ai, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Commutativity + res, err = Mul(b, ai) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) +} + +func TestDivScalarScalar(t *testing.T) { + // scalar-scalar + a := New(WithBacking([]float64{6})) + b := New(WithBacking([]float64{2})) + var correct interface{} = 3.0 + + res, err := Div(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-tensor + a = New(WithBacking([]float64{6, 4})) + b = New(WithBacking([]float64{2})) + correct = []float64{3, 2} + + res, err = Div(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor-scalar + a = New(WithBacking([]float64{6})) + b = New(WithBacking([]float64{3, 2})) + correct = []float64{2, 3} + + res, err = Div(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor - tensor + a = New(WithBacking([]float64{21, 10})) + b = New(WithBacking([]float64{7, 2})) + correct = []float64{3, 5} + + res, err = Div(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // interface-scalar + ai := 6.0 + b = New(WithBacking([]float64{2})) + correct = 3.0 + + res, err = Div(ai, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-interface + a = New(WithBacking([]float64{6})) + bi := 2.0 + correct = 3.0 + + res, err = Div(a, bi) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) +} + +func TestAddScalarScalar(t *testing.T) { + // scalar-scalar + a := New(WithBacking([]float64{2})) + b := New(WithBacking([]float64{3})) + var correct interface{} = 5.0 + + res, err := Add(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Test commutativity + res, err = Add(b, a) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-tensor + a = New(WithBacking([]float64{3, 2})) + b = New(WithBacking([]float64{2})) + correct = []float64{5, 4} + + res, err = Add(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Test commutativity + res, err = Add(b, a) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor - tensor + a = New(WithBacking([]float64{3, 5})) + b = New(WithBacking([]float64{7, 2})) + correct = []float64{10, 7} + + res, err = Add(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Test commutativity + res, err = Add(b, a) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // interface-scalar + ai := 2.0 + b = New(WithBacking([]float64{3})) + correct = 5.0 + + res, err = Add(ai, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // Test commutativity + res, err = Add(b, ai) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) +} + +func TestSubScalarScalar(t *testing.T) { + // scalar-scalar + a := 
New(WithBacking([]float64{6})) + b := New(WithBacking([]float64{2})) + var correct interface{} = 4.0 + + res, err := Sub(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-tensor + a = New(WithBacking([]float64{6, 4})) + b = New(WithBacking([]float64{2})) + correct = []float64{4, 2} + + res, err = Sub(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor-scalar + a = New(WithBacking([]float64{6})) + b = New(WithBacking([]float64{3, 2})) + correct = []float64{3, 4} + + res, err = Sub(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor - tensor + a = New(WithBacking([]float64{21, 10})) + b = New(WithBacking([]float64{7, 2})) + correct = []float64{14, 8} + + res, err = Sub(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // interface-scalar + ai := 6.0 + b = New(WithBacking([]float64{2})) + correct = 4.0 + + res, err = Sub(ai, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-interface + a = New(WithBacking([]float64{6})) + bi := 2.0 + correct = 4.0 + + res, err = Sub(a, bi) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) +} + +func TestModScalarScalar(t *testing.T) { + // scalar-scalar + a := New(WithBacking([]float64{5})) + b := New(WithBacking([]float64{2})) + var correct interface{} = 1.0 + + res, err := Mod(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-tensor + a = New(WithBacking([]float64{5, 4})) + b = New(WithBacking([]float64{2})) + correct = []float64{1, 0} + + res, err = Mod(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor-scalar + a = New(WithBacking([]float64{5})) + b = New(WithBacking([]float64{3, 2})) + correct = []float64{2, 1} + + res, err = Mod(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor - tensor + a = New(WithBacking([]float64{22, 10})) + b = New(WithBacking([]float64{7, 2})) + correct = []float64{1, 0} + + res, err = Mod(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // interface-scalar + ai := 5.0 + b = New(WithBacking([]float64{2})) + correct = 1.0 + + res, err = Mod(ai, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-interface + a = New(WithBacking([]float64{5})) + bi := 2.0 + correct = 1.0 + + res, err = Mod(a, bi) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) +} + +func TestPowScalarScalar(t *testing.T) { + // scalar-scalar + a := New(WithBacking([]float64{6})) + b := New(WithBacking([]float64{2})) + var correct interface{} = 36.0 + + res, err := Pow(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-tensor + a = New(WithBacking([]float64{6, 4})) + b = New(WithBacking([]float64{2})) + correct = []float64{36, 16} + + res, err = Pow(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // tensor-scalar + a = New(WithBacking([]float64{6})) + b = New(WithBacking([]float64{3, 2})) + correct = []float64{216, 36} + + res, err = Pow(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, 
res.Data()) + + // tensor - tensor + a = New(WithBacking([]float64{3, 10})) + b = New(WithBacking([]float64{7, 2})) + correct = []float64{2187, 100} + + res, err = Pow(a, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // interface-scalar + ai := 6.0 + b = New(WithBacking([]float64{2})) + correct = 36.0 + + res, err = Pow(ai, b) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) + + // scalar-interface + a = New(WithBacking([]float64{6})) + bi := 2.0 + correct = 36.0 + + res, err = Pow(a, bi) + if err != nil { + t.Fatalf("Error: %v", err) + } + assert.Equal(t, correct, res.Data()) +} diff --git a/api_matop.go b/api_matop.go index fe3cad9..75c2452 100644 --- a/api_matop.go +++ b/api_matop.go @@ -13,7 +13,7 @@ func Repeat(t Tensor, axis int, repeats ...int) (retVal Tensor, err error) { return nil, errors.New("Engine does not support Repeat") } -// RepeatReuse repeats a Tensor along the axis and the given number of repeats, and puts the results in the provided reuse tensor. If the reuse tensor is not correctly sized, then an error will be given // ???? , but the results will still be valid. +// RepeatReuse repeats a Tensor along the axis and the given number of repeats, and puts the results in the provided reuse tensor. If the reuse tensor is not correctly sized, then an error will be given, but the results will still be valid. func RepeatReuse(t, reuse Tensor, axis int, repeats ...int) (retval Tensor, err error) { if r, ok := t.Engine().(Repeater); ok { return r.RepeatReuse(t, reuse, axis, repeats...) diff --git a/array.go b/array.go index d6c07c6..ca948d6 100644 --- a/array.go +++ b/array.go @@ -3,6 +3,7 @@ package tensor import ( "fmt" "reflect" + "sync" "unsafe" "github.com/pkg/errors" @@ -11,33 +12,21 @@ import ( // array is the underlying generic array. type array struct { - storage.Header // the header - the Go representation (a slice) - t Dtype // the element type - v interface{} // an additional reference to the underlying slice. This is not strictly necessary, but does improve upon anything that calls .Data() -} - -// makeHeader makes a array Header -func makeHeader(t Dtype, length int) storage.Header { - return storage.Header{ - Ptr: malloc(t, length), - L: length, - C: length, - } + storage.Header // the header - the Go representation (a slice) + t Dtype // the element type } // makeArray makes an array. The memory allocation is handled by Go func makeArray(t Dtype, length int) array { - hdr := makeHeader(t, length) - return makeArrayFromHeader(hdr, t) -} - -// makeArrayFromHeader makes an array given a header -func makeArrayFromHeader(hdr storage.Header, t Dtype) array { + v := malloc(t, length) + hdr := storage.Header{ + Raw: v, + } return array{ Header: hdr, t: t, - v: nil, } + } // arrayFromSlice creates an array from a slice. If x is not a slice, it will panic. 
@@ -48,20 +37,18 @@ func arrayFromSlice(x interface{}) array {
 	}
 	elT := xT.Elem()
 
-	xV := reflect.ValueOf(x)
-	uptr := unsafe.Pointer(xV.Pointer())
-
 	return array{
 		Header: storage.Header{
-			Ptr: uptr,
-			L:   xV.Len(),
-			C:   xV.Cap(),
+			Raw: storage.AsByteSlice(x),
 		},
 		t: Dtype{elT},
-		v: x,
 	}
 }
 
+func (a *array) Len() int { return a.Header.TypedLen(a.t.Type) }
+
+func (a *array) Cap() int { return a.Header.TypedLen(a.t.Type) }
+
 // fromSlice populates the value from a slice
 func (a *array) fromSlice(x interface{}) {
 	xT := reflect.TypeOf(x)
@@ -69,14 +56,8 @@ func (a *array) fromSlice(x interface{}) {
 		panic("Expected a slice")
 	}
 	elT := xT.Elem()
-	xV := reflect.ValueOf(x)
-	uptr := unsafe.Pointer(xV.Pointer())
-
-	a.Ptr = uptr
-	a.L = xV.Len()
-	a.C = xV.Cap()
+	a.Raw = storage.AsByteSlice(x)
 	a.t = Dtype{elT}
-	a.v = x
 }
 
 // fromSliceOrTensor populates the value from a slice or anything that can form an array
@@ -85,94 +66,52 @@ func (a *array) fromSliceOrArrayer(x interface{}) {
 		xp := T.arrPtr()
 
 		// if the underlying array hasn't been allocated, or not enough has been allocated
-		if a.Ptr == nil || a.L < xp.L || a.C < xp.C {
-			a.t = xp.t
-			a.L = xp.L
-			a.C = xp.C
-			a.Ptr = malloc(a.t, a.L)
+		if a.Header.Raw == nil {
+			a.Header.Raw = malloc(xp.t, xp.Len())
 		}
 		a.t = xp.t
-		a.L = xp.L
-		a.C = xp.C
 
 		copyArray(a, T.arrPtr())
-		a.v = nil    // tell the GC to release whatever a.v may hold
-		a.forcefix() // fix it such that a.v has a value and is not nil
 		return
 	}
 	a.fromSlice(x)
 }
 
-// fix fills the a.v empty interface{} if it's not nil
-func (a *array) fix() {
-	if a.v == nil {
-		a.forcefix()
-	}
-}
-
-// forcefix fills the a.v empty interface{}. No checks are made if the thing is empty
-func (a *array) forcefix() {
-	sliceT := reflect.SliceOf(a.t.Type)
-	ptr := unsafe.Pointer(&a.Header)
-	val := reflect.Indirect(reflect.NewAt(sliceT, ptr))
-	a.v = val.Interface()
-}
-
 // byteSlice casts the underlying slice into a byte slice. Useful for copying and zeroing, but not much else
-func (a array) byteSlice() []byte {
-	return storage.AsByteSlice(&a.Header, a.t.Type)
-}
+func (a array) byteSlice() []byte { return a.Header.Raw }
 
 // sliceInto creates a slice. Instead of returning an array, which would cause a lot of reallocations, sliceInto expects an array to
 // already have been created. This allows repetitive actions to be done without having to have many pointless allocations
 func (a *array) sliceInto(i, j int, res *array) {
-	c := a.C
+	c := a.Cap()
 
 	if i < 0 || j < i || j > c {
 		panic(fmt.Sprintf("Cannot slice %v - index %d:%d is out of bounds", a, i, j))
 	}
 
-	res.L = j - i
-	res.C = c - i
+	s := i * int(a.t.Size())
+	e := j * int(a.t.Size())
+	c = c - i
+
+	res.Raw = a.Raw[s:e]
 
-	if c-1 > 0 {
-		res.Ptr = storage.ElementAt(i, a.Ptr, a.t.Size())
-	} else {
-		// don't advance pointer
-		res.Ptr = a.Ptr
-	}
-	res.fix()
 }
 
 // slice slices an array
 func (a array) slice(start, end int) array {
-	if end > a.L {
+	if end > a.Len() {
 		panic("Index out of range")
 	}
 	if end < start {
 		panic("Index out of range")
 	}
 
-	L := end - start
-	C := a.C - start
-
-	var startptr unsafe.Pointer
-	if a.C-start > 0 {
-		startptr = storage.ElementAt(start, a.Ptr, a.t.Size())
-	} else {
-		startptr = a.Ptr
-	}
-
-	hdr := storage.Header{
-		Ptr: startptr,
-		L:   L,
-		C:   C,
-	}
+	s := start * int(a.t.Size())
+	e := end * int(a.t.Size())
 
 	return array{
-		Header: hdr,
+		Header: storage.Header{Raw: a.Raw[s:e]},
 		t:      a.t,
-		v:      nil,
 	}
 }
 
@@ -216,30 +155,24 @@ func (a *array) swap(i, j int) {
 
 /* *Array is a Memory */
 
 // Uintptr returns the pointer of the first value of the slab
-func (a *array) Uintptr() uintptr { return uintptr(a.Ptr) }
+func (a *array) Uintptr() uintptr { return uintptr(unsafe.Pointer(&a.Header.Raw[0])) }
 
 // MemSize returns how big the slice is in bytes
-func (a *array) MemSize() uintptr { return uintptr(a.L) * a.t.Size() }
-
-// Pointer returns the pointer of the first value of the slab, as an unsafe.Pointer
-func (a *array) Pointer() unsafe.Pointer { return a.Ptr }
+func (a *array) MemSize() uintptr { return uintptr(len(a.Header.Raw)) }
 
 // Data returns the representation of a slice.
 func (a array) Data() interface{} {
-	if a.v == nil {
-		// build a type of []T
-		shdr := reflect.SliceHeader{
-			Data: uintptr(a.Header.Ptr),
-			Len:  a.Header.L,
-			Cap:  a.Header.C,
-		}
-		sliceT := reflect.SliceOf(a.t.Type)
-		ptr := unsafe.Pointer(&shdr)
-		val := reflect.Indirect(reflect.NewAt(sliceT, ptr))
-		a.v = val.Interface()
-
+	// build a type of []T
+	shdr := reflect.SliceHeader{
+		Data: a.Uintptr(),
+		Len:  a.Len(),
+		Cap:  a.Cap(),
 	}
-	return a.v
+	sliceT := reflect.SliceOf(a.t.Type)
+	ptr := unsafe.Pointer(&shdr)
+	val := reflect.Indirect(reflect.NewAt(sliceT, ptr))
+	return val.Interface()
+
 }
 
 // Zero zeroes out the underlying array of the *Dense tensor.
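
With the header holding raw bytes, the slicing above reduces to integer arithmetic on byte offsets: an element range [start, end) maps to raw[start*esize : end*esize], exactly as array.slice computes s and e. A one-line sketch of that mapping (sliceBytes is a hypothetical helper, not part of the package API):

// sliceBytes mirrors the arithmetic in array.slice: element indices are
// scaled by the element size to reslice the raw bytes in place.
func sliceBytes(raw []byte, esize, start, end int) []byte {
	return raw[start*esize : end*esize]
}

For example, a []float64 of length 5 is backed by 40 raw bytes, and slicing elements 1:3 yields raw[8:24]; no pointer arithmetic on an unmanaged address is ever needed, and the resulting view stays visible to the GC.
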
@@ -258,10 +191,10 @@ func (a array) Zero() {
 		}
 		return
 	}
-	ptr := uintptr(a.Ptr)
-	for i := 0; i < a.L; i++ {
-		want := ptr + uintptr(i)*a.t.Size()
-		val := reflect.NewAt(a.t.Type, unsafe.Pointer(want))
+
+	l := a.Len()
+	for i := 0; i < l; i++ {
+		val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
 		val = reflect.Indirect(val)
 		val.Set(reflect.Zero(a.t))
 	}
@@ -273,10 +206,9 @@ func (a *array) rtype() reflect.Type { return a.t.Type }
 
 /* MEMORY MOVEMENT STUFF */
 
 // malloc is standard Go allocation of a block of memory - the plus side is that Go manages the memory
-func malloc(t Dtype, length int) unsafe.Pointer {
+func malloc(t Dtype, length int) []byte {
 	size := int(calcMemSize(t, length))
-	s := make([]byte, size)
-	return unsafe.Pointer(&s[0])
+	return make([]byte, size)
 }
 
 // calcMemSize calculates the memory size of an array (given its size)
@@ -427,95 +359,79 @@ func copyDenseIter(dst, src DenseTensor, diter, siter Iterator) (int, error) {
 	return storage.CopyIter(dst.rtype(), dst.hdr(), src.hdr(), diter, siter), nil
 }
 
-func getPointer(a interface{}) unsafe.Pointer {
+type scalarPtrCount struct {
+	Ptr   unsafe.Pointer
+	Count int
+}
+
+// scalarRCLock is a lock for the reference counting list.
+var scalarRCLock sync.Mutex
+
+// scalarRC is a bunch of reference counted pointers to scalar values
+var scalarRC = make(map[uintptr]*sync.Pool) // uintptr is the size, the pool stores []byte
+
+func scalarPool(size uintptr) *sync.Pool {
+	scalarRCLock.Lock()
+	pool, ok := scalarRC[size]
+	if !ok {
+		pool = &sync.Pool{
+			New: func() interface{} { return make([]byte, size) },
+		}
+		scalarRC[size] = pool
+	}
+	scalarRCLock.Unlock()
+	return pool
+}
+
+func allocScalar(a interface{}) []byte {
+	atype := reflect.TypeOf(a)
+	size := atype.Size()
+	pool := scalarPool(size)
+	return pool.Get().([]byte)
+}
+
+func freeScalar(bs []byte) {
+	if bs == nil {
+		return
+	}
+
+	// zero out
+	for i := range bs {
+		bs[i] = 0
+	}
+
+	size := uintptr(len(bs))
+
+	// put it back into pool
+	pool := scalarPool(size)
+	pool.Put(bs)
+}
+
+// scalarToHeader creates a Header from a scalar value
+func scalarToHeader(a interface{}) (hdr *storage.Header, newAlloc bool) {
+	var raw []byte
 	switch at := a.(type) {
 	case Memory:
-		return at.Pointer()
-	case bool:
-		return unsafe.Pointer(&at)
-	case int:
-		return unsafe.Pointer(&at)
-	case int8:
-		return unsafe.Pointer(&at)
-	case int16:
-		return unsafe.Pointer(&at)
-	case int32:
-		return unsafe.Pointer(&at)
-	case int64:
-		return unsafe.Pointer(&at)
-	case uint:
-		return unsafe.Pointer(&at)
-	case uint8:
-		return unsafe.Pointer(&at)
-	case uint16:
-		return unsafe.Pointer(&at)
-	case uint32:
-		return unsafe.Pointer(&at)
-	case uint64:
-		return unsafe.Pointer(&at)
-	case float32:
-		return unsafe.Pointer(&at)
-	case float64:
-		return unsafe.Pointer(&at)
-	case complex64:
-		return unsafe.Pointer(&at)
-	case complex128:
-		return unsafe.Pointer(&at)
-	case string:
-		return unsafe.Pointer(&at)
-	case uintptr:
-		return unsafe.Pointer(at)
-	case unsafe.Pointer:
-		return at
-
-	// POINTERS
-
-	case *bool:
-		return unsafe.Pointer(at)
-	case *int:
-		return unsafe.Pointer(at)
-	case *int8:
-		return unsafe.Pointer(at)
-	case *int16:
-		return unsafe.Pointer(at)
-	case *int32:
-		return unsafe.Pointer(at)
-	case *int64:
-		return unsafe.Pointer(at)
-	case *uint:
-		return unsafe.Pointer(at)
-	case *uint8:
-		return unsafe.Pointer(at)
-	case *uint16:
-		return unsafe.Pointer(at)
-	case *uint32:
-		return unsafe.Pointer(at)
-	case *uint64:
-		return
unsafe.Pointer(at) - case *float32: - return unsafe.Pointer(at) - case *float64: - return unsafe.Pointer(at) - case *complex64: - return unsafe.Pointer(at) - case *complex128: - return unsafe.Pointer(at) - case *string: - return unsafe.Pointer(at) - case *uintptr: - return unsafe.Pointer(*at) - case *unsafe.Pointer: - return *at - } - - panic("Cannot get pointer") + raw = storage.FromMemory(at.Uintptr(), at.MemSize()) + default: + raw = allocScalar(a) + newAlloc = true + } + hdr = borrowHeader() + hdr.Raw = raw + if newAlloc { + copyScalarToPrealloc(a, hdr.Raw) + } + + return hdr, newAlloc } -// scalarToHeader creates a Header from a scalar value -func scalarToHeader(a interface{}) *storage.Header { - hdr := borrowHeader() - hdr.Ptr = getPointer(a) - hdr.L = 1 - hdr.C = 1 - return hdr +func copyScalarToPrealloc(a interface{}, bs []byte) { + xV := reflect.ValueOf(a) + xT := reflect.TypeOf(a) + + p := unsafe.Pointer(&bs[0]) + v := reflect.NewAt(xT, p) + reflect.Indirect(v).Set(xV) + return } diff --git a/array_getset.go b/array_getset.go index 1f71afd..c19fe68 100644 --- a/array_getset.go +++ b/array_getset.go @@ -7,6 +7,7 @@ import ( "unsafe" "github.com/pkg/errors" + "gorgonia.org/tensor/internal/storage" ) // Set sets the value of the underlying array at the index i. @@ -68,8 +69,7 @@ func (a *array) Set(i int, x interface{}) { a.SetUnsafePointer(i, xv) default: xv := reflect.ValueOf(x) - want := unsafe.Pointer(uintptr(a.Ptr) + uintptr(i)*a.t.Size()) - val := reflect.NewAt(a.t.Type, unsafe.Pointer(want)) + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } @@ -80,43 +80,60 @@ func (a *array) Get(i int) interface{} { switch a.t.Kind() { case reflect.Bool: return a.GetB(i) + case reflect.Int: return a.GetI(i) + case reflect.Int8: return a.GetI8(i) + case reflect.Int16: return a.GetI16(i) + case reflect.Int32: return a.GetI32(i) + case reflect.Int64: return a.GetI64(i) + case reflect.Uint: return a.GetU(i) + case reflect.Uint8: return a.GetU8(i) + case reflect.Uint16: return a.GetU16(i) + case reflect.Uint32: return a.GetU32(i) + case reflect.Uint64: return a.GetU64(i) + case reflect.Uintptr: return a.GetUintptr(i) + case reflect.Float32: return a.GetF32(i) + case reflect.Float64: return a.GetF64(i) + case reflect.Complex64: return a.GetC64(i) + case reflect.Complex128: return a.GetC128(i) + case reflect.String: return a.GetStr(i) + case reflect.UnsafePointer: return a.GetUnsafePointer(i) + default: - at := unsafe.Pointer(uintptr(a.Ptr) + uintptr(i)*a.t.Size()) - val := reflect.NewAt(a.t.Type, at) + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) return val.Interface() } @@ -290,25 +307,24 @@ func (a *array) Memset(x interface{}) error { } xv := reflect.ValueOf(x) - ptr := uintptr(a.Ptr) - for i := 0; i < a.L; i++ { - want := ptr + uintptr(i)*a.t.Size() - val := reflect.NewAt(a.t.Type, unsafe.Pointer(want)) + l := a.Len() + for i := 0; i < l; i++ { + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } return nil } -func (t *array) memsetIter(x interface{}, it Iterator) (err error) { +func (a *array) memsetIter(x interface{}, it Iterator) (err error) { var i int - switch t.t { + switch a.t { case Bool: xv, ok := x.(bool) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := 
t.Bools() + data := a.Bools() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -316,9 +332,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Int: xv, ok := x.(int) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Ints() + data := a.Ints() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -326,9 +342,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Int8: xv, ok := x.(int8) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Int8s() + data := a.Int8s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -336,9 +352,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Int16: xv, ok := x.(int16) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Int16s() + data := a.Int16s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -346,9 +362,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Int32: xv, ok := x.(int32) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Int32s() + data := a.Int32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -356,9 +372,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Int64: xv, ok := x.(int64) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Int64s() + data := a.Int64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -366,9 +382,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Uint: xv, ok := x.(uint) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Uints() + data := a.Uints() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -376,9 +392,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Uint8: xv, ok := x.(uint8) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Uint8s() + data := a.Uint8s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -386,9 +402,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Uint16: xv, ok := x.(uint16) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Uint16s() + data := a.Uint16s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -396,9 +412,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Uint32: xv, ok := x.(uint32) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Uint32s() + data := a.Uint32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -406,9 +422,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Uint64: xv, ok := x.(uint64) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Uint64s() + data := a.Uint64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -416,9 +432,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) 
(err error) { case Uintptr: xv, ok := x.(uintptr) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Uintptrs() + data := a.Uintptrs() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -426,9 +442,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Float32: xv, ok := x.(float32) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Float32s() + data := a.Float32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -436,9 +452,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Float64: xv, ok := x.(float64) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Float64s() + data := a.Float64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -446,9 +462,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Complex64: xv, ok := x.(complex64) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Complex64s() + data := a.Complex64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -456,9 +472,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case Complex128: xv, ok := x.(complex128) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Complex128s() + data := a.Complex128s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -466,9 +482,9 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case String: xv, ok := x.(string) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.Strings() + data := a.Strings() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } @@ -476,19 +492,17 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { case UnsafePointer: xv, ok := x.(unsafe.Pointer) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.UnsafePointers() + data := a.UnsafePointers() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) default: xv := reflect.ValueOf(x) - ptr := uintptr(t.Ptr) for i, err = it.Next(); err == nil; i, err = it.Next() { - want := ptr + uintptr(i)*t.t.Size() - val := reflect.NewAt(t.t.Type, unsafe.Pointer(want)) + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } @@ -504,7 +518,7 @@ func (a array) Eq(other interface{}) bool { return false } - if oa.L != a.L { + if oa.Len() != a.Len() { return false } /* @@ -514,7 +528,7 @@ func (a array) Eq(other interface{}) bool { */ // same exact thing - if uintptr(oa.Ptr) == uintptr(a.Ptr) { + if uintptr(unsafe.Pointer(&oa.Header.Raw[0])) == uintptr(unsafe.Pointer(&a.Header.Raw[0])) { return true } @@ -628,7 +642,7 @@ func (a array) Eq(other interface{}) bool { } } default: - for i := 0; i < a.L; i++ { + for i := 0; i < a.Len(); i++ { if !reflect.DeepEqual(a.Get(i), oa.Get(i)) { return false } @@ -639,124 +653,122 @@ func (a array) Eq(other interface{}) bool { return false } -func (t *array) zeroIter(it Iterator) (err error) { +func (a *array) zeroIter(it Iterator) (err error) { var i int - switch t.t 
{ + switch a.t { case Bool: - data := t.Bools() + data := a.Bools() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = false } err = handleNoOp(err) case Int: - data := t.Ints() + data := a.Ints() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Int8: - data := t.Int8s() + data := a.Int8s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Int16: - data := t.Int16s() + data := a.Int16s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Int32: - data := t.Int32s() + data := a.Int32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Int64: - data := t.Int64s() + data := a.Int64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint: - data := t.Uints() + data := a.Uints() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint8: - data := t.Uint8s() + data := a.Uint8s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint16: - data := t.Uint16s() + data := a.Uint16s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint32: - data := t.Uint32s() + data := a.Uint32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint64: - data := t.Uint64s() + data := a.Uint64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uintptr: - data := t.Uintptrs() + data := a.Uintptrs() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Float32: - data := t.Float32s() + data := a.Float32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Float64: - data := t.Float64s() + data := a.Float64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Complex64: - data := t.Complex64s() + data := a.Complex64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Complex128: - data := t.Complex128s() + data := a.Complex128s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case String: - data := t.Strings() + data := a.Strings() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = "" } err = handleNoOp(err) case UnsafePointer: - data := t.UnsafePointers() + data := a.UnsafePointers() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = nil } err = handleNoOp(err) default: - ptr := uintptr(t.Ptr) for i, err = it.Next(); err == nil; i, err = it.Next() { - want := ptr + uintptr(i)*t.t.Size() - val := reflect.NewAt(t.t.Type, unsafe.Pointer(want)) + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) - val.Set(reflect.Zero(t.t)) + val.Set(reflect.Zero(a.t)) } err = handleNoOp(err) } diff --git a/bench.sh b/bench.sh new file mode 100755 index 0000000..8523853 --- /dev/null +++ b/bench.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +old=$1; +new=$2; + +git checkout $old +# https://stackoverflow.com/a/2111099 +branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,') +echo "Benchmarking $branch (old)" +go test -run=$^ -bench=. > ${branch}.bench +for i in {1..10} + do + go test -run=$^ -bench=. 
>> ${branch}.bench + done + +git checkout $new +branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,') +echo "Benchmarking $branch (new)" +go test -run=$^ -bench=. > ${branch}.bench +for i in {1..10} + do + go test -run=$^ -bench=. >> ${branch}.bench + done diff --git a/consopt.go b/consopt.go index 2134896..8cbc54f 100644 --- a/consopt.go +++ b/consopt.go @@ -2,7 +2,8 @@ package tensor import ( "reflect" - "unsafe" + + "gorgonia.org/tensor/internal/storage" ) // ConsOpt is a tensor construction option. @@ -106,24 +107,15 @@ func FromScalar(x interface{}, argMask ...[]bool) ConsOpt { f := func(t Tensor) { switch tt := t.(type) { case *Dense: - xt := reflect.TypeOf(x) - xv := reflect.New(xt) - xvi := reflect.Indirect(xv) - xvi.Set(reflect.ValueOf(x)) - uptr := unsafe.Pointer(xv.Pointer()) - var v interface{} - if !tt.Shape().IsScalar() { - sl := reflect.MakeSlice(reflect.SliceOf(xt), 1, 1) - zeroth := sl.Index(0) - zeroth.Set(reflect.ValueOf(x)) - v = sl.Interface() - } - tt.array.Ptr = uptr - tt.array.L = 1 - tt.array.C = 1 - tt.v = v - tt.t = Dtype{xt} + xT := reflect.TypeOf(x) + sxT := reflect.SliceOf(xT) + xv := reflect.MakeSlice(sxT, 1, 1) // []T + xv0 := xv.Index(0) // xv[0] + xv0.Set(reflect.ValueOf(x)) + tt.array.Header.Raw = storage.AsByteSlice(xv.Interface()) + tt.t = Dtype{xT} + tt.mask = mask default: @@ -152,17 +144,11 @@ func FromMemory(ptr uintptr, memsize uintptr) ConsOpt { f := func(t Tensor) { switch tt := t.(type) { case *Dense: - tt.v = nil // if there were any underlying slices it should be GC'd - tt.array.Ptr = unsafe.Pointer(ptr) - tt.array.L = int(memsize / tt.t.Size()) - tt.array.C = int(memsize / tt.t.Size()) - tt.flag = MakeMemoryFlag(tt.flag, ManuallyManaged) - - if tt.IsNativelyAccessible() { - tt.array.fix() - } + tt.Header.Raw = nil // GC anything if needed + tt.Header.Raw = storage.FromMemory(ptr, memsize) + tt.flag = MakeMemoryFlag(tt.flag, ManuallyManaged) default: panic("Unsupported Tensor type") } diff --git a/defaultengine.go b/defaultengine.go index bc92e8c..d9138ae 100644 --- a/defaultengine.go +++ b/defaultengine.go @@ -1,76 +1,69 @@ -package tensor - -import ( - "unsafe" - - "github.com/pkg/errors" - "gorgonia.org/tensor/internal/execution" -) - -// StdEng is the default execution engine that comes with the tensors. To use other execution engines, use the WithEngine construction option. 
-type StdEng struct { - execution.E -} - -// makeArray allocates a slice for the array -func (e StdEng) makeArray(arr *array, t Dtype, size int) { - memsize := calcMemSize(t, size) - s := make([]byte, memsize) - arr.t = t - arr.L = size - arr.C = size - arr.Ptr = unsafe.Pointer(&s[0]) - arr.fix() -} - -func (e StdEng) AllocAccessible() bool { return true } -func (e StdEng) Alloc(size int64) (Memory, error) { return nil, noopError{} } -func (e StdEng) Free(mem Memory, size int64) error { return nil } -func (e StdEng) Memset(mem Memory, val interface{}) error { - if ms, ok := mem.(MemSetter); ok { - return ms.Memset(val) - } - return errors.Errorf("Cannot memset %v with StdEng", mem) -} - -func (e StdEng) Memclr(mem Memory) { - if z, ok := mem.(Zeroer); ok { - z.Zero() - } - return -} - -func (e StdEng) Memcpy(dst, src Memory) error { - switch dt := dst.(type) { - case *array: - switch st := src.(type) { - case *array: - copyArray(dt, st) - return nil - case arrayer: - copyArray(dt, st.arrPtr()) - return nil - } - case arrayer: - switch st := src.(type) { - case *array: - copyArray(dt.arrPtr(), st) - return nil - case arrayer: - copyArray(dt.arrPtr(), st.arrPtr()) - return nil - } - } - return errors.Errorf("Failed to copy %T %T", dst, src) -} - -func (e StdEng) Accessible(mem Memory) (Memory, error) { return mem, nil } - -func (e StdEng) WorksWith(order DataOrder) bool { return true } - -func (e StdEng) checkAccessible(t Tensor) error { - if !t.IsNativelyAccessible() { - return errors.Errorf(inaccessibleData, t) - } - return nil -} +package tensor + +import ( + "github.com/pkg/errors" + "gorgonia.org/tensor/internal/execution" +) + +// StdEng is the default execution engine that comes with the tensors. To use other execution engines, use the WithEngine construction option. 
+type StdEng struct { + execution.E +} + +// makeArray allocates a slice for the array +func (e StdEng) makeArray(arr *array, t Dtype, size int) { + arr.Raw = malloc(t, size) + arr.t = t +} + +func (e StdEng) AllocAccessible() bool { return true } +func (e StdEng) Alloc(size int64) (Memory, error) { return nil, noopError{} } +func (e StdEng) Free(mem Memory, size int64) error { return nil } +func (e StdEng) Memset(mem Memory, val interface{}) error { + if ms, ok := mem.(MemSetter); ok { + return ms.Memset(val) + } + return errors.Errorf("Cannot memset %v with StdEng", mem) +} + +func (e StdEng) Memclr(mem Memory) { + if z, ok := mem.(Zeroer); ok { + z.Zero() + } + return +} + +func (e StdEng) Memcpy(dst, src Memory) error { + switch dt := dst.(type) { + case *array: + switch st := src.(type) { + case *array: + copyArray(dt, st) + return nil + case arrayer: + copyArray(dt, st.arrPtr()) + return nil + } + case arrayer: + switch st := src.(type) { + case *array: + copyArray(dt.arrPtr(), st) + return nil + case arrayer: + copyArray(dt.arrPtr(), st.arrPtr()) + return nil + } + } + return errors.Errorf("Failed to copy %T %T", dst, src) +} + +func (e StdEng) Accessible(mem Memory) (Memory, error) { return mem, nil } + +func (e StdEng) WorksWith(order DataOrder) bool { return true } + +func (e StdEng) checkAccessible(t Tensor) error { + if !t.IsNativelyAccessible() { + return errors.Errorf(inaccessibleData, t) + } + return nil +} diff --git a/defaultengine_arith.go b/defaultengine_arith.go index 72b171d..918e1ca 100644 --- a/defaultengine_arith.go +++ b/defaultengine_arith.go @@ -48,7 +48,6 @@ func (e StdEng) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.AddIter(typ, retVal.hdr(), dataB, ait, bit) } - return } switch { @@ -70,7 +69,6 @@ func (e StdEng) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.Add(typ, retVal.hdr(), dataB) } - return } @@ -115,7 +113,6 @@ func (e StdEng) Sub(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.SubIter(typ, retVal.hdr(), dataB, ait, bit) } - return } switch { @@ -137,7 +134,6 @@ func (e StdEng) Sub(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.Sub(typ, retVal.hdr(), dataB) } - return } @@ -182,7 +178,6 @@ func (e StdEng) Mul(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.MulIter(typ, retVal.hdr(), dataB, ait, bit) } - return } switch { @@ -204,7 +199,6 @@ func (e StdEng) Mul(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.Mul(typ, retVal.hdr(), dataB) } - return } @@ -249,7 +243,6 @@ func (e StdEng) Div(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.DivIter(typ, retVal.hdr(), dataB, ait, bit) } - return } switch { @@ -271,7 +264,6 @@ func (e StdEng) Div(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.Div(typ, retVal.hdr(), dataB) } - return } @@ -316,7 +308,6 @@ func (e StdEng) Pow(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.PowIter(typ, retVal.hdr(), dataB, ait, bit) } - return } switch { @@ -338,7 +329,6 @@ func (e StdEng) Pow(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.Pow(typ, retVal.hdr(), dataB) } - return } @@ -383,7 +373,6 @@ func (e StdEng) Mod(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = e.E.ModIter(typ, retVal.hdr(), dataB, ait, bit) } - return } switch { @@ -405,7 +394,6 @@ func (e StdEng) Mod(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err } err = 
e.E.Mod(typ, retVal.hdr(), dataB) } - return } @@ -429,15 +417,15 @@ func (e StdEng) AddScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Add") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Add") } scalarHeader = dataA @@ -471,6 +459,9 @@ func (e StdEng) AddScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.AddIter(typ, dataA, retVal.hdr(), ait, bit) } } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -502,6 +493,9 @@ func (e StdEng) AddScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func } err = e.E.Add(typ, retVal.hdr(), dataB) } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -526,15 +520,15 @@ func (e StdEng) SubScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Sub") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Sub") } scalarHeader = dataA @@ -568,6 +562,9 @@ func (e StdEng) SubScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.SubIter(typ, dataA, retVal.hdr(), ait, bit) } } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -599,6 +596,9 @@ func (e StdEng) SubScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func } err = e.E.Sub(typ, retVal.hdr(), dataB) } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -623,15 +623,15 @@ func (e StdEng) MulScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Mul") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Mul") } scalarHeader = dataA @@ -665,6 +665,9 @@ func (e StdEng) MulScalar(t Tensor, s interface{}, 
leftTensor bool, opts ...Func err = e.E.MulIter(typ, dataA, retVal.hdr(), ait, bit) } } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -696,6 +699,9 @@ func (e StdEng) MulScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func } err = e.E.Mul(typ, retVal.hdr(), dataB) } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -720,15 +726,15 @@ func (e StdEng) DivScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Div") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Div") } scalarHeader = dataA @@ -762,6 +768,9 @@ func (e StdEng) DivScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.DivIter(typ, dataA, retVal.hdr(), ait, bit) } } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -793,6 +802,9 @@ func (e StdEng) DivScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func } err = e.E.Div(typ, retVal.hdr(), dataB) } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -817,15 +829,15 @@ func (e StdEng) PowScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Pow") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Pow") } scalarHeader = dataA @@ -859,6 +871,9 @@ func (e StdEng) PowScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.PowIter(typ, dataA, retVal.hdr(), ait, bit) } } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -890,6 +905,9 @@ func (e StdEng) PowScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func } err = e.E.Pow(typ, retVal.hdr(), dataB) } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -914,15 +932,15 @@ func (e StdEng) ModScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Mod") } scalarHeader = 
dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Mod") } scalarHeader = dataA @@ -956,6 +974,9 @@ func (e StdEng) ModScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.ModIter(typ, dataA, retVal.hdr(), ait, bit) } } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -987,6 +1008,9 @@ func (e StdEng) ModScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func } err = e.E.Mod(typ, retVal.hdr(), dataB) } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } diff --git a/defaultengine_cmp.go b/defaultengine_cmp.go index 8c2b919..1d6ff48 100644 --- a/defaultengine_cmp.go +++ b/defaultengine_cmp.go @@ -66,7 +66,6 @@ func (e StdEng) Gt(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err erro err = e.E.GtIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } - return } @@ -83,7 +82,6 @@ func (e StdEng) Gt(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err erro err = e.E.Gt(typ, dataA, dataB, dataReuse) retVal = reuse } - return } @@ -146,7 +144,6 @@ func (e StdEng) Gte(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err err = e.E.GteIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } - return } @@ -163,7 +160,6 @@ func (e StdEng) Gte(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err err = e.E.Gte(typ, dataA, dataB, dataReuse) retVal = reuse } - return } @@ -226,7 +222,6 @@ func (e StdEng) Lt(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err erro err = e.E.LtIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } - return } @@ -243,7 +238,6 @@ func (e StdEng) Lt(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err erro err = e.E.Lt(typ, dataA, dataB, dataReuse) retVal = reuse } - return } @@ -306,7 +300,6 @@ func (e StdEng) Lte(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err err = e.E.LteIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } - return } @@ -323,7 +316,6 @@ func (e StdEng) Lte(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err err err = e.E.Lte(typ, dataA, dataB, dataReuse) retVal = reuse } - return } @@ -386,7 +378,6 @@ func (e StdEng) ElEq(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err er err = e.E.EqIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } - return } @@ -403,7 +394,6 @@ func (e StdEng) ElEq(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err er err = e.E.Eq(typ, dataA, dataB, dataReuse) retVal = reuse } - return } @@ -466,7 +456,6 @@ func (e StdEng) ElNe(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err er err = e.E.NeIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } - return } @@ -483,7 +472,6 @@ func (e StdEng) ElNe(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err er err = e.E.Ne(typ, dataA, dataB, dataReuse) retVal = reuse } - return } @@ -512,15 +500,15 @@ func (e StdEng) GtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { 
return nil, errors.Wrapf(err, opFail, "StdEng.Gt") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Gt") } scalarHeader = dataA @@ -563,12 +551,15 @@ func (e StdEng) GtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO err = e.E.GtIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 - if dataA.L == 1 && dataB.L == 1 { + if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) @@ -599,6 +590,9 @@ func (e StdEng) GtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO err = e.E.Gt(typ, dataA, dataB, dataReuse) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -628,15 +622,15 @@ func (e StdEng) GteScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Gte") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Gte") } scalarHeader = dataA @@ -679,12 +673,15 @@ func (e StdEng) GteScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.GteIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 - if dataA.L == 1 && dataB.L == 1 { + if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) @@ -715,6 +712,9 @@ func (e StdEng) GteScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.Gte(typ, dataA, dataB, dataReuse) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -744,15 +744,15 @@ func (e StdEng) LtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Lt") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Lt") } scalarHeader = 
dataA @@ -795,12 +795,15 @@ func (e StdEng) LtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO err = e.E.LtIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 - if dataA.L == 1 && dataB.L == 1 { + if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) @@ -831,6 +834,9 @@ func (e StdEng) LtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO err = e.E.Lt(typ, dataA, dataB, dataReuse) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -860,15 +866,15 @@ func (e StdEng) LteScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Lte") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Lte") } scalarHeader = dataA @@ -911,12 +917,15 @@ func (e StdEng) LteScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.LteIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 - if dataA.L == 1 && dataB.L == 1 { + if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) @@ -947,6 +956,9 @@ func (e StdEng) LteScalar(t Tensor, s interface{}, leftTensor bool, opts ...Func err = e.E.Lte(typ, dataA, dataB, dataReuse) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -972,15 +984,15 @@ func (e StdEng) EqScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Eq") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Eq") } scalarHeader = dataA @@ -1023,12 +1035,15 @@ func (e StdEng) EqScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO err = e.E.EqIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 - if dataA.L == 1 
&& dataB.L == 1 { + if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) @@ -1059,6 +1074,9 @@ func (e StdEng) EqScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO err = e.E.Eq(typ, dataA, dataB, dataReuse) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } @@ -1084,15 +1102,15 @@ func (e StdEng) NeScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Ne") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Ne") } scalarHeader = dataA @@ -1135,12 +1153,15 @@ func (e StdEng) NeScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO err = e.E.NeIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 - if dataA.L == 1 && dataB.L == 1 { + if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) @@ -1171,6 +1192,9 @@ func (e StdEng) NeScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncO err = e.E.Ne(typ, dataA, dataB, dataReuse) retVal = reuse } + if newAlloc { + freeScalar(scalarHeader.Raw) + } returnHeader(scalarHeader) return } diff --git a/defaultengine_matop_misc.go b/defaultengine_matop_misc.go index f45c9bb..59c3b69 100644 --- a/defaultengine_matop_misc.go +++ b/defaultengine_matop_misc.go @@ -180,8 +180,8 @@ func (e StdEng) fastCopyDenseRepeat(src DenseTensor, dest *Dense, outers, size, bDestEnd := (destStart + tmp) * int(darr.t.Size()) // then we get the data as a slice of raw bytes - sBS := storage.AsByteSlice(&sarr.Header, sarr.t.Type) - dBS := storage.AsByteSlice(&darr.Header, darr.t.Type) + sBS := sarr.Header.Raw + dBS := darr.Header.Raw // recall that len(src) < len(dest) // it's easier to understand if we define the ranges. 
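Before the stacking code, it is worth spelling out the pattern the arithmetic and comparison engines above keep repeating: a scalar operand is boxed into a byte-backed storage.Header, the prep functions now report whether that header was freshly allocated (newAlloc), and the engine releases it once the op completes. What follows is a minimal, self-contained sketch of that idea, not the library's code; header, typedLen, float64s, and scalarToHeader here are illustrative stand-ins for storage.Header, Header.TypedLen, the generated Float64s accessor, and the package-private scalarToHeader.

package main

import (
	"fmt"
	"unsafe"
)

// header mirrors the post-patch storage.Header: a plain byte slice, so the GC
// tracks ordinary memory instead of an unsafe.Pointer.
type header struct{ Raw []byte }

// typedLen plays the role of Header.TypedLen(t): how many elements of the
// given size fit in Raw. isScalar(h, t) in this patch is TypedLen(t) == 1.
func (h *header) typedLen(elemSize int) int { return len(h.Raw) / elemSize }

// float64s reinterprets Raw as []float64 via a slice-header cast, clamped to
// the typed length so len and cap count elements rather than bytes.
func (h *header) float64s() []float64 {
	n := h.typedLen(8)
	return (*(*[]float64)(unsafe.Pointer(&h.Raw)))[:n:n]
}

// scalarToHeader boxes one scalar into a header. This sketch always
// allocates, so newAlloc is always true; the real version draws headers from
// a pool, which is why prepDataVS/prepDataSV thread newAlloc back to the
// caller and freeScalar runs only when it is set.
func scalarToHeader(s float64) (h *header, newAlloc bool) {
	h = &header{Raw: make([]byte, 8)}
	h.float64s()[0] = s
	return h, true
}

func main() {
	h, newAlloc := scalarToHeader(3.14)
	fmt.Println(h.float64s(), h.typedLen(8) == 1, newAlloc) // [3.14] true true
}

The clamp in float64s is the same one the asSliceRaw template performs later in this diff with h.TypedLen; the sketch merely hard-codes the element size for brevity.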
diff --git a/defaultengine_matop_stack.go b/defaultengine_matop_stack.go index 368ddb5..879ca28 100644 --- a/defaultengine_matop_stack.go +++ b/defaultengine_matop_stack.go @@ -2,7 +2,6 @@ package tensor import ( "github.com/pkg/errors" - "gorgonia.org/tensor/internal/storage" ) // This file contains code for the execution engine to stack tensors @@ -366,7 +365,7 @@ func (e StdEng) doViewStack8(t, retVal DenseTensor, axisStride, batches int, it func (e StdEng) doViewStackArbitrary(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) { dt := t.Dtype() - data := storage.AsByteSlice(retVal.hdr(), dt.Type)[:0] + data := retVal.hdr().Raw[:0] // truncate to 0 size := int(dt.Size()) var mask []bool var retIsMasked bool @@ -385,8 +384,7 @@ func (e StdEng) doViewStackArbitrary(t, retVal DenseTensor, axisStride, batches tmask = mt.Mask() isMasked = mt.IsMasked() } - dt := t.Dtype() - bs := storage.AsByteSlice(t.hdr(), dt.Type) + bs := t.hdr().Raw for last = 0; last < axisStride; last++ { id, err := it.Next() diff --git a/defaultengine_matop_transpose.go b/defaultengine_matop_transpose.go index 76a2f0a..cef220e 100644 --- a/defaultengine_matop_transpose.go +++ b/defaultengine_matop_transpose.go @@ -4,7 +4,6 @@ package tensor import ( "github.com/pkg/errors" - "gorgonia.org/tensor/internal/storage" ) func (e StdEng) Transpose(a Tensor, expStrides []int) error { @@ -140,7 +139,7 @@ func (e StdEng) denseTransposeArbitrary(a DenseTensor, expStrides []int) { // arbs := storage.AsByteSlice(tmpArr.hdr(), rtype) arbs := tmpArr.byteSlice() - orig := storage.AsByteSlice(a.hdr(), rtype) + orig := a.hdr().Raw it := newFlatIterator(a.Info()) var j int for i, err := it.Next(); err == nil; i, err = it.Next() { diff --git a/defaultengine_matop_transpose_inplace.go b/defaultengine_matop_transpose_inplace.go index 612e1cc..8627927 100644 --- a/defaultengine_matop_transpose_inplace.go +++ b/defaultengine_matop_transpose_inplace.go @@ -4,7 +4,6 @@ package tensor import ( "github.com/pkg/errors" - "gorgonia.org/tensor/internal/storage" ) func (e StdEng) Transpose(a Tensor, expStrides []int) error { @@ -290,7 +289,7 @@ func (e StdEng) denseTransposeArbitrary(a DenseTensor, expStrides []int) { saved := make([]byte, typeSize, typeSize) tmp := make([]byte, typeSize, typeSize) var i int - data := storage.AsByteSlice(a.hdr(), rtype) + data := a.arr().Raw if len(data) < 4*typeSize { return } diff --git a/defaultengine_prep.go b/defaultengine_prep.go index ea2b2f5..261367a 100644 --- a/defaultengine_prep.go +++ b/defaultengine_prep.go @@ -159,10 +159,10 @@ func prepDataVV(a, b Tensor, reuse Tensor) (dataA, dataB, dataReuse *storage.Hea return } -func prepDataVS(a Tensor, b interface{}, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, ait, iit Iterator, useIter bool, err error) { +func prepDataVS(a Tensor, b interface{}, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, ait, iit Iterator, useIter bool, newAlloc bool, err error) { // get data dataA = a.hdr() - dataB = scalarToHeader(b) + dataB, newAlloc = scalarToHeader(b) if reuse != nil { dataReuse = reuse.hdr() } @@ -182,9 +182,9 @@ func prepDataVS(a Tensor, b interface{}, reuse Tensor) (dataA, dataB, dataReuse return } -func prepDataSV(a interface{}, b Tensor, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, bit, iit Iterator, useIter bool, err error) { +func prepDataSV(a interface{}, b Tensor, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, bit, iit Iterator, useIter bool, newAlloc bool, err 
error) { // get data - dataA = scalarToHeader(a) + dataA, newAlloc = scalarToHeader(a) dataB = b.hdr() if reuse != nil { dataReuse = reuse.hdr() diff --git a/defaultenginefloat32.go b/defaultenginefloat32.go index 82d48f2..45859a4 100644 --- a/defaultenginefloat32.go +++ b/defaultenginefloat32.go @@ -1,8 +1,6 @@ package tensor import ( - "unsafe" - "github.com/pkg/errors" "gorgonia.org/tensor/internal/execution" "gorgonia.org/tensor/internal/storage" @@ -118,12 +116,11 @@ func (e Float32Engine) makeArray(arr *array, t Dtype, size int) { if t != Float32 { panic("Float32Engine only creates float32s") } - s := make([]float32, size) + if size < 0 { + panic("Cannot have negative sizes when making array") + } + arr.Header.Raw = make([]byte, size*4) arr.t = t - arr.L = size - arr.C = size - arr.Ptr = unsafe.Pointer(&s[0]) - arr.fix() } func (e Float32Engine) FMA(a, x, y Tensor) (retVal Tensor, err error) { diff --git a/defaultenginefloat32_test.go b/defaultenginefloat32_test.go new file mode 100644 index 0000000..0ebd016 --- /dev/null +++ b/defaultenginefloat32_test.go @@ -0,0 +1,42 @@ +package tensor + +import ( + "testing" + "testing/quick" +) + +func TestFloat32Engine_makeArray(t *testing.T) { + + // the uint16 is just to make sure that tests are correctly run. + // we don't want the quicktest to randomly generate a size that is so large + // that Go takes a long time just to allocate. We'll test the other sizes (like negative numbers) + // after the quick test. + f := func(sz uint16) bool { + size := int(sz) + e := Float32Engine{StdEng{}} + dt := Float32 + arr := array{} + + e.makeArray(&arr, dt, size) + + if len(arr.Raw) != size*4 { + t.Errorf("Expected raw to be size*4. Got %v instead", len(arr.Raw)) + return false + } + v, ok := arr.Data().([]float32) + if !ok { + t.Errorf("Expected v to be []float32. Got %T instead", arr.Data()) + return false + } + + if len(v) != size { + return false + } + return true + } + + if err := quick.Check(f, nil); err != nil { + t.Errorf("Quick test failed %v", err) + } + +} diff --git a/defaultenginefloat64.go b/defaultenginefloat64.go index b0d9466..21bba43 100644 --- a/defaultenginefloat64.go +++ b/defaultenginefloat64.go @@ -1,8 +1,6 @@ package tensor import ( - "unsafe" - "github.com/pkg/errors" "gorgonia.org/tensor/internal/execution" "gorgonia.org/tensor/internal/storage" @@ -118,12 +116,8 @@ func (e Float64Engine) makeArray(arr *array, t Dtype, size int) { if t != Float64 { panic("Float64Engine only creates float64s") } - s := make([]float64, size) + arr.Header.Raw = make([]byte, size*8) arr.t = t - arr.L = size - arr.C = size - arr.Ptr = unsafe.Pointer(&s[0]) - arr.fix() } func (e Float64Engine) FMA(a, x, y Tensor) (retVal Tensor, err error) { diff --git a/defaultenginefloat64_test.go b/defaultenginefloat64_test.go new file mode 100644 index 0000000..2d9391a --- /dev/null +++ b/defaultenginefloat64_test.go @@ -0,0 +1,42 @@ +package tensor + +import ( + "testing" + "testing/quick" +) + +func TestFloat64Engine_makeArray(t *testing.T) { + + // the uint16 is just to make sure that tests are correctly run. + // we don't want the quicktest to randomly generate a size that is so large + // that Go takes a long time just to allocate. We'll test the other sizes (like negative numbers) + // after the quick test. + f := func(sz uint16) bool { + size := int(sz) + e := Float64Engine{StdEng{}} + dt := Float64 + arr := array{} + + e.makeArray(&arr, dt, size) + + if len(arr.Raw) != size*8 { + t.Errorf("Expected raw to be size*8. 
Got %v instead", len(arr.Raw)) + return false + } + v, ok := arr.Data().([]float64) + if !ok { + t.Errorf("Expected v to be []float32. Got %T instead", arr.Data()) + return false + } + + if len(v) != size { + return false + } + return true + } + + if err := quick.Check(f, nil); err != nil { + t.Errorf("Quick test failed %v", err) + } + +} diff --git a/dense.go b/dense.go index fa9693d..d647ab5 100644 --- a/dense.go +++ b/dense.go @@ -6,6 +6,7 @@ import ( "unsafe" "github.com/pkg/errors" + "gorgonia.org/tensor/internal/storage" ) const ( @@ -47,14 +48,12 @@ func recycledDense(dt Dtype, shape Shape, opts ...ConsOpt) (retVal *Dense) { } func recycledDenseNoFix(dt Dtype, shape Shape, opts ...ConsOpt) (retVal *Dense) { - size := shape.TotalSize() - if shape.IsScalar() { - size = 1 - } + // size := shape.TotalSize() + //if shape.IsScalar() { + // size = 1 + //} retVal = borrowDense() retVal.array.t = dt - retVal.array.L = size - retVal.array.C = size retVal.AP.zeroWithDims(shape.Dims()) for _, opt := range opts { @@ -65,8 +64,7 @@ func recycledDenseNoFix(dt Dtype, shape Shape, opts ...ConsOpt) (retVal *Dense) } func (t *Dense) fromSlice(x interface{}) { - t.array.Ptr = nil - t.array.v = nil + t.array.Header.Raw = nil // GC anything else t.array.fromSlice(x) } @@ -88,15 +86,13 @@ func (t *Dense) makeArray(size int) { default: } - mem, err := t.e.Alloc(calcMemSize(t.t, size)) + memsize := calcMemSize(t.t, size) + mem, err := t.e.Alloc(memsize) if err != nil { panic(err) } - t.array.Ptr = mem.Pointer() - t.array.L = size - t.array.C = size - t.array.fix() + t.array.Raw = storage.FromMemory(mem.Uintptr(), uintptr(memsize)) return } @@ -111,28 +107,25 @@ func (t *Dense) Data() interface{} { if t.IsScalar() { return t.Get(0) } - if t.v == nil { - // build a type of []T - shdr := reflect.SliceHeader{ - Data: uintptr(t.Header.Ptr), - Len: t.Header.L, - Cap: t.Header.C, - } - sliceT := reflect.SliceOf(t.t.Type) - ptr := unsafe.Pointer(&shdr) - val := reflect.Indirect(reflect.NewAt(sliceT, ptr)) - t.v = val.Interface() + // build a type of []T + shdr := reflect.SliceHeader{ + Data: t.array.Uintptr(), + Len: t.array.Len(), + Cap: t.array.Cap(), } - return t.v + sliceT := reflect.SliceOf(t.t.Type) + ptr := unsafe.Pointer(&shdr) + val := reflect.Indirect(reflect.NewAt(sliceT, ptr)) + return val.Interface() } // DataSize returns the size of the underlying array. 
Typically t.DataSize() == t.Shape().TotalSize() func (t *Dense) DataSize() int { if t.IsScalar() { - return 0 + return 0 // DOUBLE CHECK } - return t.L + return t.array.Len() } // Engine returns the execution engine associated with this Tensor @@ -212,7 +205,7 @@ func (t *Dense) Clone() interface{} { retVal.e = t.e retVal.oe = t.oe retVal.flag = t.flag - retVal.makeArray(t.L) + retVal.makeArray(t.Len()) if !t.old.IsZero() { retVal.old = t.old.Clone() @@ -270,8 +263,8 @@ func (t *Dense) MaskFromDense(tts ...*Dense) { // Private methods -func (t *Dense) cap() int { return t.array.C } -func (t *Dense) len() int { return t.array.L } // exactly the same as DataSize +func (t *Dense) cap() int { return t.array.Cap() } +func (t *Dense) len() int { return t.array.Len() } // exactly the same as DataSize func (t *Dense) arr() array { return t.array } func (t *Dense) arrPtr() *array { return &t.array } @@ -294,16 +287,16 @@ func (t *Dense) fix() { } switch { - case t.IsScalar() && t.array.Ptr == nil: + case t.IsScalar() && t.array.Header.Raw == nil: t.makeArray(1) - case t.Shape() == nil && t.array.Ptr != nil: - size := t.L + case t.Shape() == nil && t.array.Header.Raw != nil: + size := t.Len() if size == 1 { t.SetShape() // scalar } else { t.SetShape(size) // vector } - case t.array.Ptr == nil && t.t != Dtype{}: + case t.array.Header.Raw == nil && t.t != Dtype{}: size := t.Shape().TotalSize() t.makeArray(size) @@ -330,11 +323,11 @@ func (t *Dense) makeMask() { // sanity is a function that sanity checks that a tensor is correct. func (t *Dense) sanity() error { - if !t.AP.IsZero() && t.Shape() == nil && t.array.Ptr == nil { + if !t.AP.IsZero() && t.Shape() == nil && t.array.Header.Raw == nil { return errors.New(emptyTensor) } - size := t.L + size := t.Len() expected := t.Size() if t.viewOf == 0 && size != expected && !t.IsScalar() { return errors.Wrap(errors.Errorf(shapeMismatch, t.Shape(), size), "sanity check failed") diff --git a/dense_assign.go b/dense_assign.go index bd8bceb..5f44897 100644 --- a/dense_assign.go +++ b/dense_assign.go @@ -10,14 +10,14 @@ func overlaps(a, b DenseTensor) bool { } aarr := a.arr() barr := b.arr() - if aarr.Ptr == barr.Ptr { + if aarr.Uintptr() == barr.Uintptr() { return true } - aptr := uintptr(aarr.Ptr) - bptr := uintptr(barr.Ptr) + aptr := aarr.Uintptr() + bptr := barr.Uintptr() - capA := aptr + uintptr(aarr.C)*a.Dtype().Size() - capB := bptr + uintptr(barr.C)*b.Dtype().Size() + capA := aptr + uintptr(cap(aarr.Header.Raw)) + capB := bptr + uintptr(cap(barr.Header.Raw)) switch { case aptr < bptr: diff --git a/dense_io.go b/dense_io.go index e4717f8..7bb9608 100644 --- a/dense_io.go +++ b/dense_io.go @@ -808,7 +808,7 @@ func (t *Dense) FBDecode(buf []byte) error { // allocated data. 
Now time to actually copy over the data db := t.byteSlice() copy(db, serialized.DataBytes()) - t.forcefix() + t.fix() return t.sanity() } diff --git a/dense_matop.go b/dense_matop.go index 5ce693b..7e81419 100644 --- a/dense_matop.go +++ b/dense_matop.go @@ -243,7 +243,6 @@ func (t *Dense) SliceInto(view *Dense, slices ...Slice) (retVal View, err error) } view.AP.zero() - view.array.v = nil // reset view.t = t.t view.e = t.e diff --git a/dense_matop_test.go b/dense_matop_test.go index 755309f..cf2ce7a 100644 --- a/dense_matop_test.go +++ b/dense_matop_test.go @@ -607,7 +607,9 @@ func TestDense_Slice(t *testing.T) { assert.True(V.(*Dense).old.IsZero()) // slice a sliced + t.Logf("%v", V) V, err = V.Slice(makeRS(1, 2)) + t.Logf("%v", V) assert.True(ScalarShape().Eq(V.Shape())) assert.Equal(float32(3), V.Data()) diff --git a/engine.go b/engine.go index a8ec63c..1ac8400 100644 --- a/engine.go +++ b/engine.go @@ -1,9 +1,5 @@ package tensor -import ( - "unsafe" -) - // Memory is a representation of memory of the value. // // The main reason for requiring both Uintptr() and Pointer() methods is because while Go currently does not have a compacting @@ -13,7 +9,6 @@ import ( type Memory interface { Uintptr() uintptr MemSize() uintptr - Pointer() unsafe.Pointer } // Engine is a representation of an execution engine. diff --git a/genlib2/agg1_body.go b/genlib2/agg1_body.go index 2ca9d96..f738d0c 100644 --- a/genlib2/agg1_body.go +++ b/genlib2/agg1_body.go @@ -5,8 +5,8 @@ import "text/template" // level 1 aggregation (internal.E) templates const ( - eArithRaw = `as := isScalar(a) - bs := isScalar(b) + eArithRaw = `as := isScalar(a, t) + bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} @@ -25,18 +25,18 @@ const ( default: {{if and $isDiv $p}} err = {{end}} Vec{{$name}}{{short .}}(at, bt) } - return + return {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` - eArithIncrRaw = `as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + eArithIncrRaw = `as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} switch t { @@ -60,14 +60,14 @@ const ( default: {{$name}}Incr{{short .}}(at, bt,it) } - return + return {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` - eArithIterRaw = `as := isScalar(a) - bs := isScalar(b) + eArithIterRaw = `as := isScalar(a, t) + bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} @@ -91,12 +91,12 @@ const ( } ` - eArithIterIncrRaw = `as :=isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + eArithIterIncrRaw = `as :=isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on a scalar increment. 
len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} switch t { @@ -126,7 +126,7 @@ const ( } ` - eMapRaw = `as := isScalar(a) + eMapRaw = `as := isScalar(a, t) switch t { {{range .Kinds -}} case {{reflectKind .}}: @@ -181,11 +181,11 @@ const ( Map{{short .}}(f0, at) } {{end -}} - + {{end -}} default: return errors.Errorf("Cannot map t of %v", t) - + } ` @@ -233,8 +233,8 @@ const ( } ` - eCmpSameRaw = `as := isScalar(a) - bs := isScalar(b) + eCmpSameRaw = `as := isScalar(a, t) + bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} @@ -252,20 +252,20 @@ const ( default: {{$name}}Same{{short .}}(at, bt) } - return + return {{end -}} {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) }` - eCmpBoolRaw = `as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + eCmpBoolRaw = `as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is a scalar. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} @@ -285,15 +285,15 @@ const ( default: {{$name}}{{short .}}(at, bt, rt) } - return + return {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` - eCmpSameIterRaw = `as := isScalar(a) - bs := isScalar(b) + eCmpSameIterRaw = `as := isScalar(a, t) + bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} @@ -319,13 +319,13 @@ const ( } ` - eCmpBoolIterRaw = `as :=isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + eCmpBoolIterRaw = `as :=isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} @@ -478,7 +478,7 @@ const ( return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } Clamp{{short .}}(a.{{sliceOf .}}, min, max) - return nil + return nil {{end -}} default: return errors.Errorf("Unsupported type %v for Clamp", t) @@ -553,7 +553,7 @@ const ( if _, ok := err.(NoOpError); ok { err = nil } - return + return {{end -}} default: return nil, errors.Errorf("Unsupported type %v for Arg{{.Name}}", t) diff --git a/genlib2/agg2_body.go b/genlib2/agg2_body.go index cf87bb0..1e16123 100644 --- a/genlib2/agg2_body.go +++ b/genlib2/agg2_body.go @@ -51,15 +51,15 @@ const prepMixedRaw = `if err = unaryCheck(t, {{.TypeClassCheck | lower}}Types); typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header - var useIter bool + var useIter, newAlloc bool if leftTensor { - if dataA, dataB, dataReuse, ait, iit, useIter, err = prepDataVS(t, s, reuse); err != nil { + if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.{{.Name}}") } scalarHeader = dataB } else { - if dataA, dataB, dataReuse, bit, iit, useIter, err = prepDataSV(s, t, reuse); err != nil { + if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.{{.Name}}") } scalarHeader = dataA @@ -133,7 +133,12 @@ const agg2BodyRaw = `if useIter { } {{end -}} } - {{if not .VV -}}returnHeader(scalarHeader){{end}} + {{if not .VV 
-}} + if newAlloc{ + freeScalar(scalarHeader.Raw) + } + returnHeader(scalarHeader) + {{end -}} return } switch { @@ -184,7 +189,12 @@ const agg2BodyRaw = `if useIter { err = e.E.{{.Name}}(typ, retVal.hdr(), dataB) {{end -}} } - {{if not .VV -}}returnHeader(scalarHeader){{end}} + {{if not .VV -}} + if newAlloc{ + freeScalar(scalarHeader.Raw) + } + returnHeader(scalarHeader) + {{end -}} return ` @@ -242,13 +252,18 @@ const agg2CmpBodyRaw = `// check to see if anything needs to be created err = e.E.{{.Name}}Iter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } - {{if not .VV -}}returnHeader(scalarHeader){{end}} + {{if not .VV -}} + if newAlloc{ + freeScalar(scalarHeader.Raw) + } + returnHeader(scalarHeader) + {{end -}} return } {{if not .VV -}} // handle special case where A and B have both len 1 - if dataA.L == 1 && dataB.L == 1 { + if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ,dataReuse,dataA) @@ -288,7 +303,12 @@ const agg2CmpBodyRaw = `// check to see if anything needs to be created err = e.E.{{.Name}}(typ, dataA, dataB, dataReuse) retVal = reuse } - {{if not .VV -}}returnHeader(scalarHeader){{end}} + {{if not .VV -}} + if newAlloc{ + freeScalar(scalarHeader.Raw) + } + returnHeader(scalarHeader) + {{end -}} return ` diff --git a/genlib2/array_getset.go b/genlib2/array_getset.go index 73a686b..a75edd5 100644 --- a/genlib2/array_getset.go +++ b/genlib2/array_getset.go @@ -6,7 +6,7 @@ import ( "text/template" ) -const asSliceRaw = `func (h *Header) {{asType . | strip | title}}s() []{{asType .}} { return *(*[]{{asType .}})(unsafe.Pointer(h)) } +const asSliceRaw = `func (h *Header) {{asType . | strip | title}}s() []{{asType .}} {return (*(*[]{{asType .}})(unsafe.Pointer(&h.Raw)))[:h.TypedLen({{short . | unexport}}Type):h.TypedLen({{short . | unexport}}Type)]} ` const setBasicRaw = `func (h *Header) Set{{short . }}(i int, x {{asType . 
}}) { h.{{sliceOf .}}[i] = x } @@ -23,11 +23,10 @@ func (a *array) Get(i int) interface{} { {{else -}} case reflect.{{reflectKind .}}: return a.{{getOne .}}(i) - {{end -}} + {{end -}}; {{end -}} default: - at := unsafe.Pointer(uintptr(a.Ptr) + uintptr(i) * a.t.Size()) - val := reflect.NewAt(a.t.Type, at) + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) return val.Interface() } @@ -47,8 +46,7 @@ func (a *array) Set(i int, x interface{}) { {{end -}} default: xv := reflect.ValueOf(x) - want := unsafe.Pointer(uintptr(a.Ptr) + uintptr(i)*a.t.Size()) - val := reflect.NewAt(a.t.Type, unsafe.Pointer(want)) + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } @@ -76,10 +74,9 @@ func (a *array) Memset(x interface{}) error { } xv := reflect.ValueOf(x) - ptr := uintptr(a.Ptr) - for i := 0; i < a.L; i++ { - want := ptr + uintptr(i)*a.t.Size() - val := reflect.NewAt(a.t.Type, unsafe.Pointer(want)) + l := a.Len() + for i := 0; i < l; i++ { + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } @@ -94,7 +91,7 @@ func (a array) Eq(other interface{}) bool { return false } - if oa.L != a.L { + if oa.Len() != a.Len() { return false } /* @@ -104,7 +101,7 @@ func (a array) Eq(other interface{}) bool { */ // same exact thing - if uintptr(oa.Ptr) == uintptr(a.Ptr){ + if uintptr(unsafe.Pointer(&oa.Header.Raw[0])) == uintptr(unsafe.Pointer(&a.Header.Raw[0])){ return true } @@ -121,7 +118,7 @@ func (a array) Eq(other interface{}) bool { {{end -}} {{end -}} default: - for i := 0; i < a.L; i++{ + for i := 0; i < a.Len(); i++{ if !reflect.DeepEqual(a.Get(i), oa.Get(i)){ return false } @@ -179,18 +176,18 @@ const copyArrayIterRaw = `func copyArrayIter(dst, src array, diter, siter Iterat ` const memsetIterRaw = ` -func (t *array) memsetIter(x interface{}, it Iterator) (err error) { +func (a *array) memsetIter(x interface{}, it Iterator) (err error) { var i int - switch t.t{ + switch a.t{ {{range .Kinds -}} {{if isParameterized . -}} {{else -}} case {{reflectKind .}}: xv, ok := x.({{asType .}}) if !ok { - return errors.Errorf(dtypeMismatch, t.t, x) + return errors.Errorf(dtypeMismatch, a.t, x) } - data := t.{{sliceOf .}} + data := a.{{sliceOf .}} for i, err = it.Next(); err == nil; i, err = it.Next(){ data[i] = xv } @@ -199,10 +196,8 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { {{end -}} default: xv := reflect.ValueOf(x) - ptr := uintptr(t.Ptr) for i, err = it.Next(); err == nil; i, err = it.Next(){ - want := ptr + uintptr(i)*t.t.Size() - val := reflect.NewAt(t.t.Type, unsafe.Pointer(want)) + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } @@ -213,14 +208,14 @@ func (t *array) memsetIter(x interface{}, it Iterator) (err error) { ` -const zeroIterRaw = `func (t *array) zeroIter(it Iterator) (err error){ +const zeroIterRaw = `func (a *array) zeroIter(it Iterator) (err error){ var i int - switch t.t { + switch a.t { {{range .Kinds -}} {{if isParameterized . 
-}} {{else -}} case {{reflectKind .}}: - data := t.{{sliceOf .}} + data := a.{{sliceOf .}} for i, err = it.Next(); err == nil; i, err = it.Next(){ data[i] = {{if eq .String "bool" -}} false @@ -232,12 +227,10 @@ const zeroIterRaw = `func (t *array) zeroIter(it Iterator) (err error){ {{end -}} {{end -}} default: - ptr := uintptr(t.Ptr) for i, err = it.Next(); err == nil; i, err = it.Next(){ - want := ptr + uintptr(i)*t.t.Size() - val := reflect.NewAt(t.t.Type, unsafe.Pointer(want)) + val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) - val.Set(reflect.Zero(t.t)) + val.Set(reflect.Zero(a.t)) } err = handleNoOp(err) } @@ -245,16 +238,26 @@ const zeroIterRaw = `func (t *array) zeroIter(it Iterator) (err error){ } ` +const reflectConstTemplateRaw = `var ( + {{range .Kinds -}} + {{if isParameterized . -}} + {{else -}} + {{short . | unexport}}Type = reflect.TypeOf({{asType .}}({{if eq .String "bool" -}} false {{else if eq .String "string" -}}"" {{else if eq .String "unsafe.Pointer" -}}nil {{else -}}0{{end -}})) + {{end -}} + {{end -}} +)` + var ( - AsSlice *template.Template - SimpleSet *template.Template - SimpleGet *template.Template - Get *template.Template - Set *template.Template - Memset *template.Template - MemsetIter *template.Template - Eq *template.Template - ZeroIter *template.Template + AsSlice *template.Template + SimpleSet *template.Template + SimpleGet *template.Template + Get *template.Template + Set *template.Template + Memset *template.Template + MemsetIter *template.Template + Eq *template.Template + ZeroIter *template.Template + ReflectType *template.Template ) func init() { @@ -267,6 +270,7 @@ func init() { MemsetIter = template.Must(template.New("MemsetIter").Funcs(funcs).Parse(memsetIterRaw)) Eq = template.Must(template.New("ArrayEq").Funcs(funcs).Parse(arrayEqRaw)) ZeroIter = template.Must(template.New("Zero").Funcs(funcs).Parse(zeroIterRaw)) + ReflectType = template.Must(template.New("ReflectType").Funcs(funcs).Parse(reflectConstTemplateRaw)) } func generateArrayMethods(f io.Writer, ak Kinds) { @@ -295,3 +299,8 @@ func generateHeaderGetSet(f io.Writer, ak Kinds) { } } } + +func generateReflectTypes(f io.Writer, ak Kinds) { + ReflectType.Execute(f, ak) + fmt.Fprintf(f, "\n\n\n") +} diff --git a/genlib2/dense_io.go b/genlib2/dense_io.go index e6e4b0f..814067f 100644 --- a/genlib2/dense_io.go +++ b/genlib2/dense_io.go @@ -560,7 +560,7 @@ func (t *Dense) FBDecode(buf []byte) error { // allocated data. 
Now time to actually copy over the data db := t.byteSlice() copy(db, serialized.DataBytes()) - t.forcefix() + t.fix() return t.sanity() } ` diff --git a/genlib2/main.go b/genlib2/main.go index fafd74c..328cd19 100644 --- a/genlib2/main.go +++ b/genlib2/main.go @@ -52,6 +52,7 @@ func main() { pregenerate() // storage + pipeline(storageLoc, "consts.go", Kinds{allKinds}, generateReflectTypes) pipeline(storageLoc, "getset.go", Kinds{allKinds}, generateHeaderGetSet) pipeline(tensorPkgLoc, "array_getset.go", Kinds{allKinds}, generateArrayMethods) diff --git a/internal/execution/e.go b/internal/execution/e.go index 670ae0b..83fcc1f 100644 --- a/internal/execution/e.go +++ b/internal/execution/e.go @@ -38,7 +38,7 @@ var ( UnsafePointer = reflect.TypeOf(unsafe.Pointer(&Uintptr)) ) -func isScalar(a *storage.Header) bool { return a.L == 1 } +func isScalar(a *storage.Header, t reflect.Type) bool { return a.TypedLen(t) == 1 } type errorIndices []int diff --git a/internal/execution/eng_arith.go b/internal/execution/eng_arith.go index f626a3d..f3de110 100644 --- a/internal/execution/eng_arith.go +++ b/internal/execution/eng_arith.go @@ -10,8 +10,8 @@ import ( ) func (e E) Add(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -230,8 +230,8 @@ func (e E) Add(t reflect.Type, a *storage.Header, b *storage.Header) (err error) } func (e E) Sub(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -436,8 +436,8 @@ func (e E) Sub(t reflect.Type, a *storage.Header, b *storage.Header) (err error) } func (e E) Mul(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -642,8 +642,8 @@ func (e E) Mul(t reflect.Type, a *storage.Header, b *storage.Header) (err error) } func (e E) Div(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -848,8 +848,8 @@ func (e E) Div(t reflect.Type, a *storage.Header, b *storage.Header) (err error) } func (e E) Pow(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Float32: @@ -914,8 +914,8 @@ func (e E) Pow(t reflect.Type, a *storage.Header, b *storage.Header) (err error) } func (e E) Mod(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -1092,11 +1092,11 @@ func (e E) Mod(t reflect.Type, a *storage.Header, b *storage.Header) (err error) } func (e E) AddIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on scalar increment. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -1406,11 +1406,11 @@ func (e E) AddIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *s } func (e E) SubIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -1700,11 +1700,11 @@ func (e E) SubIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *s } func (e E) MulIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -1994,11 +1994,11 @@ func (e E) MulIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *s } func (e E) DivIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -2288,11 +2288,11 @@ func (e E) DivIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *s } func (e E) PowIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -2382,11 +2382,11 @@ func (e E) PowIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *s } func (e E) ModIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on scalar increment. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -2636,8 +2636,8 @@ func (e E) ModIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *s } func (e E) AddIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -2856,8 +2856,8 @@ func (e E) AddIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Ite } func (e E) SubIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -3062,8 +3062,8 @@ func (e E) SubIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Ite } func (e E) MulIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -3268,8 +3268,8 @@ func (e E) MulIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Ite } func (e E) DivIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -3474,8 +3474,8 @@ func (e E) DivIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Ite } func (e E) PowIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Float32: @@ -3540,8 +3540,8 @@ func (e E) PowIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Ite } func (e E) ModIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -3718,12 +3718,12 @@ func (e E) ModIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Ite } func (e E) AddIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -4018,12 +4018,12 @@ func (e E) AddIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, inc } func (e E) SubIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on a scalar increment. 
len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -4299,12 +4299,12 @@ func (e E) SubIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, inc } func (e E) MulIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -4580,12 +4580,12 @@ func (e E) MulIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, inc } func (e E) DivIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -4861,12 +4861,12 @@ func (e E) DivIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, inc } func (e E) PowIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -4952,12 +4952,12 @@ func (e E) PowIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, inc } func (e E) ModIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - is := isScalar(incr) + as := isScalar(a, t) + bs := isScalar(b, t) + is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { - return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("Cannot increment on a scalar increment. 
len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { diff --git a/internal/execution/eng_arith_manual.go b/internal/execution/eng_arith_manual.go index 941d644..3a620e6 100644 --- a/internal/execution/eng_arith_manual.go +++ b/internal/execution/eng_arith_manual.go @@ -8,26 +8,20 @@ import ( ) func (e E) AddSliced(t reflect.Type, dataA *storage.Header, dstStart, dstEnd int, dataB *storage.Header, srcStart, srcEnd int) (err error) { + ds := dstStart * int(t.Size()) + de := dstEnd * int(t.Size()) a := &storage.Header{ - Ptr: storage.ElementAt(dstStart, dataA.Ptr, t.Size()), - L: dstEnd - dstStart, - C: dataA.C - dstStart, - } - if a.C == 0 { - a.C = 1 + Raw: dataA.Raw[ds:de], } + ss := srcStart * int(t.Size()) + se := srcEnd * int(t.Size()) b := &storage.Header{ - Ptr: storage.ElementAt(srcStart, dataB.Ptr, t.Size()), - L: srcEnd - srcStart, - C: dataB.C - srcStart, - } - if b.C == 0 { - b.C = 1 + Raw: dataB.Raw[ss:se], } - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: diff --git a/internal/execution/eng_cmp.go b/internal/execution/eng_cmp.go index 9514f61..b2c4ece 100644 --- a/internal/execution/eng_cmp.go +++ b/internal/execution/eng_cmp.go @@ -10,13 +10,13 @@ import ( ) func (e E) Gt(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is a scalar. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -221,13 +221,13 @@ func (e E) Gt(t reflect.Type, a *storage.Header, b *storage.Header, retVal *stor } func (e E) Gte(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is a scalar. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -432,13 +432,13 @@ func (e E) Gte(t reflect.Type, a *storage.Header, b *storage.Header, retVal *sto } func (e E) Lt(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is a scalar. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -643,13 +643,13 @@ func (e E) Lt(t reflect.Type, a *storage.Header, b *storage.Header, retVal *stor } func (e E) Lte(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is a scalar. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("retVal is a scalar. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -854,13 +854,13 @@ func (e E) Lte(t reflect.Type, a *storage.Header, b *storage.Header, retVal *sto } func (e E) Eq(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is a scalar. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -1140,13 +1140,13 @@ func (e E) Eq(t reflect.Type, a *storage.Header, b *storage.Header, retVal *stor } func (e E) Ne(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is a scalar. a: %d, b %d", a.Len(), b.Len()) + return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -1426,8 +1426,8 @@ func (e E) Ne(t reflect.Type, a *storage.Header, b *storage.Header, retVal *stor } func (e E) GtSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -1618,8 +1618,8 @@ func (e E) GtSame(t reflect.Type, a *storage.Header, b *storage.Header) (err err } func (e E) GteSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -1810,8 +1810,8 @@ func (e E) GteSame(t reflect.Type, a *storage.Header, b *storage.Header) (err er } func (e E) LtSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -2002,8 +2002,8 @@ func (e E) LtSame(t reflect.Type, a *storage.Header, b *storage.Header) (err err } func (e E) LteSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -2194,8 +2194,8 @@ func (e E) LteSame(t reflect.Type, a *storage.Header, b *storage.Header) (err er } func (e E) EqSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Bool: @@ -2442,8 +2442,8 @@ func (e E) EqSame(t reflect.Type, a *storage.Header, b *storage.Header) (err err } func (e E) NeSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Bool: @@ -2690,13 +2690,13 @@ func (e E) NeSame(t reflect.Type, a *storage.Header, b *storage.Header) (err err } func (e E) GtIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is scalar while len(a): %d, 
len(b) %d", a.Len(), b.Len()) + return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -2888,13 +2888,13 @@ func (e E) GtIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal * } func (e E) GteIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -3086,13 +3086,13 @@ func (e E) GteIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal } func (e E) LtIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -3284,13 +3284,13 @@ func (e E) LtIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal * } func (e E) LteIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -3482,13 +3482,13 @@ func (e E) LteIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal } func (e E) EqIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -3750,13 +3750,13 @@ func (e E) EqIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal * } func (e E) NeIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) - rs := isScalar(retVal) + as := isScalar(a, t) + bs := isScalar(b, t) + rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { - return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.Len(), b.Len()) + return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { @@ -4018,8 +4018,8 @@ func (e E) NeIter(t reflect.Type, a *storage.Header, b 
*storage.Header, retVal * } func (e E) GtSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -4210,8 +4210,8 @@ func (e E) GtSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait } func (e E) GteSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -4402,8 +4402,8 @@ func (e E) GteSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait } func (e E) LtSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -4594,8 +4594,8 @@ func (e E) LtSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait } func (e E) LteSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Int: @@ -4786,8 +4786,8 @@ func (e E) LteSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait } func (e E) EqSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Bool: @@ -5034,8 +5034,8 @@ func (e E) EqSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait } func (e E) NeSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { - as := isScalar(a) - bs := isScalar(b) + as := isScalar(a, t) + bs := isScalar(b, t) switch t { case Bool: diff --git a/internal/execution/eng_map.go b/internal/execution/eng_map.go index 17ca682..81cb2c4 100644 --- a/internal/execution/eng_map.go +++ b/internal/execution/eng_map.go @@ -11,7 +11,7 @@ import ( ) func (e E) Map(t reflect.Type, fn interface{}, a *storage.Header, incr bool) (err error) { - as := isScalar(a) + as := isScalar(a, t) switch t { case Bool: var f0 func(bool) bool diff --git a/internal/storage/consts.go b/internal/storage/consts.go new file mode 100644 index 0000000..7304ac5 --- /dev/null +++ b/internal/storage/consts.go @@ -0,0 +1,29 @@ +// Code generated by genlib2. DO NOT EDIT. 
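The two-argument isScalar threaded through every generated function above is the crux of this refactor: once a Header is nothing but raw bytes, "is this a scalar" can only be answered relative to an element type. A minimal, self-contained sketch of the idea (header, typedLen and isScalar below are illustrative stand-ins, not the package's own identifiers):

package main

import (
	"fmt"
	"reflect"
)

// header is a stand-in for storage.Header after this patch:
// the only field is the raw byte buffer.
type header struct{ raw []byte }

// typedLen mirrors Header.TypedLen: the element count is derived
// by dividing the byte length by the element size.
func (h *header) typedLen(t reflect.Type) int { return len(h.raw) / int(t.Size()) }

// isScalar now needs the element type, since a byte count alone
// no longer identifies "exactly one element".
func isScalar(h *header, t reflect.Type) bool { return h.typedLen(t) == 1 }

func main() {
	h := &header{raw: make([]byte, 8)}
	fmt.Println(isScalar(h, reflect.TypeOf(float64(0)))) // true: 8/8 == 1
	fmt.Println(isScalar(h, reflect.TypeOf(float32(0)))) // false: 8/4 == 2
}

This is why every generated call site gains the extra reflect.Type argument: the length bookkeeping that used to live in the Header's L field now falls out of len(Raw) and the type's size.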
+ +package storage + +import ( + "reflect" + "unsafe" +) + +var ( + bType = reflect.TypeOf(bool(false)) + iType = reflect.TypeOf(int(0)) + i8Type = reflect.TypeOf(int8(0)) + i16Type = reflect.TypeOf(int16(0)) + i32Type = reflect.TypeOf(int32(0)) + i64Type = reflect.TypeOf(int64(0)) + uType = reflect.TypeOf(uint(0)) + u8Type = reflect.TypeOf(uint8(0)) + u16Type = reflect.TypeOf(uint16(0)) + u32Type = reflect.TypeOf(uint32(0)) + u64Type = reflect.TypeOf(uint64(0)) + uintptrType = reflect.TypeOf(uintptr(0)) + f32Type = reflect.TypeOf(float32(0)) + f64Type = reflect.TypeOf(float64(0)) + c64Type = reflect.TypeOf(complex64(0)) + c128Type = reflect.TypeOf(complex128(0)) + strType = reflect.TypeOf(string("")) + unsafePointerType = reflect.TypeOf(unsafe.Pointer(nil)) +) diff --git a/internal/storage/getset.go b/internal/storage/getset.go index 879a5e3..c60d61c 100644 --- a/internal/storage/getset.go +++ b/internal/storage/getset.go @@ -6,108 +6,144 @@ import "unsafe" /* bool */ -func (h *Header) Bools() []bool { return *(*[]bool)(unsafe.Pointer(h)) } +func (h *Header) Bools() []bool { + return (*(*[]bool)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(bType):h.TypedLen(bType)] +} func (h *Header) SetB(i int, x bool) { h.Bools()[i] = x } func (h *Header) GetB(i int) bool { return h.Bools()[i] } /* int */ -func (h *Header) Ints() []int { return *(*[]int)(unsafe.Pointer(h)) } +func (h *Header) Ints() []int { + return (*(*[]int)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(iType):h.TypedLen(iType)] +} func (h *Header) SetI(i int, x int) { h.Ints()[i] = x } func (h *Header) GetI(i int) int { return h.Ints()[i] } /* int8 */ -func (h *Header) Int8s() []int8 { return *(*[]int8)(unsafe.Pointer(h)) } +func (h *Header) Int8s() []int8 { + return (*(*[]int8)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(i8Type):h.TypedLen(i8Type)] +} func (h *Header) SetI8(i int, x int8) { h.Int8s()[i] = x } func (h *Header) GetI8(i int) int8 { return h.Int8s()[i] } /* int16 */ -func (h *Header) Int16s() []int16 { return *(*[]int16)(unsafe.Pointer(h)) } +func (h *Header) Int16s() []int16 { + return (*(*[]int16)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(i16Type):h.TypedLen(i16Type)] +} func (h *Header) SetI16(i int, x int16) { h.Int16s()[i] = x } func (h *Header) GetI16(i int) int16 { return h.Int16s()[i] } /* int32 */ -func (h *Header) Int32s() []int32 { return *(*[]int32)(unsafe.Pointer(h)) } +func (h *Header) Int32s() []int32 { + return (*(*[]int32)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(i32Type):h.TypedLen(i32Type)] +} func (h *Header) SetI32(i int, x int32) { h.Int32s()[i] = x } func (h *Header) GetI32(i int) int32 { return h.Int32s()[i] } /* int64 */ -func (h *Header) Int64s() []int64 { return *(*[]int64)(unsafe.Pointer(h)) } +func (h *Header) Int64s() []int64 { + return (*(*[]int64)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(i64Type):h.TypedLen(i64Type)] +} func (h *Header) SetI64(i int, x int64) { h.Int64s()[i] = x } func (h *Header) GetI64(i int) int64 { return h.Int64s()[i] } /* uint */ -func (h *Header) Uints() []uint { return *(*[]uint)(unsafe.Pointer(h)) } +func (h *Header) Uints() []uint { + return (*(*[]uint)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(uType):h.TypedLen(uType)] +} func (h *Header) SetU(i int, x uint) { h.Uints()[i] = x } func (h *Header) GetU(i int) uint { return h.Uints()[i] } /* uint8 */ -func (h *Header) Uint8s() []uint8 { return *(*[]uint8)(unsafe.Pointer(h)) } +func (h *Header) Uint8s() []uint8 { + return (*(*[]uint8)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(u8Type):h.TypedLen(u8Type)] +} func (h *Header) SetU8(i int, x uint8) { h.Uint8s()[i] 
= x } func (h *Header) GetU8(i int) uint8 { return h.Uint8s()[i] } /* uint16 */ -func (h *Header) Uint16s() []uint16 { return *(*[]uint16)(unsafe.Pointer(h)) } +func (h *Header) Uint16s() []uint16 { + return (*(*[]uint16)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(u16Type):h.TypedLen(u16Type)] +} func (h *Header) SetU16(i int, x uint16) { h.Uint16s()[i] = x } func (h *Header) GetU16(i int) uint16 { return h.Uint16s()[i] } /* uint32 */ -func (h *Header) Uint32s() []uint32 { return *(*[]uint32)(unsafe.Pointer(h)) } +func (h *Header) Uint32s() []uint32 { + return (*(*[]uint32)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(u32Type):h.TypedLen(u32Type)] +} func (h *Header) SetU32(i int, x uint32) { h.Uint32s()[i] = x } func (h *Header) GetU32(i int) uint32 { return h.Uint32s()[i] } /* uint64 */ -func (h *Header) Uint64s() []uint64 { return *(*[]uint64)(unsafe.Pointer(h)) } +func (h *Header) Uint64s() []uint64 { + return (*(*[]uint64)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(u64Type):h.TypedLen(u64Type)] +} func (h *Header) SetU64(i int, x uint64) { h.Uint64s()[i] = x } func (h *Header) GetU64(i int) uint64 { return h.Uint64s()[i] } /* uintptr */ -func (h *Header) Uintptrs() []uintptr { return *(*[]uintptr)(unsafe.Pointer(h)) } +func (h *Header) Uintptrs() []uintptr { + return (*(*[]uintptr)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(uintptrType):h.TypedLen(uintptrType)] +} func (h *Header) SetUintptr(i int, x uintptr) { h.Uintptrs()[i] = x } func (h *Header) GetUintptr(i int) uintptr { return h.Uintptrs()[i] } /* float32 */ -func (h *Header) Float32s() []float32 { return *(*[]float32)(unsafe.Pointer(h)) } +func (h *Header) Float32s() []float32 { + return (*(*[]float32)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(f32Type):h.TypedLen(f32Type)] +} func (h *Header) SetF32(i int, x float32) { h.Float32s()[i] = x } func (h *Header) GetF32(i int) float32 { return h.Float32s()[i] } /* float64 */ -func (h *Header) Float64s() []float64 { return *(*[]float64)(unsafe.Pointer(h)) } +func (h *Header) Float64s() []float64 { + return (*(*[]float64)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(f64Type):h.TypedLen(f64Type)] +} func (h *Header) SetF64(i int, x float64) { h.Float64s()[i] = x } func (h *Header) GetF64(i int) float64 { return h.Float64s()[i] } /* complex64 */ -func (h *Header) Complex64s() []complex64 { return *(*[]complex64)(unsafe.Pointer(h)) } +func (h *Header) Complex64s() []complex64 { + return (*(*[]complex64)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(c64Type):h.TypedLen(c64Type)] +} func (h *Header) SetC64(i int, x complex64) { h.Complex64s()[i] = x } func (h *Header) GetC64(i int) complex64 { return h.Complex64s()[i] } /* complex128 */ -func (h *Header) Complex128s() []complex128 { return *(*[]complex128)(unsafe.Pointer(h)) } +func (h *Header) Complex128s() []complex128 { + return (*(*[]complex128)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(c128Type):h.TypedLen(c128Type)] +} func (h *Header) SetC128(i int, x complex128) { h.Complex128s()[i] = x } func (h *Header) GetC128(i int) complex128 { return h.Complex128s()[i] } /* string */ -func (h *Header) Strings() []string { return *(*[]string)(unsafe.Pointer(h)) } +func (h *Header) Strings() []string { + return (*(*[]string)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(strType):h.TypedLen(strType)] +} func (h *Header) SetStr(i int, x string) { h.Strings()[i] = x } func (h *Header) GetStr(i int) string { return h.Strings()[i] } /* unsafe.Pointer */ -func (h *Header) UnsafePointers() []unsafe.Pointer { return *(*[]unsafe.Pointer)(unsafe.Pointer(h)) } +func (h *Header) UnsafePointers() []unsafe.Pointer { + 
return (*(*[]unsafe.Pointer)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(unsafePointerType):h.TypedLen(unsafePointerType)] +} func (h *Header) SetUnsafePointer(i int, x unsafe.Pointer) { h.UnsafePointers()[i] = x } func (h *Header) GetUnsafePointer(i int) unsafe.Pointer { return h.UnsafePointers()[i] } diff --git a/internal/storage/header.go b/internal/storage/header.go index 249f2fc..99414a2 100644 --- a/internal/storage/header.go +++ b/internal/storage/header.go @@ -9,22 +9,22 @@ import ( // With this, we wouldn't need to keep the uintptr. // This usually means additional pressure for the GC though, especially when passing around Headers type Header struct { - Ptr unsafe.Pointer - L int - C int + Raw []byte } -func (h *Header) Pointer() unsafe.Pointer { return h.Ptr } -func (h *Header) Len() int { return h.L } +func (h *Header) TypedLen(t reflect.Type) int { + sz := int(t.Size()) + return len(h.Raw) / sz +} func Copy(t reflect.Type, dst, src *Header) int { - if dst.L == 0 || src.L == 0 { + if len(dst.Raw) == 0 || len(src.Raw) == 0 { return 0 } - n := src.L - if dst.L < n { - n = dst.L + n := src.TypedLen(t) + if len(dst.Raw) < n { + n = dst.TypedLen(t) } // handle struct{} type @@ -37,15 +37,15 @@ func Copy(t reflect.Type, dst, src *Header) int { // otherwise, just copy bytes. // FUTURE: implement memmove - dstBA := AsByteSlice(dst, t) - srcBA := AsByteSlice(src, t) + dstBA := dst.Raw + srcBA := src.Raw copied := copy(dstBA, srcBA) return copied / int(t.Size()) } func CopySliced(t reflect.Type, dst *Header, dstart, dend int, src *Header, sstart, send int) int { - dstBA := AsByteSlice(dst, t) - srcBA := AsByteSlice(src, t) + dstBA := dst.Raw + srcBA := src.Raw size := int(t.Size()) ds := dstart * size @@ -57,8 +57,8 @@ func CopySliced(t reflect.Type, dst *Header, dstart, dend int, src *Header, ssta } func Fill(t reflect.Type, dst, src *Header) int { - dstBA := AsByteSlice(dst, t) - srcBA := AsByteSlice(src, t) + dstBA := dst.Raw + srcBA := src.Raw size := int(t.Size()) lenSrc := len(srcBA) @@ -74,8 +74,8 @@ func Fill(t reflect.Type, dst, src *Header) int { } func CopyIter(t reflect.Type, dst, src *Header, diter, siter Iterator) int { - dstBA := AsByteSlice(dst, t) - srcBA := AsByteSlice(src, t) + dstBA := dst.Raw + srcBA := src.Raw size := int(t.Size()) var idx, jdx, i, j, count int @@ -102,17 +102,30 @@ func CopyIter(t reflect.Type, dst, src *Header, diter, siter Iterator) int { return count } -func AsByteSlice(a *Header, t reflect.Type) []byte { - size := a.L * int(t.Size()) - b := make([]byte, 0) - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - hdr.Data = uintptr(a.Ptr) - hdr.Cap = size - hdr.Len = size - return b -} - // Element gets the pointer of ith element func ElementAt(i int, base unsafe.Pointer, typeSize uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + uintptr(i)*typeSize) } + +// AsByteSlice takes a slice of anything and returns a casted-as-byte-slice view of it. +// This function panics if input is not a slice. 
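The generated accessors above all follow one pattern: reinterpret the slice header of h.Raw as a []T header, then immediately reslice with the three-index form so both len and cap count elements rather than bytes (the intermediate header briefly has byte-sized len/cap, which the reslice corrects). A sketch of that pattern for float64, under the same assumptions the patch makes, namely that the buffer is suitably aligned and its byte length is a multiple of the element size; float64sOf is a hypothetical stand-in, not a function in the package:

package main

import (
	"fmt"
	"unsafe"
)

// float64sOf reinterprets a []byte as a []float64, the same trick the
// generated accessors in getset.go use. The inner cast reuses raw's
// slice header, whose Len/Cap still count bytes, so the three-index
// reslice is what pins both to the element count.
func float64sOf(raw []byte) []float64 {
	n := len(raw) / int(unsafe.Sizeof(float64(0)))
	return (*(*[]float64)(unsafe.Pointer(&raw)))[:n:n]
}

func main() {
	raw := make([]byte, 4*8) // room for 4 float64s
	fs := float64sOf(raw)
	fs[0] = 3.14
	fmt.Println(len(fs), fs[0]) // 4 3.14
}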
+func AsByteSlice(x interface{}) []byte { + xV := reflect.ValueOf(x) + xT := reflect.TypeOf(x).Elem() // expects a []T + + hdr := reflect.SliceHeader{ + Data: xV.Pointer(), + Len: xV.Len() * int(xT.Size()), + Cap: xV.Cap() * int(xT.Size()), + } + return *(*[]byte)(unsafe.Pointer(&hdr)) +} + +func FromMemory(ptr uintptr, memsize uintptr) []byte { + hdr := reflect.SliceHeader{ + Data: ptr, + Len: int(memsize), + Cap: int(memsize), + } + return *(*[]byte)(unsafe.Pointer(&hdr)) +} diff --git a/known_race_test.go b/known_race_test.go index cb9e265..f6d5616 100644 --- a/known_race_test.go +++ b/known_race_test.go @@ -1,3 +1,4 @@ +// +build ignore // +build !race package tensor diff --git a/perf.go b/perf.go index 2d20df2..bc5c3aa 100644 --- a/perf.go +++ b/perf.go @@ -56,9 +56,7 @@ func returnHeader(hdr *storage.Header) { } func destroyHeader(hdr *storage.Header) { - hdr.Ptr = nil - hdr.L = 0 - hdr.C = 0 + hdr.Raw = nil } var densePool = make(chan *Dense, PoolSize) @@ -92,10 +90,7 @@ func ReturnTensor(t Tensor) { // array reset tt.t = Dtype{} - tt.array.Ptr = nil - tt.array.L = 0 - tt.array.C = 0 - tt.array.v = nil + tt.array.Header.Raw = nil // engine and flag reset tt.e = StdEng{} diff --git a/sparse.go b/sparse.go index 3126843..1a9da7c 100644 --- a/sparse.go +++ b/sparse.go @@ -2,7 +2,6 @@ package tensor import ( "reflect" - "unsafe" "sort" @@ -29,7 +28,7 @@ type coo struct { data array } -func (c *coo) Len() int { return c.data.L } +func (c *coo) Len() int { return c.data.Len() } func (c *coo) Less(i, j int) bool { if c.o.IsColMajor() { return c.colMajorLess(i, j) @@ -187,7 +186,7 @@ func (t *CS) Strides() []int { return nil } func (t *CS) Dtype() Dtype { return t.t } func (t *CS) Dims() int { return 2 } func (t *CS) Size() int { return t.s.TotalSize() } -func (t *CS) DataSize() int { return t.L } +func (t *CS) DataSize() int { return t.Len() } func (t *CS) Engine() Engine { return t.e } func (t *CS) DataOrder() DataOrder { return t.o } @@ -289,7 +288,7 @@ func (t *CS) Clone() interface{} { retVal.indptr = make([]int, len(t.indptr)) copy(retVal.indices, t.indices) copy(retVal.indptr, t.indptr) - retVal.array = makeArray(t.t, t.array.L) + retVal.array = makeArray(t.t, t.array.Len()) copyArray(&retVal.array, &t.array) retVal.e = t.e return retVal @@ -298,12 +297,11 @@ func (t *CS) Clone() interface{} { func (t *CS) IsScalar() bool { return false } func (t *CS) ScalarValue() interface{} { panic("Sparse Matrices cannot represent Scalar Values") } -func (t *CS) MemSize() uintptr { return uintptr(calcMemSize(t.t, t.array.L)) } -func (t *CS) Uintptr() uintptr { return uintptr(t.array.Ptr) } -func (t *CS) Pointer() unsafe.Pointer { return t.array.Ptr } +func (t *CS) MemSize() uintptr { return uintptr(calcMemSize(t.t, t.array.Len())) } +func (t *CS) Uintptr() uintptr { return t.array.Uintptr() } // NonZeroes returns the nonzeroes. In academic literature this is often written as NNZ. 
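FromMemory above is what lets externally allocated memory (the CUDA use case this PR is building toward) flow into the raw-bytes world: a pointer and a size are dressed up as a []byte. A small usage sketch, with fromMemory as a local mirror of the patch's function; note this is only safe for memory whose lifetime the caller manages, since the garbage collector cannot see through the uintptr:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// fromMemory mirrors storage.FromMemory: it builds a []byte view over
// a raw pointer + size so the rest of the package can treat it like
// any other Header.Raw. The caller must keep the memory alive.
func fromMemory(ptr, memsize uintptr) []byte {
	hdr := reflect.SliceHeader{Data: ptr, Len: int(memsize), Cap: int(memsize)}
	return *(*[]byte)(unsafe.Pointer(&hdr))
}

func main() {
	var backing [16]byte
	backing[0] = 42
	raw := fromMemory(uintptr(unsafe.Pointer(&backing[0])), unsafe.Sizeof(backing))
	raw[1] = 7                                // writes through to backing
	fmt.Println(len(raw), raw[0], backing[1]) // 16 42 7
}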
-func (t *CS) NonZeroes() int { return t.L } +func (t *CS) NonZeroes() int { return t.Len() } func (t *CS) RequiresIterator() bool { return true } func (t *CS) Iterator() Iterator { return NewFlatSparseIterator(t) } diff --git a/tensor.go b/tensor.go index ff7e347..071ca67 100644 --- a/tensor.go +++ b/tensor.go @@ -6,7 +6,6 @@ import ( "encoding/gob" "fmt" "io" - "unsafe" "github.com/pkg/errors" ) @@ -62,10 +61,8 @@ type Tensor interface { // engine/memory related stuff // all Tensors should be able to be expressed of as a slab of memory // Note: the size of each element can be acquired by T.Dtype().Size() + Memory // Tensors all implement Memory Engine() Engine // Engine can be nil - MemSize() uintptr // the size in memory - Uintptr() uintptr // the pointer to the first element, as a uintptr - Pointer() unsafe.Pointer // the pointer to the first elemment as a unsafe.Ponter IsNativelyAccessible() bool // Can Go access the memory IsManuallyManaged() bool // Must Go manage the memory diff --git a/testutils_test.go b/testutils_test.go index 20cdda9..3a0d466 100644 --- a/testutils_test.go +++ b/testutils_test.go @@ -477,17 +477,10 @@ func (e dummyEngine) Memclr(mem Memory) {} func (e dummyEngine) Memcpy(dst, src Memory) error { if e { var a, b storage.Header - a.Ptr = src.Pointer() - a.L = int(src.MemSize()) - a.C = int(src.MemSize()) + a.Raw = storage.FromMemory(src.Uintptr(), src.MemSize()) + b.Raw = storage.FromMemory(dst.Uintptr(), dst.MemSize()) - b.Ptr = dst.Pointer() - b.L = int(dst.MemSize()) - b.C = int(dst.MemSize()) - - abs := *(*[]byte)(unsafe.Pointer(&a)) - bbs := *(*[]byte)(unsafe.Pointer(&b)) - copy(bbs, abs) + copy(b.Raw, a.Raw) return nil } return errors.New("Unable to copy ")
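With both endpoints expressible as byte slices, the test engine's Memcpy collapses to a single built-in copy, as the testutils_test.go hunk above shows: no unsafe.Pointer juggling on the Header itself. A runnable sketch of the same shape, where memory, buf, fromMemory and memcpy are illustrative stand-ins for the real Memory interface and storage helpers:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// memory captures just the two methods the post-patch Memcpy relies on.
type memory interface {
	Uintptr() uintptr
	MemSize() uintptr
}

type buf []byte

func (b buf) Uintptr() uintptr { return uintptr(unsafe.Pointer(&b[0])) }
func (b buf) MemSize() uintptr { return uintptr(len(b)) }

// fromMemory has the same shape as storage.FromMemory in this patch.
func fromMemory(ptr, memsize uintptr) []byte {
	hdr := reflect.SliceHeader{Data: ptr, Len: int(memsize), Cap: int(memsize)}
	return *(*[]byte)(unsafe.Pointer(&hdr))
}

// memcpy mirrors dummyEngine.Memcpy: both sides become plain byte
// slices and the transfer is one built-in copy.
func memcpy(dst, src memory) {
	copy(fromMemory(dst.Uintptr(), dst.MemSize()), fromMemory(src.Uintptr(), src.MemSize()))
}

func main() {
	src := buf{1, 2, 3, 4}
	dst := make(buf, 4)
	memcpy(dst, src)
	fmt.Println(dst) // [1 2 3 4]
}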