diff --git a/ecc/bls12-377/g1_test.go b/ecc/bls12-377/g1_test.go
index c2558e677..e53959de9 100644
--- a/ecc/bls12-377/g1_test.go
+++ b/ecc/bls12-377/g1_test.go
@@ -718,20 +718,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls12-377/g2_test.go b/ecc/bls12-377/g2_test.go
index 6e7876ee6..6e1339758 100644
--- a/ecc/bls12-377/g2_test.go
+++ b/ecc/bls12-377/g2_test.go
@@ -707,20 +707,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G2Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g2Gen, &scalar)
-		}
-	})
-
-	var glv G2Jac
+	var point G2Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g2Gen, &scalar)
+			point.ScalarMultiplication(&g2Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls12-377/multiexp_test.go b/ecc/bls12-377/multiexp_test.go
index 712b8f7d3..65c8a7b75 100644
--- a/ecc/bls12-377/multiexp_test.go
+++ b/ecc/bls12-377/multiexp_test.go
@@ -19,7 +19,6 @@ package bls12377
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
 	"math/rand"
 	"runtime"
 	"sync"
@@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG1(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G1Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G1Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG1(samplePoints[:])
 
 	var testPoint G1Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
@@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG2(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G2Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G2Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG2(samplePoints[:])
 
 	var testPoint G2Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/ecc/bls12-377/pairing_test.go b/ecc/bls12-377/pairing_test.go
index ab01c347e..b3c210082 100644
--- a/ecc/bls12-377/pairing_test.go
+++ b/ecc/bls12-377/pairing_test.go
@@ -449,18 +449,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/ecc/bls12-378/g1_test.go b/ecc/bls12-378/g1_test.go
index 8fd0e260b..08087e491 100644
--- a/ecc/bls12-378/g1_test.go
+++ b/ecc/bls12-378/g1_test.go
@@ -718,20 +718,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls12-378/g2_test.go b/ecc/bls12-378/g2_test.go
index ac43de860..8f3140359 100644
--- a/ecc/bls12-378/g2_test.go
+++ b/ecc/bls12-378/g2_test.go
@@ -707,20 +707,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G2Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g2Gen, &scalar)
-		}
-	})
-
-	var glv G2Jac
+	var point G2Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g2Gen, &scalar)
+			point.ScalarMultiplication(&g2Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls12-378/multiexp_test.go b/ecc/bls12-378/multiexp_test.go
index ad7d9474d..ae05e3e70 100644
--- a/ecc/bls12-378/multiexp_test.go
+++ b/ecc/bls12-378/multiexp_test.go
@@ -19,7 +19,6 @@ package bls12378
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
"math/rand" "runtime" "sync" @@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con func BenchmarkMultiExpG1(b *testing.B) { const ( - pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + pow = 21 nbSamples = 1 << pow ) var ( - samplePoints [nbSamples]G1Affine - sampleScalars [nbSamples]fr.Element - sampleScalarsSmallValues [nbSamples]fr.Element - sampleScalarsRedundant [nbSamples]fr.Element + samplePoints [nbSamples]G1Affine + sampleScalars [nbSamples]fr.Element ) fillBenchScalars(sampleScalars[:]) - copy(sampleScalarsSmallValues[:], sampleScalars[:]) - copy(sampleScalarsRedundant[:], sampleScalars[:]) - - // this means first chunk is going to have more work to do and should be split into several go routines - for i := 0; i < len(sampleScalarsSmallValues); i++ { - if i%5 == 0 { - sampleScalarsSmallValues[i].SetZero() - sampleScalarsSmallValues[i][0] = 1 - } - } - - // bad case for batch affine because scalar distribution might look uniform - // but over batchSize windows, we may hit a lot of conflicts and force the msm-affine - // to process small batches of additions to flush its queue of conflicted points. - for i := 0; i < len(sampleScalarsRedundant); i += 100 { - for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ { - sampleScalarsRedundant[j] = sampleScalarsRedundant[i] - } - } fillBenchBasesG1(samplePoints[:]) var testPoint G1Affine - for i := 5; i <= pow; i++ { + for i := 1; i <= pow; i++ { using := 1 << i b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { @@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) { testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{}) } }) - - b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{}) - } - }) - - b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{}) - } - }) } } @@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con func BenchmarkMultiExpG2(b *testing.B) { const ( - pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + pow = 21 nbSamples = 1 << pow ) var ( - samplePoints [nbSamples]G2Affine - sampleScalars [nbSamples]fr.Element - sampleScalarsSmallValues [nbSamples]fr.Element - sampleScalarsRedundant [nbSamples]fr.Element + samplePoints [nbSamples]G2Affine + sampleScalars [nbSamples]fr.Element ) fillBenchScalars(sampleScalars[:]) - copy(sampleScalarsSmallValues[:], sampleScalars[:]) - copy(sampleScalarsRedundant[:], sampleScalars[:]) - - // this means first chunk is going to have more work to do and should be split into several go routines - for i := 0; i < len(sampleScalarsSmallValues); i++ { - if i%5 == 0 { - sampleScalarsSmallValues[i].SetZero() - sampleScalarsSmallValues[i][0] = 1 - } - } - - // bad case for batch affine because scalar distribution might look uniform - // but over batchSize windows, we may hit a lot of conflicts and force the msm-affine - // to process small batches of additions to flush its queue of conflicted points. 
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG2(samplePoints[:])
 
 	var testPoint G2Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/ecc/bls12-378/pairing_test.go b/ecc/bls12-378/pairing_test.go
index 9a78abecd..9d1fd9c39 100644
--- a/ecc/bls12-378/pairing_test.go
+++ b/ecc/bls12-378/pairing_test.go
@@ -449,18 +449,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/ecc/bls12-381/g1_test.go b/ecc/bls12-381/g1_test.go
index 7f0fab5a1..23e79f82d 100644
--- a/ecc/bls12-381/g1_test.go
+++ b/ecc/bls12-381/g1_test.go
@@ -718,20 +718,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls12-381/g2_test.go b/ecc/bls12-381/g2_test.go
index d414651c6..6a73f0d08 100644
--- a/ecc/bls12-381/g2_test.go
+++ b/ecc/bls12-381/g2_test.go
@@ -707,20 +707,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G2Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g2Gen, &scalar)
-		}
-	})
-
-	var glv G2Jac
+	var point G2Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g2Gen, &scalar)
+			point.ScalarMultiplication(&g2Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls12-381/multiexp_test.go b/ecc/bls12-381/multiexp_test.go
index 92dd4c7eb..19c51cb54 100644
--- a/ecc/bls12-381/multiexp_test.go
+++ b/ecc/bls12-381/multiexp_test.go
@@ -19,7 +19,6 @@ package bls12381
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
"math/rand" "runtime" "sync" @@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con func BenchmarkMultiExpG1(b *testing.B) { const ( - pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + pow = 21 nbSamples = 1 << pow ) var ( - samplePoints [nbSamples]G1Affine - sampleScalars [nbSamples]fr.Element - sampleScalarsSmallValues [nbSamples]fr.Element - sampleScalarsRedundant [nbSamples]fr.Element + samplePoints [nbSamples]G1Affine + sampleScalars [nbSamples]fr.Element ) fillBenchScalars(sampleScalars[:]) - copy(sampleScalarsSmallValues[:], sampleScalars[:]) - copy(sampleScalarsRedundant[:], sampleScalars[:]) - - // this means first chunk is going to have more work to do and should be split into several go routines - for i := 0; i < len(sampleScalarsSmallValues); i++ { - if i%5 == 0 { - sampleScalarsSmallValues[i].SetZero() - sampleScalarsSmallValues[i][0] = 1 - } - } - - // bad case for batch affine because scalar distribution might look uniform - // but over batchSize windows, we may hit a lot of conflicts and force the msm-affine - // to process small batches of additions to flush its queue of conflicted points. - for i := 0; i < len(sampleScalarsRedundant); i += 100 { - for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ { - sampleScalarsRedundant[j] = sampleScalarsRedundant[i] - } - } fillBenchBasesG1(samplePoints[:]) var testPoint G1Affine - for i := 5; i <= pow; i++ { + for i := 1; i <= pow; i++ { using := 1 << i b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { @@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) { testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{}) } }) - - b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{}) - } - }) - - b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{}) - } - }) } } @@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con func BenchmarkMultiExpG2(b *testing.B) { const ( - pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + pow = 21 nbSamples = 1 << pow ) var ( - samplePoints [nbSamples]G2Affine - sampleScalars [nbSamples]fr.Element - sampleScalarsSmallValues [nbSamples]fr.Element - sampleScalarsRedundant [nbSamples]fr.Element + samplePoints [nbSamples]G2Affine + sampleScalars [nbSamples]fr.Element ) fillBenchScalars(sampleScalars[:]) - copy(sampleScalarsSmallValues[:], sampleScalars[:]) - copy(sampleScalarsRedundant[:], sampleScalars[:]) - - // this means first chunk is going to have more work to do and should be split into several go routines - for i := 0; i < len(sampleScalarsSmallValues); i++ { - if i%5 == 0 { - sampleScalarsSmallValues[i].SetZero() - sampleScalarsSmallValues[i][0] = 1 - } - } - - // bad case for batch affine because scalar distribution might look uniform - // but over batchSize windows, we may hit a lot of conflicts and force the msm-affine - // to process small batches of additions to flush its queue of conflicted points. 
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG2(samplePoints[:])
 
 	var testPoint G2Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/ecc/bls12-381/pairing_test.go b/ecc/bls12-381/pairing_test.go
index 48a8ade0a..e326a987a 100644
--- a/ecc/bls12-381/pairing_test.go
+++ b/ecc/bls12-381/pairing_test.go
@@ -449,18 +449,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/ecc/bls24-315/g1_test.go b/ecc/bls24-315/g1_test.go
index c2fed674d..608875740 100644
--- a/ecc/bls24-315/g1_test.go
+++ b/ecc/bls24-315/g1_test.go
@@ -718,20 +718,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls24-315/g2_test.go b/ecc/bls24-315/g2_test.go
index 9fb276cc1..2106d1158 100644
--- a/ecc/bls24-315/g2_test.go
+++ b/ecc/bls24-315/g2_test.go
@@ -707,20 +707,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G2Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g2Gen, &scalar)
-		}
-	})
-
-	var glv G2Jac
+	var point G2Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g2Gen, &scalar)
+			point.ScalarMultiplication(&g2Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls24-315/multiexp_test.go b/ecc/bls24-315/multiexp_test.go
index 7961763cf..e01a646e6 100644
--- a/ecc/bls24-315/multiexp_test.go
+++ b/ecc/bls24-315/multiexp_test.go
@@ -19,7 +19,6 @@ package bls24315
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
"math/rand" "runtime" "sync" @@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con func BenchmarkMultiExpG1(b *testing.B) { const ( - pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + pow = 21 nbSamples = 1 << pow ) var ( - samplePoints [nbSamples]G1Affine - sampleScalars [nbSamples]fr.Element - sampleScalarsSmallValues [nbSamples]fr.Element - sampleScalarsRedundant [nbSamples]fr.Element + samplePoints [nbSamples]G1Affine + sampleScalars [nbSamples]fr.Element ) fillBenchScalars(sampleScalars[:]) - copy(sampleScalarsSmallValues[:], sampleScalars[:]) - copy(sampleScalarsRedundant[:], sampleScalars[:]) - - // this means first chunk is going to have more work to do and should be split into several go routines - for i := 0; i < len(sampleScalarsSmallValues); i++ { - if i%5 == 0 { - sampleScalarsSmallValues[i].SetZero() - sampleScalarsSmallValues[i][0] = 1 - } - } - - // bad case for batch affine because scalar distribution might look uniform - // but over batchSize windows, we may hit a lot of conflicts and force the msm-affine - // to process small batches of additions to flush its queue of conflicted points. - for i := 0; i < len(sampleScalarsRedundant); i += 100 { - for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ { - sampleScalarsRedundant[j] = sampleScalarsRedundant[i] - } - } fillBenchBasesG1(samplePoints[:]) var testPoint G1Affine - for i := 5; i <= pow; i++ { + for i := 1; i <= pow; i++ { using := 1 << i b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { @@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) { testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{}) } }) - - b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{}) - } - }) - - b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{}) - } - }) } } @@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con func BenchmarkMultiExpG2(b *testing.B) { const ( - pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + pow = 21 nbSamples = 1 << pow ) var ( - samplePoints [nbSamples]G2Affine - sampleScalars [nbSamples]fr.Element - sampleScalarsSmallValues [nbSamples]fr.Element - sampleScalarsRedundant [nbSamples]fr.Element + samplePoints [nbSamples]G2Affine + sampleScalars [nbSamples]fr.Element ) fillBenchScalars(sampleScalars[:]) - copy(sampleScalarsSmallValues[:], sampleScalars[:]) - copy(sampleScalarsRedundant[:], sampleScalars[:]) - - // this means first chunk is going to have more work to do and should be split into several go routines - for i := 0; i < len(sampleScalarsSmallValues); i++ { - if i%5 == 0 { - sampleScalarsSmallValues[i].SetZero() - sampleScalarsSmallValues[i][0] = 1 - } - } - - // bad case for batch affine because scalar distribution might look uniform - // but over batchSize windows, we may hit a lot of conflicts and force the msm-affine - // to process small batches of additions to flush its queue of conflicted points. 
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG2(samplePoints[:])
 
 	var testPoint G2Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/ecc/bls24-315/pairing_test.go b/ecc/bls24-315/pairing_test.go
index 5ba06138c..a38ab1a1d 100644
--- a/ecc/bls24-315/pairing_test.go
+++ b/ecc/bls24-315/pairing_test.go
@@ -450,18 +450,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/ecc/bls24-317/g1_test.go b/ecc/bls24-317/g1_test.go
index c4909aa9c..2e7e2fca9 100644
--- a/ecc/bls24-317/g1_test.go
+++ b/ecc/bls24-317/g1_test.go
@@ -718,20 +718,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls24-317/g2_test.go b/ecc/bls24-317/g2_test.go
index 8d11b2422..acc1a9ff8 100644
--- a/ecc/bls24-317/g2_test.go
+++ b/ecc/bls24-317/g2_test.go
@@ -707,20 +707,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G2Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g2Gen, &scalar)
-		}
-	})
-
-	var glv G2Jac
+	var point G2Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g2Gen, &scalar)
+			point.ScalarMultiplication(&g2Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bls24-317/multiexp_test.go b/ecc/bls24-317/multiexp_test.go
index efd3fb370..7042983f6 100644
--- a/ecc/bls24-317/multiexp_test.go
+++ b/ecc/bls24-317/multiexp_test.go
@@ -19,7 +19,6 @@ package bls24317
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
"math/rand" "runtime" "sync" @@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con func BenchmarkMultiExpG1(b *testing.B) { const ( - pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + pow = 21 nbSamples = 1 << pow ) var ( - samplePoints [nbSamples]G1Affine - sampleScalars [nbSamples]fr.Element - sampleScalarsSmallValues [nbSamples]fr.Element - sampleScalarsRedundant [nbSamples]fr.Element + samplePoints [nbSamples]G1Affine + sampleScalars [nbSamples]fr.Element ) fillBenchScalars(sampleScalars[:]) - copy(sampleScalarsSmallValues[:], sampleScalars[:]) - copy(sampleScalarsRedundant[:], sampleScalars[:]) - - // this means first chunk is going to have more work to do and should be split into several go routines - for i := 0; i < len(sampleScalarsSmallValues); i++ { - if i%5 == 0 { - sampleScalarsSmallValues[i].SetZero() - sampleScalarsSmallValues[i][0] = 1 - } - } - - // bad case for batch affine because scalar distribution might look uniform - // but over batchSize windows, we may hit a lot of conflicts and force the msm-affine - // to process small batches of additions to flush its queue of conflicted points. - for i := 0; i < len(sampleScalarsRedundant); i += 100 { - for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ { - sampleScalarsRedundant[j] = sampleScalarsRedundant[i] - } - } fillBenchBasesG1(samplePoints[:]) var testPoint G1Affine - for i := 5; i <= pow; i++ { + for i := 1; i <= pow; i++ { using := 1 << i b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { @@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) { testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{}) } }) - - b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{}) - } - }) - - b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{}) - } - }) } } @@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con func BenchmarkMultiExpG2(b *testing.B) { const ( - pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + pow = 21 nbSamples = 1 << pow ) var ( - samplePoints [nbSamples]G2Affine - sampleScalars [nbSamples]fr.Element - sampleScalarsSmallValues [nbSamples]fr.Element - sampleScalarsRedundant [nbSamples]fr.Element + samplePoints [nbSamples]G2Affine + sampleScalars [nbSamples]fr.Element ) fillBenchScalars(sampleScalars[:]) - copy(sampleScalarsSmallValues[:], sampleScalars[:]) - copy(sampleScalarsRedundant[:], sampleScalars[:]) - - // this means first chunk is going to have more work to do and should be split into several go routines - for i := 0; i < len(sampleScalarsSmallValues); i++ { - if i%5 == 0 { - sampleScalarsSmallValues[i].SetZero() - sampleScalarsSmallValues[i][0] = 1 - } - } - - // bad case for batch affine because scalar distribution might look uniform - // but over batchSize windows, we may hit a lot of conflicts and force the msm-affine - // to process small batches of additions to flush its queue of conflicted points. 
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG2(samplePoints[:])
 
 	var testPoint G2Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/ecc/bls24-317/pairing_test.go b/ecc/bls24-317/pairing_test.go
index 1c995257d..19d2df978 100644
--- a/ecc/bls24-317/pairing_test.go
+++ b/ecc/bls24-317/pairing_test.go
@@ -450,18 +450,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/ecc/bn254/g1_test.go b/ecc/bn254/g1_test.go
index cfbb1ed07..b2f421a8c 100644
--- a/ecc/bn254/g1_test.go
+++ b/ecc/bn254/g1_test.go
@@ -679,20 +679,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bn254/g2_test.go b/ecc/bn254/g2_test.go
index a8de3339b..8aff5b834 100644
--- a/ecc/bn254/g2_test.go
+++ b/ecc/bn254/g2_test.go
@@ -706,20 +706,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G2Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g2Gen, &scalar)
-		}
-	})
-
-	var glv G2Jac
+	var point G2Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g2Gen, &scalar)
+			point.ScalarMultiplication(&g2Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bn254/multiexp_test.go b/ecc/bn254/multiexp_test.go
index e1f848e9f..b4c019c70 100644
--- a/ecc/bn254/multiexp_test.go
+++ b/ecc/bn254/multiexp_test.go
@@ -19,7 +19,6 @@ package bn254
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
 	"math/rand"
 	"runtime"
 	"sync"
@@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG1(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G1Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G1Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG1(samplePoints[:])
 
 	var testPoint G1Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
@@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG2(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G2Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G2Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG2(samplePoints[:])
 
 	var testPoint G2Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/ecc/bn254/pairing_test.go b/ecc/bn254/pairing_test.go
index e8107a44b..8869ca764 100644
--- a/ecc/bn254/pairing_test.go
+++ b/ecc/bn254/pairing_test.go
@@ -449,18 +449,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/ecc/bw6-633/g1_test.go b/ecc/bw6-633/g1_test.go
index 37ff9ba14..f626ccdc9 100644
--- a/ecc/bw6-633/g1_test.go
+++ b/ecc/bw6-633/g1_test.go
@@ -718,20 +718,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bw6-633/g2_test.go b/ecc/bw6-633/g2_test.go
index 271bc6ab0..6ad4d63bb 100644
--- a/ecc/bw6-633/g2_test.go
+++ b/ecc/bw6-633/g2_test.go
@@ -688,20 +688,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G2Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g2Gen, &scalar)
-		}
-	})
-
-	var glv G2Jac
+	var point G2Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g2Gen, &scalar)
+			point.ScalarMultiplication(&g2Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bw6-633/multiexp_test.go b/ecc/bw6-633/multiexp_test.go
index 1e45861b7..b16554c93 100644
--- a/ecc/bw6-633/multiexp_test.go
+++ b/ecc/bw6-633/multiexp_test.go
@@ -19,7 +19,6 @@ package bw6633
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
 	"math/rand"
 	"runtime"
 	"sync"
@@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG1(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G1Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G1Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG1(samplePoints[:])
 
 	var testPoint G1Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
@@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG2(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G2Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G2Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG2(samplePoints[:])
 
 	var testPoint G2Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/ecc/bw6-633/pairing_test.go b/ecc/bw6-633/pairing_test.go
index 70300a827..09786a379 100644
--- a/ecc/bw6-633/pairing_test.go
+++ b/ecc/bw6-633/pairing_test.go
@@ -450,18 +450,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/ecc/bw6-756/g1_test.go b/ecc/bw6-756/g1_test.go
index a93bed7b9..bce04f2ee 100644
--- a/ecc/bw6-756/g1_test.go
+++ b/ecc/bw6-756/g1_test.go
@@ -718,20 +718,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bw6-756/g2_test.go b/ecc/bw6-756/g2_test.go
index 723c1d143..02bf0fefb 100644
--- a/ecc/bw6-756/g2_test.go
+++ b/ecc/bw6-756/g2_test.go
@@ -688,20 +688,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G2Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g2Gen, &scalar)
-		}
-	})
-
-	var glv G2Jac
+	var point G2Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g2Gen, &scalar)
+			point.ScalarMultiplication(&g2Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/bw6-756/multiexp_test.go b/ecc/bw6-756/multiexp_test.go
index 64b515445..548721a9c 100644
--- a/ecc/bw6-756/multiexp_test.go
+++ b/ecc/bw6-756/multiexp_test.go
@@ -19,7 +19,6 @@ package bw6756
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
 	"math/rand"
 	"runtime"
 	"sync"
@@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG1(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G1Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G1Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBasesG1(samplePoints[:])
 
 	var testPoint G1Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
@@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG2(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G2Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G2Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
- for i := 0; i < len(sampleScalarsRedundant); i += 100 { - for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ { - sampleScalarsRedundant[j] = sampleScalarsRedundant[i] - } - } fillBenchBasesG2(samplePoints[:]) var testPoint G2Affine - for i := 5; i <= pow; i++ { + for i := 1; i <= pow; i++ { using := 1 << i b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { @@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) { testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{}) } }) - - b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{}) - } - }) - - b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{}) - } - }) } } diff --git a/ecc/bw6-756/pairing_test.go b/ecc/bw6-756/pairing_test.go index ee3774d96..0675098bb 100644 --- a/ecc/bw6-756/pairing_test.go +++ b/ecc/bw6-756/pairing_test.go @@ -450,18 +450,19 @@ func BenchmarkMultiPair(b *testing.B) { g1GenAff.FromJacobian(&g1Gen) g2GenAff.FromJacobian(&g2Gen) - n := 10 - P := make([]G1Affine, n) - Q := make([]G2Affine, n) + const pow = 10 - for i := 2; i <= n; i++ { - for j := 0; j < i; j++ { + for i := 4; i <= pow; i++ { + using := 1 << i + P := make([]G1Affine, using) + Q := make([]G2Affine, using) + for j := 0; j < using; j++ { P[j].Set(&g1GenAff) Q[j].Set(&g2GenAff) } - b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) { + b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for k := 0; k < b.N; k++ { Pair(P, Q) } }) diff --git a/ecc/bw6-761/g1_test.go b/ecc/bw6-761/g1_test.go index a17405039..ac2f5d5dc 100644 --- a/ecc/bw6-761/g1_test.go +++ b/ecc/bw6-761/g1_test.go @@ -718,20 +718,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) { scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10) scalar.Add(&scalar, r) - var doubleAndAdd G1Jac - - b.Run("double and add", func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - doubleAndAdd.mulWindowed(&g1Gen, &scalar) - } - }) - - var glv G1Jac + var point G1Jac b.Run("GLV", func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - glv.mulGLV(&g1Gen, &scalar) + point.ScalarMultiplication(&g1Gen, &scalar) } }) diff --git a/ecc/bw6-761/g2_test.go b/ecc/bw6-761/g2_test.go index 89f69def8..8df3be080 100644 --- a/ecc/bw6-761/g2_test.go +++ b/ecc/bw6-761/g2_test.go @@ -688,20 +688,11 @@ func BenchmarkG2JacScalarMultiplication(b *testing.B) { scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10) scalar.Add(&scalar, r) - var doubleAndAdd G2Jac - - b.Run("double and add", func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - doubleAndAdd.mulWindowed(&g2Gen, &scalar) - } - }) - - var glv G2Jac + var point G2Jac b.Run("GLV", func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - glv.mulGLV(&g2Gen, &scalar) + point.ScalarMultiplication(&g2Gen, &scalar) } }) diff --git a/ecc/bw6-761/multiexp_test.go b/ecc/bw6-761/multiexp_test.go index d98bd688b..b62b4c575 100644 --- a/ecc/bw6-761/multiexp_test.go +++ b/ecc/bw6-761/multiexp_test.go @@ -19,7 +19,6 @@ package bw6761 import ( "fmt" "math/big" - "math/bits" "math/rand" "runtime" "sync" @@ -315,43 
 func BenchmarkMultiExpG1(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G1Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G1Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 
 	fillBenchBasesG1(samplePoints[:])
 
 	var testPoint G1Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
@@ -727,43 +691,22 @@ func _innerMsmG2Reference(p *G2Jac, points []G2Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG2(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G2Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G2Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 
 	fillBenchBasesG2(samplePoints[:])
 
 	var testPoint G2Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -772,20 +715,6 @@ func BenchmarkMultiExpG2(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/ecc/bw6-761/pairing_test.go b/ecc/bw6-761/pairing_test.go
index 7d8e85771..ee1709f74 100644
--- a/ecc/bw6-761/pairing_test.go
+++ b/ecc/bw6-761/pairing_test.go
@@ -450,18 +450,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/ecc/secp256k1/g1_test.go b/ecc/secp256k1/g1_test.go
index f55a78e7c..3afb33c58 100644
--- a/ecc/secp256k1/g1_test.go
+++ b/ecc/secp256k1/g1_test.go
@@ -679,20 +679,11 @@ func BenchmarkG1JacScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd G1Jac
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&g1Gen, &scalar)
-		}
-	})
-
-	var glv G1Jac
+	var point G1Jac
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&g1Gen, &scalar)
+			point.ScalarMultiplication(&g1Gen, &scalar)
 		}
 	})
 
diff --git a/ecc/secp256k1/multiexp_test.go b/ecc/secp256k1/multiexp_test.go
index 87cbd1575..fb679f2ed 100644
--- a/ecc/secp256k1/multiexp_test.go
+++ b/ecc/secp256k1/multiexp_test.go
@@ -19,7 +19,6 @@ package secp256k1
 import (
 	"fmt"
 	"math/big"
-	"math/bits"
 	"math/rand"
 	"runtime"
 	"sync"
@@ -315,43 +314,22 @@ func _innerMsmG1Reference(p *G1Jac, points []G1Affine, scalars []fr.Element, con
 func BenchmarkMultiExpG1(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
-		samplePoints             [nbSamples]G1Affine
-		sampleScalars            [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
+		samplePoints  [nbSamples]G1Affine
+		sampleScalars [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:], sampleScalars[:])
-	copy(sampleScalarsRedundant[:], sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i := 0; i < len(sampleScalarsSmallValues); i++ {
-		if i%5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i := 0; i < len(sampleScalarsRedundant); i += 100 {
-		for j := i + 1; j < i+100 && j < len(sampleScalarsRedundant); j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 
 	fillBenchBasesG1(samplePoints[:])
 
 	var testPoint G1Affine
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -360,20 +338,6 @@ func BenchmarkMultiExpG1(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using], ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using], ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/internal/generator/ecc/template/tests/multiexp.go.tmpl b/internal/generator/ecc/template/tests/multiexp.go.tmpl
index 3feb22ffe..a0dd0bd06 100644
--- a/internal/generator/ecc/template/tests/multiexp.go.tmpl
+++ b/internal/generator/ecc/template/tests/multiexp.go.tmpl
@@ -14,7 +14,6 @@ import (
 	"math/rand"
 	"math/big"
 	"testing"
-	"math/bits"
 	"sync"
 
 	"github.com/consensys/gnark-crypto/ecc"
@@ -343,44 +342,22 @@ func _innerMsm{{ $.UPointName }}Reference(p *{{ $.TJacobian }}, points []{{ $.TA
 
 func BenchmarkMultiExp{{ $.UPointName }}(b *testing.B) {
 
 	const (
-		pow       = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits
+		pow       = 21
 		nbSamples = 1 << pow
 	)
 
 	var (
 		samplePoints  [nbSamples]{{ $.TAffine }}
 		sampleScalars [nbSamples]fr.Element
-		sampleScalarsSmallValues [nbSamples]fr.Element
-		sampleScalarsRedundant   [nbSamples]fr.Element
 	)
 
 	fillBenchScalars(sampleScalars[:])
-	copy(sampleScalarsSmallValues[:],sampleScalars[:])
-	copy(sampleScalarsRedundant[:],sampleScalars[:])
-
-	// this means first chunk is going to have more work to do and should be split into several go routines
-	for i:=0; i < len(sampleScalarsSmallValues);i++ {
-		if i % 5 == 0 {
-			sampleScalarsSmallValues[i].SetZero()
-			sampleScalarsSmallValues[i][0] = 1
-		}
-	}
-
-	// bad case for batch affine because scalar distribution might look uniform
-	// but over batchSize windows, we may hit a lot of conflicts and force the msm-affine
-	// to process small batches of additions to flush its queue of conflicted points.
-	for i:=0; i < len(sampleScalarsRedundant);i+=100 {
-		for j:=i+1; j < i+100 && j < len(sampleScalarsRedundant);j++ {
-			sampleScalarsRedundant[j] = sampleScalarsRedundant[i]
-		}
-	}
 	fillBenchBases{{ $.UPointName }}(samplePoints[:])
-
 	var testPoint {{ $.TAffine }}
 
-	for i := 5; i <= pow; i++ {
+	for i := 1; i <= pow; i++ {
 		using := 1 << i
 
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
@@ -389,20 +366,6 @@ func BenchmarkMultiExp{{ $.UPointName }}(b *testing.B) {
 				testPoint.MultiExp(samplePoints[:using], sampleScalars[:using],ecc.MultiExpConfig{})
 			}
 		})
-
-		b.Run(fmt.Sprintf("%d points-smallvalues", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsSmallValues[:using],ecc.MultiExpConfig{})
-			}
-		})
-
-		b.Run(fmt.Sprintf("%d points-redundancy", using), func(b *testing.B) {
-			b.ResetTimer()
-			for j := 0; j < b.N; j++ {
-				testPoint.MultiExp(samplePoints[:using], sampleScalarsRedundant[:using],ecc.MultiExpConfig{})
-			}
-		})
 	}
 }
 
diff --git a/internal/generator/ecc/template/tests/point.go.tmpl b/internal/generator/ecc/template/tests/point.go.tmpl
index 432fec1dc..0440216c1 100644
--- a/internal/generator/ecc/template/tests/point.go.tmpl
+++ b/internal/generator/ecc/template/tests/point.go.tmpl
@@ -786,24 +786,13 @@ func Benchmark{{ $TJacobian }}ScalarMultiplication(b *testing.B) {
 	scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10)
 	scalar.Add(&scalar, r)
 
-	var doubleAndAdd {{ $TJacobian }}
-
-	b.Run("double and add", func(b *testing.B) {
-		b.ResetTimer()
-		for j := 0; j < b.N; j++ {
-			doubleAndAdd.mulWindowed(&{{.PointName}}Gen, &scalar)
-		}
-	})
-
-	{{if .GLV}}
-	var glv {{ $TJacobian }}
+	var point {{ $TJacobian }}
 	b.Run("GLV", func(b *testing.B) {
 		b.ResetTimer()
 		for j := 0; j < b.N; j++ {
-			glv.mulGLV(&{{.PointName}}Gen, &scalar)
+			point.ScalarMultiplication(&{{.PointName}}Gen, &scalar)
 		}
 	})
 
-	{{end}}
 }
 
diff --git a/internal/generator/pairing/template/tests/pairing.go.tmpl b/internal/generator/pairing/template/tests/pairing.go.tmpl
index a05c5e4ea..2883f89ec 100644
--- a/internal/generator/pairing/template/tests/pairing.go.tmpl
+++ b/internal/generator/pairing/template/tests/pairing.go.tmpl
@@ -467,18 +467,19 @@ func BenchmarkMultiPair(b *testing.B) {
 	g1GenAff.FromJacobian(&g1Gen)
 	g2GenAff.FromJacobian(&g2Gen)
 
-	n := 10
-	P := make([]G1Affine, n)
-	Q := make([]G2Affine, n)
+	const pow = 10
 
-	for i := 2; i <= n; i++ {
-		for j := 0; j < i; j++ {
+	for i := 4; i <= pow; i++ {
+		using := 1 << i
+		P := make([]G1Affine, using)
+		Q := make([]G2Affine, using)
+		for j := 0; j < using; j++ {
 			P[j].Set(&g1GenAff)
 			Q[j].Set(&g2GenAff)
 		}
-		b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) {
+		b.Run(fmt.Sprintf("%d pairs", using), func(b *testing.B) {
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for k := 0; k < b.N; k++ {
 				Pair(P, Q)
 			}
 		})
diff --git a/zkalc.sh b/zkalc.sh
new file mode 100755
index 000000000..f72a61233
--- /dev/null
+++ b/zkalc.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+# Run the gnark-crypto benchmarks consumed by zkalc for a single curve.
+zkalc_benchmarks () {
+    pushd "$1"
+
+    pushd fr
+
+    go test -run none -bench BenchmarkElementAdd
+    go test -run none -bench 'BenchmarkElementMul\b'
+    go test -run none -bench BenchmarkElementInverse
+
+    go test -v -run none -bench BenchmarkElementSquare
+    go test -v -run none -bench BenchmarkElementSqrt
+
+    popd
+
+    if [ -d fr/fft ]
+    then
+        pushd fr/fft
+
+        go test -run none -bench BenchmarkFFT
+
+        popd
+    fi
+
+    go test -run none -bench 'BenchmarkG1JacAdd\b'
+    go test -run none -bench 'BenchmarkG1JacScalarMultiplication\b'
+    go test -run none -bench 'BenchmarkMultiExpG1\b'
+
+    go test -run none -bench BenchmarkG1JacIsInSubGroup
+    go test -run none -bench BenchmarkG1AffineCofactorClearing
+
+    go test -run none -bench 'BenchmarkG2JacAdd\b'
+    go test -run none -bench 'BenchmarkG2JacScalarMultiplication\b'
+    go test -run none -bench 'BenchmarkMultiExpG2\b'
+
+    go test -run none -bench BenchmarkG2JacIsInSubGroup
+    go test -run none -bench BenchmarkG2AffineCofactorClearing
+
+    if [ -d internal/fptower ]
+    then
+        pushd internal/fptower
+
+        go test -run none -bench BenchmarkE12Add
+        go test -run none -bench BenchmarkE12Mul
+        go test -run none -bench BenchmarkE12Cyclosquare
+
+        popd
+    fi
+
+    go test -run none -bench BenchmarkPairing
+    go test -run none -bench BenchmarkMultiPair
+
+    popd
+}
+
+
+if test "$#" -ne 1; then
+    echo "Usage: zkalc.sh <curve>"
+    echo "For example: 'bash zkalc.sh bls12-381'"
+    exit 1
+fi
+
+pushd ecc
+zkalc_benchmarks "$1"
+popd
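
Aside (illustrative, not part of the patch): with this change every MultiExp benchmark sweeps sizes 2^1 through 2^21, so a full zkalc.sh run takes a long time. A single grid point can be reproduced with the standard go test flags; the curve directory and size below are arbitrary examples, and Go replaces the space in the sub-benchmark name with an underscore:

    cd ecc/bw6-761
    go test -run none -bench 'BenchmarkMultiExpG1/1048576_points' -benchtime=5x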
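For context, a minimal standalone sketch of the call the MultiExp benchmarks above are timing, written against the public gnark-crypto API that appears in this diff (bw6-761 chosen arbitrarily; identical bases and a fixed size stand in for the benchmarks' fillBenchBasesG1/fillBenchScalars setup, which is not reproduced here):

    // Sketch: one multi-scalar multiplication of the kind BenchmarkMultiExpG1 times.
    package main

    import (
    	"fmt"

    	"github.com/consensys/gnark-crypto/ecc"
    	bw6761 "github.com/consensys/gnark-crypto/ecc/bw6-761"
    	"github.com/consensys/gnark-crypto/ecc/bw6-761/fr"
    )

    func main() {
    	const n = 1 << 10 // one point on the 2^1 .. 2^21 grid swept by the benchmark

    	_, _, g1Aff, _ := bw6761.Generators()
    	points := make([]bw6761.G1Affine, n)
    	scalars := make([]fr.Element, n)
    	for i := range points {
    		points[i] = g1Aff // identical bases keep the sketch short
    		if _, err := scalars[i].SetRandom(); err != nil {
    			panic(err)
    		}
    	}

    	// The operation under benchmark: acc = sum_i scalars[i] * points[i],
    	// with default parallelism (empty ecc.MultiExpConfig).
    	var acc bw6761.G1Affine
    	if _, err := acc.MultiExp(points, scalars, ecc.MultiExpConfig{}); err != nil {
    		panic(err)
    	}
    	fmt.Println(acc)
    }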