diff --git a/bindings/go/blst.go b/bindings/go/blst.go
index 54baa873..71ecec13 100644
--- a/bindings/go/blst.go
+++ b/bindings/go/blst.go
@@ -1409,6 +1409,65 @@ func (p1 *P1Affine) Uncompress(in []byte) *P1Affine {
 	}
 	return p1
 }
+
+func (dummy *P1Affine) BatchUncompress(in [][]byte) []*P1Affine {
+	// Allocate space for all of the resulting points. Later we'll save pointers
+	// and return those so that the result could be used in other functions,
+	// such as MultipleAggregateVerify.
+	n := len(in)
+	points := make([]P1Affine, n)
+	pointsPtrs := make([]*P1Affine, n)
+
+	numCores := runtime.GOMAXPROCS(0)
+	numThreads := maxProcs
+	if numThreads > numCores {
+		numThreads = numCores
+	}
+	if numThreads > n {
+		numThreads = n
+	}
+	// Each thread will determine next message to process by atomically
+	// incrementing curItem, process corresponding point, and
+	// repeat until n is exceeded. Each thread will send a result (true for
+	// success, false for failure) into the channel when complete.
+	resCh := make(chan bool, numThreads)
+	valid := int32(1)
+	curItem := uint32(0)
+	for tid := 0; tid < numThreads; tid++ {
+		go func() {
+			for atomic.LoadInt32(&valid) > 0 {
+				// Get a work item
+				work := atomic.AddUint32(&curItem, 1) - 1
+				if work >= uint32(n) {
+					break
+				}
+				if points[work].Uncompress(in[work]) == nil {
+					atomic.StoreInt32(&valid, 0)
+					break
+				}
+				pointsPtrs[work] = &points[work]
+			}
+			if atomic.LoadInt32(&valid) > 0 {
+				resCh <- true
+			} else {
+				resCh <- false
+			}
+		}()
+	}
+
+	// Collect the threads
+	result := true
+	for i := 0; i < numThreads; i++ {
+		if !<-resCh {
+			result = false
+		}
+	}
+	if atomic.LoadInt32(&valid) == 0 || result == false {
+		return nil
+	}
+	return pointsPtrs
+}
+
 func (p1 *P1) Serialize() []byte {
 	var out [BLST_P1_SERIALIZE_BYTES]byte
 	C.blst_p1_serialize((*C.byte)(&out[0]), p1)
@@ -1552,6 +1611,65 @@ func (p2 *P2Affine) Uncompress(in []byte) *P2Affine {
 	}
 	return p2
 }
+
+func (dummy *P2Affine) BatchUncompress(in [][]byte) []*P2Affine {
+	// Allocate space for all of the resulting points. Later we'll save pointers
+	// and return those so that the result could be used in other functions,
+	// such as MultipleAggregateVerify.
+	n := len(in)
+	points := make([]P2Affine, n)
+	pointsPtrs := make([]*P2Affine, n)
+
+	numCores := runtime.GOMAXPROCS(0)
+	numThreads := maxProcs
+	if numThreads > numCores {
+		numThreads = numCores
+	}
+	if numThreads > n {
+		numThreads = n
+	}
+	// Each thread will determine next message to process by atomically
+	// incrementing curItem, process corresponding point, and
+	// repeat until n is exceeded. Each thread will send a result (true for
+	// success, false for failure) into the channel when complete.
+	resCh := make(chan bool, numThreads)
+	valid := int32(1)
+	curItem := uint32(0)
+	for tid := 0; tid < numThreads; tid++ {
+		go func() {
+			for atomic.LoadInt32(&valid) > 0 {
+				// Get a work item
+				work := atomic.AddUint32(&curItem, 1) - 1
+				if work >= uint32(n) {
+					break
+				}
+				if points[work].Uncompress(in[work]) == nil {
+					atomic.StoreInt32(&valid, 0)
+					break
+				}
+				pointsPtrs[work] = &points[work]
+			}
+			if atomic.LoadInt32(&valid) > 0 {
+				resCh <- true
+			} else {
+				resCh <- false
+			}
+		}()
+	}
+
+	// Collect the threads
+	result := true
+	for i := 0; i < numThreads; i++ {
+		if !<-resCh {
+			result = false
+		}
+	}
+	if atomic.LoadInt32(&valid) == 0 || result == false {
+		return nil
+	}
+	return pointsPtrs
+}
+
 func (p2 *P2) Serialize() []byte {
 	var out [BLST_P2_SERIALIZE_BYTES]byte
 	C.blst_p2_serialize((*C.byte)(&out[0]), p2)
diff --git a/bindings/go/blst_minpk_test.go b/bindings/go/blst_minpk_test.go
index 8cdd1a2e..d42219a2 100644
--- a/bindings/go/blst_minpk_test.go
+++ b/bindings/go/blst_minpk_test.go
@@ -330,6 +330,28 @@ func TestSignMultipleVerifyAggregateMinPk(t *testing.T) {
 	}
 }
 
+func TestBatchUncompressMinPk(t *testing.T) {
+	size := 128
+	var points []*P2Affine
+	var compPoints [][]byte
+
+	for i := 0; i < size; i++ {
+		msg := Message(fmt.Sprintf("blst is a blast!! %d", i))
+		p2 := HashToG2(msg, dstMinPk).ToAffine()
+		points = append(points, p2)
+		compPoints = append(compPoints, p2.Compress())
+	}
+	uncompPoints := new(SignatureMinPk).BatchUncompress(compPoints)
+	if uncompPoints == nil {
+		t.Errorf("BatchUncompress returned nil size %d", size)
+	}
+	for i := 0; i < size; i++ {
+		if !points[i].Equals(uncompPoints[i]) {
+			t.Errorf("Uncompressed point does not equal initial point %d", i)
+		}
+	}
+}
+
 func BenchmarkCoreSignMinPk(b *testing.B) {
 	var ikm = [...]byte{
 		0x93, 0xad, 0x7e, 0x65, 0xde, 0xad, 0x05, 0x2a,
@@ -499,3 +521,38 @@ func generateBatchTestDataUncompressedMinPk(size int) (sks []*SecretKey,
 	agsig = agProj.ToAffine()
 	return
 }
+
+func BenchmarkBatchUncompressMinPk(b *testing.B) {
+	size := 128
+	var points []*P2Affine
+	var compPoints [][]byte
+
+	for i := 0; i < size; i++ {
+		msg := Message(fmt.Sprintf("blst is a blast!! %d", i))
+		p2 := HashToG2(msg, dstMinPk).ToAffine()
+		points = append(points, p2)
+		compPoints = append(compPoints, p2.Compress())
+	}
+	b.Run("Single", func(b *testing.B) {
+		b.ResetTimer()
+		b.ReportAllocs()
+		var tmp SignatureMinPk
+		for i := 0; i < b.N; i++ {
+			for j := 0; j < size; j++ {
+				if tmp.Uncompress(compPoints[j]) == nil {
+					b.Fatal("could not uncompress point")
+				}
+			}
+		}
+	})
+	b.Run("Batch", func(b *testing.B) {
+		b.ResetTimer()
+		b.ReportAllocs()
+		var tmp SignatureMinPk
+		for i := 0; i < b.N; i++ {
+			if tmp.BatchUncompress(compPoints) == nil {
+				b.Fatal("could not batch uncompress points")
+			}
+		}
+	})
+}
diff --git a/bindings/go/blst_minsig_test.go b/bindings/go/blst_minsig_test.go
index 6a67afc6..d3796517 100644
--- a/bindings/go/blst_minsig_test.go
+++ b/bindings/go/blst_minsig_test.go
@@ -334,6 +334,28 @@ func TestSignMultipleVerifyAggregateMinSig(t *testing.T) {
 	}
 }
 
+func TestBatchUncompressMinSig(t *testing.T) {
+	size := 128
+	var points []*P1Affine
+	var compPoints [][]byte
+
+	for i := 0; i < size; i++ {
+		msg := Message(fmt.Sprintf("blst is a blast!! %d", i))
+		p1 := HashToG1(msg, dstMinSig).ToAffine()
+		points = append(points, p1)
+		compPoints = append(compPoints, p1.Compress())
+	}
+	uncompPoints := new(SignatureMinSig).BatchUncompress(compPoints)
+	if uncompPoints == nil {
+		t.Errorf("BatchUncompress returned nil size %d", size)
+	}
+	for i := 0; i < size; i++ {
+		if !points[i].Equals(uncompPoints[i]) {
+			t.Errorf("Uncompressed point does not equal initial point %d", i)
+		}
+	}
+}
+
 func BenchmarkCoreSignMinSig(b *testing.B) {
 	var ikm = [...]byte{
 		0x93, 0xad, 0x7e, 0x65, 0xde, 0xad, 0x05, 0x2a,
@@ -503,3 +525,38 @@ func generateBatchTestDataUncompressedMinSig(size int) (sks []*SecretKey,
 	agsig = agProj.ToAffine()
 	return
 }
+
+func BenchmarkBatchUncompressMinSig(b *testing.B) {
+	size := 128
+	var points []*P1Affine
+	var compPoints [][]byte
+
+	for i := 0; i < size; i++ {
+		msg := Message(fmt.Sprintf("blst is a blast!! %d", i))
+		p1 := HashToG1(msg, dstMinSig).ToAffine()
+		points = append(points, p1)
+		compPoints = append(compPoints, p1.Compress())
+	}
+	b.Run("Single", func(b *testing.B) {
+		b.ResetTimer()
+		b.ReportAllocs()
+		var tmp SignatureMinSig
+		for i := 0; i < b.N; i++ {
+			for j := 0; j < size; j++ {
+				if tmp.Uncompress(compPoints[j]) == nil {
+					b.Fatal("could not uncompress point")
+				}
+			}
+		}
+	})
+	b.Run("Batch", func(b *testing.B) {
+		b.ResetTimer()
+		b.ReportAllocs()
+		var tmp SignatureMinSig
+		for i := 0; i < b.N; i++ {
+			if tmp.BatchUncompress(compPoints) == nil {
+				b.Fatal("could not batch uncompress points")
+			}
+		}
+	})
+}
diff --git a/bindings/go/blst_px.tgo b/bindings/go/blst_px.tgo
index b9d9d8bf..a5a18d2b 100644
--- a/bindings/go/blst_px.tgo
+++ b/bindings/go/blst_px.tgo
@@ -58,6 +58,65 @@ func (p1 *P1Affine) Uncompress(in []byte) *P1Affine {
 	}
 	return p1
 }
+
+func (dummy *P1Affine) BatchUncompress(in [][]byte) []*P1Affine {
+	// Allocate space for all of the resulting points. Later we'll save pointers
+	// and return those so that the result could be used in other functions,
+	// such as MultipleAggregateVerify.
+	n := len(in)
+	points := make([]P1Affine, n)
+	pointsPtrs := make([]*P1Affine, n)
+
+	numCores := runtime.GOMAXPROCS(0)
+	numThreads := maxProcs
+	if numThreads > numCores {
+		numThreads = numCores
+	}
+	if numThreads > n {
+		numThreads = n
+	}
+	// Each thread will determine next message to process by atomically
+	// incrementing curItem, process corresponding point, and
+	// repeat until n is exceeded. Each thread will send a result (true for
+	// success, false for failure) into the channel when complete.
+	resCh := make(chan bool, numThreads)
+	valid := int32(1)
+	curItem := uint32(0)
+	for tid := 0; tid < numThreads; tid++ {
+		go func() {
+			for atomic.LoadInt32(&valid) > 0 {
+				// Get a work item
+				work := atomic.AddUint32(&curItem, 1) - 1
+				if work >= uint32(n) {
+					break
+				}
+				if points[work].Uncompress(in[work]) == nil {
+					atomic.StoreInt32(&valid, 0)
+					break
+				}
+				pointsPtrs[work] = &points[work]
+			}
+			if atomic.LoadInt32(&valid) > 0 {
+				resCh <- true
+			} else {
+				resCh <- false
+			}
+		}()
+	}
+
+	// Collect the threads
+	result := true
+	for i := 0; i < numThreads; i++ {
+		if ! <-resCh {
+			result = false
+		}
+	}
+	if atomic.LoadInt32(&valid) == 0 || result == false {
+		return nil
+	}
+	return pointsPtrs
+}
+
 func (p1 *P1) Serialize() []byte {
 	var out [BLST_P1_SERIALIZE_BYTES]byte
 	C.blst_p1_serialize((*C.byte)(&out[0]), p1)