diff --git a/beacon-chain/db/filesystem/blob.go b/beacon-chain/db/filesystem/blob.go
index c4fa3a689c9d..a96e7ba9286c 100644
--- a/beacon-chain/db/filesystem/blob.go
+++ b/beacon-chain/db/filesystem/blob.go
@@ -120,7 +120,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 	if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
 		return err
 	}
-	partPath := fname.partPath()
+	partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
 
 	partialMoved := false
 	// Ensure the partial file is deleted.
@@ -270,16 +270,12 @@ func (p blobNamer) dir() string {
 	return rootString(p.root)
 }
 
-func (p blobNamer) fname(ext string) string {
-	return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
-}
-
-func (p blobNamer) partPath() string {
-	return p.fname(partExt)
+func (p blobNamer) partPath(entropy string) string {
+	return path.Join(p.dir(), fmt.Sprintf("%s-%d.%s", entropy, p.index, partExt))
 }
 
 func (p blobNamer) path() string {
-	return p.fname(sszExt)
+	return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, sszExt))
 }
 
 func rootString(root [32]byte) string {
diff --git a/beacon-chain/db/filesystem/blob_test.go b/beacon-chain/db/filesystem/blob_test.go
index 87b57d21e413..9efe799d8e87 100644
--- a/beacon-chain/db/filesystem/blob_test.go
+++ b/beacon-chain/db/filesystem/blob_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"os"
 	"path"
+	"sync"
 	"testing"
 	"time"
 
@@ -101,6 +102,30 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
 		_, err = b.Get(blob.BlockRoot(), blob.Index)
 		require.ErrorIs(t, err, os.ErrNotExist)
 	})
+
+	t.Run("race conditions", func(t *testing.T) {
+		// There was a bug where saving the same blob in multiple goroutines would cause a partial blob
+		// to be empty. This test ensures that several routines can safely save the same blob at the
+		// same time. This isn't ideal behavior from the caller, but should be handled safely anyway.
+		// See https://github.com/prysmaticlabs/prysm/pull/13648
+		b, err := NewBlobStorage(t.TempDir())
+		require.NoError(t, err)
+		blob := testSidecars[0]
+
+		var wg sync.WaitGroup
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				require.NoError(t, b.Save(blob))
+			}()
+		}
+
+		wg.Wait()
+		res, err := b.Get(blob.BlockRoot(), blob.Index)
+		require.NoError(t, err)
+		require.DeepSSZEqual(t, blob, res)
+	})
 }
 
 // pollUntil polls a condition function until it returns true or a timeout is reached.
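For context, the race fixed above comes from every Save call for a given blob writing into the same <index>.part file, so concurrent saves of the same sidecar could clobber each other's partial writes and leave an empty partial file. The sketch below is illustrative only and not the patch's API: blobsketch, save, and uniqueSuffix are hypothetical names standing in for BlobStorage.Save and the fmt.Sprintf("%p", sidecarData) entropy that the diff adds to partPath.

// A minimal sketch (assumed names, not the patched code) of the
// write-to-a-unique-partial-file-then-rename pattern the diff adopts.
package blobsketch

import (
	"fmt"
	"os"
	"path/filepath"
)

func save(dir string, index uint64, uniqueSuffix string, data []byte) error {
	// Each caller writes to its own partial file, e.g. "0xc000123456-3.part",
	// so concurrent saves of the same blob never share one .part file.
	partPath := filepath.Join(dir, fmt.Sprintf("%s-%d.part", uniqueSuffix, index))
	if err := os.WriteFile(partPath, data, 0o600); err != nil {
		return err
	}
	// The final path is shared, e.g. "3.ssz". On POSIX filesystems rename is
	// atomic, so the last writer simply replaces identical contents.
	return os.Rename(partPath, filepath.Join(dir, fmt.Sprintf("%d.ssz", index)))
}

The %p suffix in the patch presumably gets its uniqueness from sidecarData being allocated per call, so even identical blobs saved concurrently land in distinct partial files, which is exactly what the new race-condition test exercises.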