feat(service): add upload, download, file, and folder services

Add UploadService (dedup, chunk lifecycle, ComposeObject), DownloadService (Range support), FileService (ref counting), FolderService (path validation).

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-openagent)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
This commit is contained in:
dailz
2026-04-15 09:23:09 +08:00
parent a114821615
commit f0847d3978
8 changed files with 2511 additions and 0 deletions

View File

@@ -0,0 +1,678 @@
package service
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"strings"
"testing"
"time"
"gcy_hpc_server/internal/config"
"gcy_hpc_server/internal/model"
"gcy_hpc_server/internal/storage"
"gcy_hpc_server/internal/store"
"go.uber.org/zap"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
// uploadMockStorage is a configurable test double for the project's
// storage.ObjectStorage interface. Each fooFn field, when non-nil,
// overrides the corresponding method; when nil the method returns a
// benign default, so each test only stubs the calls it cares about.
type uploadMockStorage struct {
putObjectFn func(ctx context.Context, bucket, key string, reader io.Reader, size int64, opts storage.PutObjectOptions) (storage.UploadInfo, error)
composeObjectFn func(ctx context.Context, bucket, dst string, sources []string) (storage.UploadInfo, error)
listObjectsFn func(ctx context.Context, bucket, prefix string, recursive bool) ([]storage.ObjectInfo, error)
removeObjectsFn func(ctx context.Context, bucket string, keys []string, opts storage.RemoveObjectsOptions) error
removeObjectFn func(ctx context.Context, bucket, key string, opts storage.RemoveObjectOptions) error
getObjectFn func(ctx context.Context, bucket, key string, opts storage.GetOptions) (io.ReadCloser, storage.ObjectInfo, error)
bucketExistsFn func(ctx context.Context, bucket string) (bool, error)
makeBucketFn func(ctx context.Context, bucket string, opts storage.MakeBucketOptions) error
statObjectFn func(ctx context.Context, bucket, key string, opts storage.StatObjectOptions) (storage.ObjectInfo, error)
abortMultipartFn func(ctx context.Context, bucket, object, uploadID string) error
removeIncompleteFn func(ctx context.Context, bucket, object string) error
}
// PutObject forwards to putObjectFn when configured. The default drains
// the reader (mirroring a real upload) and answers with a fixed ETag.
func (m *uploadMockStorage) PutObject(ctx context.Context, bucket, key string, reader io.Reader, size int64, opts storage.PutObjectOptions) (storage.UploadInfo, error) {
	if m.putObjectFn == nil {
		// Best-effort drain; the mock deliberately ignores read errors.
		_, _ = io.Copy(io.Discard, reader)
		return storage.UploadInfo{ETag: "etag", Size: size}, nil
	}
	return m.putObjectFn(ctx, bucket, key, reader, size, opts)
}
// GetObject forwards to getObjectFn when configured; the default yields
// a nil reader and a zero-valued ObjectInfo.
func (m *uploadMockStorage) GetObject(ctx context.Context, bucket, key string, opts storage.GetOptions) (io.ReadCloser, storage.ObjectInfo, error) {
	if m.getObjectFn == nil {
		return nil, storage.ObjectInfo{}, nil
	}
	return m.getObjectFn(ctx, bucket, key, opts)
}
// ComposeObject forwards to composeObjectFn when configured; the default
// reports a fixed "composed" ETag with zero size.
func (m *uploadMockStorage) ComposeObject(ctx context.Context, bucket, dst string, sources []string) (storage.UploadInfo, error) {
	if m.composeObjectFn == nil {
		return storage.UploadInfo{ETag: "composed", Size: 0}, nil
	}
	return m.composeObjectFn(ctx, bucket, dst, sources)
}
// AbortMultipartUpload forwards to abortMultipartFn when configured;
// the default is a no-op success.
func (m *uploadMockStorage) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	if m.abortMultipartFn == nil {
		return nil
	}
	return m.abortMultipartFn(ctx, bucket, object, uploadID)
}
// RemoveIncompleteUpload forwards to removeIncompleteFn when configured;
// the default is a no-op success.
func (m *uploadMockStorage) RemoveIncompleteUpload(ctx context.Context, bucket, object string) error {
	if m.removeIncompleteFn == nil {
		return nil
	}
	return m.removeIncompleteFn(ctx, bucket, object)
}
// RemoveObject forwards to removeObjectFn when configured; the default
// is a no-op success.
func (m *uploadMockStorage) RemoveObject(ctx context.Context, bucket, key string, opts storage.RemoveObjectOptions) error {
	if m.removeObjectFn == nil {
		return nil
	}
	return m.removeObjectFn(ctx, bucket, key, opts)
}
// ListObjects forwards to listObjectsFn when configured; the default
// reports an empty listing.
func (m *uploadMockStorage) ListObjects(ctx context.Context, bucket, prefix string, recursive bool) ([]storage.ObjectInfo, error) {
	if m.listObjectsFn == nil {
		return nil, nil
	}
	return m.listObjectsFn(ctx, bucket, prefix, recursive)
}
// RemoveObjects forwards to removeObjectsFn when configured; the default
// is a no-op success.
func (m *uploadMockStorage) RemoveObjects(ctx context.Context, bucket string, keys []string, opts storage.RemoveObjectsOptions) error {
	if m.removeObjectsFn == nil {
		return nil
	}
	return m.removeObjectsFn(ctx, bucket, keys, opts)
}
// BucketExists forwards to bucketExistsFn when configured; the default
// claims the bucket exists.
func (m *uploadMockStorage) BucketExists(ctx context.Context, bucket string) (bool, error) {
	if m.bucketExistsFn == nil {
		return true, nil
	}
	return m.bucketExistsFn(ctx, bucket)
}
// MakeBucket forwards to makeBucketFn when configured; the default is a
// no-op success.
func (m *uploadMockStorage) MakeBucket(ctx context.Context, bucket string, opts storage.MakeBucketOptions) error {
	if m.makeBucketFn == nil {
		return nil
	}
	return m.makeBucketFn(ctx, bucket, opts)
}
// StatObject forwards to statObjectFn when configured; the default
// reports a zero-valued ObjectInfo.
func (m *uploadMockStorage) StatObject(ctx context.Context, bucket, key string, opts storage.StatObjectOptions) (storage.ObjectInfo, error) {
	if m.statObjectFn == nil {
		return storage.ObjectInfo{}, nil
	}
	return m.statObjectFn(ctx, bucket, key, opts)
}
// setupUploadTestDB opens an in-memory SQLite database and migrates the
// four tables the upload service touches. Failures abort the test.
func setupUploadTestDB(t *testing.T) *gorm.DB {
	t.Helper()
	conn, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		t.Fatalf("open sqlite: %v", err)
	}
	tables := []any{&model.FileBlob{}, &model.File{}, &model.UploadSession{}, &model.UploadChunk{}}
	if err := conn.AutoMigrate(tables...); err != nil {
		t.Fatalf("migrate: %v", err)
	}
	return conn
}
// uploadTestConfig returns the MinIO settings shared by the upload tests:
// 16 MiB default chunks, 5 MiB minimum chunk, 50 GiB file cap, and a
// 48-hour session TTL.
func uploadTestConfig() config.MinioConfig {
	cfg := config.MinioConfig{Bucket: "test-bucket"}
	cfg.ChunkSize = 16 << 20   // 16 MiB
	cfg.MaxFileSize = 50 << 30 // 50 GiB
	cfg.MinChunkSize = 5 << 20 // 5 MiB
	cfg.SessionTTL = 48        // hours
	return cfg
}
// newUploadTestService wires an UploadService over the given storage
// double and database, using the shared test configuration and a no-op
// logger.
func newUploadTestService(t *testing.T, st storage.ObjectStorage, db *gorm.DB) *UploadService {
	t.Helper()
	blobs := store.NewBlobStore(db)
	files := store.NewFileStore(db)
	uploads := store.NewUploadStore(db)
	return NewUploadService(st, blobs, files, uploads, uploadTestConfig(), db, zap.NewNop())
}
// sha256Of returns the lowercase hex-encoded SHA-256 digest of data.
func sha256Of(data []byte) string {
	digest := sha256.Sum256(data)
	return hex.EncodeToString(digest[:])
}
// TestInitUpload_CreatesSession verifies a fresh upload produces a
// pending session whose chunk count reflects the default chunk size.
func TestInitUpload_CreatesSession(t *testing.T) {
	db := setupUploadTestDB(t)
	svc := newUploadTestService(t, &uploadMockStorage{}, db)
	req := model.InitUploadRequest{FileName: "test.txt", FileSize: 32 << 20, SHA256: "abc123"}
	resp, err := svc.InitUpload(context.Background(), req)
	if err != nil {
		t.Fatalf("InitUpload: %v", err)
	}
	sessResp, ok := resp.(model.UploadSessionResponse)
	if !ok {
		t.Fatalf("expected UploadSessionResponse, got %T", resp)
	}
	if sessResp.Status != "pending" {
		t.Errorf("status = %q, want pending", sessResp.Status)
	}
	// 32 MiB at the 16 MiB default chunk size yields two chunks.
	if sessResp.TotalChunks != 2 {
		t.Errorf("TotalChunks = %d, want 2", sessResp.TotalChunks)
	}
	if sessResp.FileName != "test.txt" {
		t.Errorf("FileName = %q, want test.txt", sessResp.FileName)
	}
}
// TestInitUpload_DedupBlobExists verifies that initiating an upload
// whose SHA-256 already has a stored blob short-circuits into a
// FileResponse (no session) and bumps the blob's reference count.
func TestInitUpload_DedupBlobExists(t *testing.T) {
	db := setupUploadTestDB(t)
	st := &uploadMockStorage{}
	svc := newUploadTestService(t, st, db)
	blobSHA := sha256Of([]byte("hello"))
	// Seed a blob so the service has something to dedup against.
	blob := &model.FileBlob{
		SHA256:   blobSHA,
		MinioKey: "files/" + blobSHA,
		FileSize: 5,
		MimeType: "text/plain",
		RefCount: 1,
	}
	if err := db.Create(blob).Error; err != nil {
		t.Fatalf("create blob: %v", err)
	}
	resp, err := svc.InitUpload(context.Background(), model.InitUploadRequest{
		FileName: "mydoc.txt",
		FileSize: 5,
		SHA256:   blobSHA,
	})
	if err != nil {
		t.Fatalf("InitUpload: %v", err)
	}
	fileResp, ok := resp.(model.FileResponse)
	if !ok {
		t.Fatalf("expected FileResponse, got %T", resp)
	}
	if fileResp.Name != "mydoc.txt" {
		t.Errorf("Name = %q, want mydoc.txt", fileResp.Name)
	}
	if fileResp.SHA256 != blobSHA {
		t.Errorf("SHA256 mismatch")
	}
	// Fix: check the reload error instead of asserting on a zero-valued
	// struct when the query silently fails.
	var after model.FileBlob
	if err := db.First(&after, blob.ID).Error; err != nil {
		t.Fatalf("reload blob: %v", err)
	}
	if after.RefCount != 2 {
		t.Errorf("RefCount = %d, want 2", after.RefCount)
	}
}
// TestInitUpload_ChunkSizeTooSmall verifies that a caller-supplied chunk
// size under MinChunkSize is rejected.
func TestInitUpload_ChunkSizeTooSmall(t *testing.T) {
	db := setupUploadTestDB(t)
	svc := newUploadTestService(t, &uploadMockStorage{}, db)
	smallChunk := int64(1024) // far below the 5 MiB MinChunkSize
	_, err := svc.InitUpload(context.Background(), model.InitUploadRequest{
		FileName:  "test.txt",
		FileSize:  10 << 20,
		SHA256:    "abc",
		ChunkSize: &smallChunk,
	})
	if err == nil {
		t.Fatal("expected error for chunk size too small")
	}
	if !strings.Contains(err.Error(), "below minimum") {
		t.Errorf("error = %q, want 'below minimum'", err.Error())
	}
}
// TestInitUpload_TooManyChunks verifies that a file whose size implies a
// chunk count beyond the service limit is rejected up front.
func TestInitUpload_TooManyChunks(t *testing.T) {
	db := setupUploadTestDB(t)
	cfg := uploadTestConfig()
	// One-byte chunks force the chunk count to equal the file size.
	cfg.ChunkSize = 1
	cfg.MinChunkSize = 1
	svc := NewUploadService(&uploadMockStorage{}, store.NewBlobStore(db), store.NewFileStore(db), store.NewUploadStore(db), cfg, db, zap.NewNop())
	_, err := svc.InitUpload(context.Background(), model.InitUploadRequest{
		FileName: "big.txt",
		FileSize: 10002,
		SHA256:   "abc",
	})
	if err == nil {
		t.Fatal("expected error for too many chunks")
	}
	if !strings.Contains(err.Error(), "exceeds limit") {
		t.Errorf("error = %q, want 'exceeds limit'", err.Error())
	}
}
// TestInitUpload_DangerousFilename verifies that empty, traversal, and
// separator-bearing file names are all rejected.
func TestInitUpload_DangerousFilename(t *testing.T) {
	db := setupUploadTestDB(t)
	svc := newUploadTestService(t, &uploadMockStorage{}, db)
	badNames := []string{"", "..", "foo/bar", "foo\\bar", " name"}
	for _, bad := range badNames {
		req := model.InitUploadRequest{FileName: bad, FileSize: 100, SHA256: "abc"}
		if _, err := svc.InitUpload(context.Background(), req); err == nil {
			t.Errorf("expected error for filename %q", bad)
		}
	}
}
// TestUploadChunk_UploadsAndStoresSHA256 verifies that uploading a chunk
// records its SHA-256 and "uploaded" status, and moves the session to
// "uploading".
func TestUploadChunk_UploadsAndStoresSHA256(t *testing.T) {
	db := setupUploadTestDB(t)
	st := &uploadMockStorage{}
	svc := newUploadTestService(t, st, db)
	// Fix: the init error was discarded, which made the type assertion
	// below panic with a confusing message on failure.
	resp, err := svc.InitUpload(context.Background(), model.InitUploadRequest{
		FileName: "test.bin",
		FileSize: 10 << 20,
		SHA256:   "deadbeef",
	})
	if err != nil {
		t.Fatalf("InitUpload: %v", err)
	}
	sessResp := resp.(model.UploadSessionResponse)
	data := []byte("chunk data here")
	chunkSHA := sha256Of(data)
	if err := svc.UploadChunk(context.Background(), sessResp.ID, 0, bytes.NewReader(data), int64(len(data))); err != nil {
		t.Fatalf("UploadChunk: %v", err)
	}
	// Fix: check the lookup error so a missing row fails loudly instead
	// of asserting against a zero-valued chunk.
	var chunk model.UploadChunk
	if err := db.Where("session_id = ? AND chunk_index = ?", sessResp.ID, 0).First(&chunk).Error; err != nil {
		t.Fatalf("load chunk: %v", err)
	}
	if chunk.SHA256 != chunkSHA {
		t.Errorf("chunk SHA256 = %q, want %q", chunk.SHA256, chunkSHA)
	}
	if chunk.Status != "uploaded" {
		t.Errorf("chunk status = %q, want uploaded", chunk.Status)
	}
	session, err := svc.uploadStore.GetSession(context.Background(), sessResp.ID)
	if err != nil {
		t.Fatalf("GetSession: %v", err)
	}
	if session.Status != "uploading" {
		t.Errorf("session status = %q, want uploading", session.Status)
	}
}
// TestUploadChunk_RejectsCompletedSession verifies that chunks cannot be
// added to a session already marked completed.
func TestUploadChunk_RejectsCompletedSession(t *testing.T) {
	db := setupUploadTestDB(t)
	st := &uploadMockStorage{}
	svc := newUploadTestService(t, st, db)
	// Fix: surface init and status-update errors instead of discarding
	// them; a silent failure here made the final assertion meaningless.
	resp, err := svc.InitUpload(context.Background(), model.InitUploadRequest{
		FileName: "test.bin",
		FileSize: 10 << 20,
		SHA256:   "deadbeef",
	})
	if err != nil {
		t.Fatalf("InitUpload: %v", err)
	}
	sessResp := resp.(model.UploadSessionResponse)
	if err := svc.uploadStore.UpdateSessionStatus(context.Background(), sessResp.ID, "completed"); err != nil {
		t.Fatalf("mark session completed: %v", err)
	}
	err = svc.UploadChunk(context.Background(), sessResp.ID, 0, bytes.NewReader([]byte("x")), 1)
	if err == nil {
		t.Fatal("expected error for completed session")
	}
	if !strings.Contains(err.Error(), "cannot upload") {
		t.Errorf("error = %q", err.Error())
	}
}
// TestUploadChunk_RejectsExpiredSession verifies that chunks cannot be
// added to a session marked expired.
func TestUploadChunk_RejectsExpiredSession(t *testing.T) {
	db := setupUploadTestDB(t)
	st := &uploadMockStorage{}
	svc := newUploadTestService(t, st, db)
	// Fix: surface init and status-update errors instead of discarding
	// them, so setup failures don't masquerade as a passing rejection.
	resp, err := svc.InitUpload(context.Background(), model.InitUploadRequest{
		FileName: "test.bin",
		FileSize: 10 << 20,
		SHA256:   "deadbeef",
	})
	if err != nil {
		t.Fatalf("InitUpload: %v", err)
	}
	sessResp := resp.(model.UploadSessionResponse)
	if err := svc.uploadStore.UpdateSessionStatus(context.Background(), sessResp.ID, "expired"); err != nil {
		t.Fatalf("mark session expired: %v", err)
	}
	err = svc.UploadChunk(context.Background(), sessResp.ID, 0, bytes.NewReader([]byte("x")), 1)
	if err == nil {
		t.Fatal("expected error for expired session")
	}
}
// TestGetUploadStatus_ReturnsChunkIndices verifies that after uploading
// chunk 0 of a two-chunk session, GetUploadStatus reports exactly that
// index as uploaded.
func TestGetUploadStatus_ReturnsChunkIndices(t *testing.T) {
	db := setupUploadTestDB(t)
	st := &uploadMockStorage{}
	svc := newUploadTestService(t, st, db)
	// Fix: check the init and chunk-upload errors; previously both were
	// dropped, so a broken setup produced confusing downstream failures.
	resp, err := svc.InitUpload(context.Background(), model.InitUploadRequest{
		FileName: "test.bin",
		FileSize: 32 << 20,
		SHA256:   "abc",
	})
	if err != nil {
		t.Fatalf("InitUpload: %v", err)
	}
	sessResp := resp.(model.UploadSessionResponse)
	data := []byte("chunk0data")
	if err := svc.UploadChunk(context.Background(), sessResp.ID, 0, bytes.NewReader(data), int64(len(data))); err != nil {
		t.Fatalf("UploadChunk: %v", err)
	}
	status, err := svc.GetUploadStatus(context.Background(), sessResp.ID)
	if err != nil {
		t.Fatalf("GetUploadStatus: %v", err)
	}
	if len(status.UploadedChunks) != 1 || status.UploadedChunks[0] != 0 {
		t.Errorf("UploadedChunks = %v, want [0]", status.UploadedChunks)
	}
	if status.TotalChunks != 2 {
		t.Errorf("TotalChunks = %d, want 2", status.TotalChunks)
	}
}
// TestCompleteUpload_CreatesBlobAndFile seeds a two-chunk "uploading"
// session with both chunks marked uploaded, then checks CompleteUpload
// returns the file, records a blob with RefCount 1, and flips the
// session status to "completed".
func TestCompleteUpload_CreatesBlobAndFile(t *testing.T) {
db := setupUploadTestDB(t)
cfg := uploadTestConfig()
// 5 MiB chunks make the 10 MiB file exactly two chunks.
cfg.ChunkSize = 5 << 20
cfg.MinChunkSize = 1
st := &uploadMockStorage{}
svc := NewUploadService(st, store.NewBlobStore(db), store.NewFileStore(db), store.NewUploadStore(db), cfg, db, zap.NewNop())
fileSHA := "aaa111"
// The session and its chunks are inserted directly rather than via
// InitUpload/UploadChunk so the test controls the exact fixture state.
sess := &model.UploadSession{
FileName: "test.bin",
FileSize: 10 << 20,
ChunkSize: 5 << 20,
TotalChunks: 2,
SHA256: fileSHA,
Status: "uploading",
MinioPrefix: "uploads/testcomplete/",
MimeType: "application/octet-stream",
ExpiresAt: time.Now().Add(48 * time.Hour),
}
db.Create(sess)
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 0, MinioKey: "uploads/testcomplete/chunk_00000", SHA256: "c0", Size: 5 << 20, Status: "uploaded"})
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 1, MinioKey: "uploads/testcomplete/chunk_00001", SHA256: "c1", Size: 5 << 20, Status: "uploaded"})
fileResp, err := svc.CompleteUpload(context.Background(), sess.ID)
if err != nil {
t.Fatalf("CompleteUpload: %v", err)
}
if fileResp.Name != "test.bin" {
t.Errorf("Name = %q, want test.bin", fileResp.Name)
}
if fileResp.SHA256 != fileSHA {
t.Errorf("SHA256 = %q, want %q", fileResp.SHA256, fileSHA)
}
// A brand-new blob starts at one reference.
var blob model.FileBlob
db.Where("sha256 = ?", fileSHA).First(&blob)
if blob.RefCount != 1 {
t.Errorf("blob RefCount = %d, want 1", blob.RefCount)
}
// The session row is kept (not deleted) and marked completed.
session, _ := svc.uploadStore.GetSession(context.Background(), sess.ID)
if session == nil {
t.Fatal("session should exist")
}
if session.Status != "completed" {
t.Errorf("session status = %q, want completed", session.Status)
}
}
// TestCompleteUpload_ReusesExistingBlob verifies that completing an
// upload whose SHA-256 matches an existing blob reuses it: the blob's
// RefCount is incremented and ComposeObject is never invoked.
func TestCompleteUpload_ReusesExistingBlob(t *testing.T) {
db := setupUploadTestDB(t)
cfg := uploadTestConfig()
cfg.ChunkSize = 5 << 20
cfg.MinChunkSize = 1
st := &uploadMockStorage{}
// Spy on ComposeObject: dedup means the chunks must never be composed.
composeCalled := false
st.composeObjectFn = func(ctx context.Context, bucket, dst string, sources []string) (storage.UploadInfo, error) {
composeCalled = true
return storage.UploadInfo{}, nil
}
svc := NewUploadService(st, store.NewBlobStore(db), store.NewFileStore(db), store.NewUploadStore(db), cfg, db, zap.NewNop())
fileSHA := "reuse123"
// Pre-existing blob with the same hash the session will complete with.
db.Create(&model.FileBlob{
SHA256: fileSHA,
MinioKey: "files/" + fileSHA,
FileSize: 10 << 20,
MimeType: "application/octet-stream",
RefCount: 1,
})
sess := &model.UploadSession{
FileName: "reuse.bin",
FileSize: 10 << 20,
ChunkSize: 5 << 20,
TotalChunks: 2,
SHA256: fileSHA,
Status: "uploading",
MinioPrefix: "uploads/reuse/",
MimeType: "application/octet-stream",
ExpiresAt: time.Now().Add(48 * time.Hour),
}
db.Create(sess)
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 0, MinioKey: "uploads/reuse/chunk_00000", SHA256: "c0", Size: 5 << 20, Status: "uploaded"})
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 1, MinioKey: "uploads/reuse/chunk_00001", SHA256: "c1", Size: 5 << 20, Status: "uploaded"})
fileResp, err := svc.CompleteUpload(context.Background(), sess.ID)
if err != nil {
t.Fatalf("CompleteUpload: %v", err)
}
if fileResp.SHA256 != fileSHA {
t.Errorf("SHA256 mismatch")
}
if composeCalled {
t.Error("ComposeObject should not be called when blob exists")
}
// Reuse adds one reference to the seeded blob.
var blob model.FileBlob
db.Where("sha256 = ?", fileSHA).First(&blob)
if blob.RefCount != 2 {
t.Errorf("RefCount = %d, want 2", blob.RefCount)
}
}
// TestCompleteUpload_NotAllChunks verifies that CompleteUpload refuses a
// session where only one of two expected chunks has been uploaded.
func TestCompleteUpload_NotAllChunks(t *testing.T) {
db := setupUploadTestDB(t)
cfg := uploadTestConfig()
cfg.ChunkSize = 5 << 20
cfg.MinChunkSize = 1
st := &uploadMockStorage{}
svc := NewUploadService(st, store.NewBlobStore(db), store.NewFileStore(db), store.NewUploadStore(db), cfg, db, zap.NewNop())
sess := &model.UploadSession{
FileName: "partial.bin",
FileSize: 10 << 20,
ChunkSize: 5 << 20,
TotalChunks: 2,
SHA256: "partial123",
Status: "uploading",
MinioPrefix: "uploads/partial/",
MimeType: "application/octet-stream",
ExpiresAt: time.Now().Add(48 * time.Hour),
}
db.Create(sess)
// Only chunk 0 is recorded; chunk 1 is deliberately missing.
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 0, MinioKey: "uploads/partial/chunk_00000", SHA256: "c0", Size: 5 << 20, Status: "uploaded"})
_, err := svc.CompleteUpload(context.Background(), sess.ID)
if err == nil {
t.Fatal("expected error for incomplete chunks")
}
if !strings.Contains(err.Error(), "not all chunks uploaded") {
t.Errorf("error = %q", err.Error())
}
}
// TestCompleteUpload_ComposeObjectFails verifies that a ComposeObject
// failure surfaces as an error and leaves the session marked "failed".
func TestCompleteUpload_ComposeObjectFails(t *testing.T) {
db := setupUploadTestDB(t)
cfg := uploadTestConfig()
cfg.ChunkSize = 5 << 20
cfg.MinChunkSize = 1
st := &uploadMockStorage{}
// Force the storage layer to fail when the chunks are composed.
st.composeObjectFn = func(ctx context.Context, bucket, dst string, sources []string) (storage.UploadInfo, error) {
return storage.UploadInfo{}, fmt.Errorf("compose failed")
}
svc := NewUploadService(st, store.NewBlobStore(db), store.NewFileStore(db), store.NewUploadStore(db), cfg, db, zap.NewNop())
fileSHA := "fail123"
sess := &model.UploadSession{
FileName: "fail.bin",
FileSize: 10 << 20,
ChunkSize: 5 << 20,
TotalChunks: 2,
SHA256: fileSHA,
Status: "uploading",
MinioPrefix: "uploads/fail/",
MimeType: "application/octet-stream",
ExpiresAt: time.Now().Add(48 * time.Hour),
}
db.Create(sess)
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 0, MinioKey: "uploads/fail/chunk_00000", SHA256: "c0", Size: 5 << 20, Status: "uploaded"})
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 1, MinioKey: "uploads/fail/chunk_00001", SHA256: "c1", Size: 5 << 20, Status: "uploaded"})
_, err := svc.CompleteUpload(context.Background(), sess.ID)
if err == nil {
t.Fatal("expected error for compose failure")
}
// The session survives the failure so the client can retry it later.
session, _ := svc.uploadStore.GetSession(context.Background(), sess.ID)
if session.Status != "failed" {
t.Errorf("session status = %q, want failed", session.Status)
}
}
// TestCompleteUpload_RetriesFailedSession verifies that a session left
// in "failed" state can be completed on a subsequent attempt once all
// chunks are present, ending in "completed".
func TestCompleteUpload_RetriesFailedSession(t *testing.T) {
db := setupUploadTestDB(t)
cfg := uploadTestConfig()
cfg.ChunkSize = 5 << 20
cfg.MinChunkSize = 1
st := &uploadMockStorage{}
svc := NewUploadService(st, store.NewBlobStore(db), store.NewFileStore(db), store.NewUploadStore(db), cfg, db, zap.NewNop())
fileSHA := "retry123"
// Seed the session already in the "failed" state to model a retry.
sess := &model.UploadSession{
FileName: "retry.bin",
FileSize: 10 << 20,
ChunkSize: 5 << 20,
TotalChunks: 2,
SHA256: fileSHA,
Status: "failed",
MinioPrefix: "uploads/retry/",
MimeType: "application/octet-stream",
ExpiresAt: time.Now().Add(48 * time.Hour),
}
db.Create(sess)
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 0, MinioKey: "uploads/retry/chunk_00000", SHA256: "c0", Size: 5 << 20, Status: "uploaded"})
db.Create(&model.UploadChunk{SessionID: sess.ID, ChunkIndex: 1, MinioKey: "uploads/retry/chunk_00001", SHA256: "c1", Size: 5 << 20, Status: "uploaded"})
fileResp, err := svc.CompleteUpload(context.Background(), sess.ID)
if err != nil {
t.Fatalf("CompleteUpload on retry: %v", err)
}
if fileResp.SHA256 != fileSHA {
t.Errorf("SHA256 mismatch")
}
session, _ := svc.uploadStore.GetSession(context.Background(), sess.ID)
if session.Status != "completed" {
t.Errorf("session status = %q, want completed", session.Status)
}
}
// TestCancelUpload_CleansUp verifies that cancelling an initialized
// upload deletes its session.
func TestCancelUpload_CleansUp(t *testing.T) {
	db := setupUploadTestDB(t)
	st := &uploadMockStorage{}
	svc := newUploadTestService(t, st, db)
	// Fix: the init error was discarded, so a failed init would panic on
	// the type assertion instead of reporting the real cause.
	resp, err := svc.InitUpload(context.Background(), model.InitUploadRequest{
		FileName: "cancel.bin",
		FileSize: 10 << 20,
		SHA256:   "cancel123",
	})
	if err != nil {
		t.Fatalf("InitUpload: %v", err)
	}
	sessResp := resp.(model.UploadSessionResponse)
	if err := svc.CancelUpload(context.Background(), sessResp.ID); err != nil {
		t.Fatalf("CancelUpload: %v", err)
	}
	// GetSession may report an error for the deleted session; only the
	// nil session result matters here.
	session, _ := svc.uploadStore.GetSession(context.Background(), sessResp.ID)
	if session != nil {
		t.Error("session should be deleted")
	}
}
// TestZeroByteFile_CompletesImmediately verifies that a session for a
// zero-byte file (TotalChunks 0) completes without any chunk uploads,
// producing a zero-size file, a blob with RefCount 1, and a session in
// the "completed" state.
func TestZeroByteFile_CompletesImmediately(t *testing.T) {
db := setupUploadTestDB(t)
st := &uploadMockStorage{}
svc := newUploadTestService(t, st, db)
fileSHA := "empty123"
// Empty file: size 0 and no chunks expected.
sess := &model.UploadSession{
FileName: "empty.bin",
FileSize: 0,
ChunkSize: 16 << 20,
TotalChunks: 0,
SHA256: fileSHA,
Status: "uploading",
MinioPrefix: "uploads/empty/",
MimeType: "application/octet-stream",
ExpiresAt: time.Now().Add(48 * time.Hour),
}
db.Create(sess)
fileResp, err := svc.CompleteUpload(context.Background(), sess.ID)
if err != nil {
t.Fatalf("CompleteUpload zero byte: %v", err)
}
if fileResp.SHA256 != fileSHA {
t.Errorf("SHA256 mismatch")
}
if fileResp.Size != 0 {
t.Errorf("Size = %d, want 0", fileResp.Size)
}
var blob model.FileBlob
db.Where("sha256 = ?", fileSHA).First(&blob)
if blob.RefCount != 1 {
t.Errorf("RefCount = %d, want 1", blob.RefCount)
}
uploadStore := store.NewUploadStore(db)
session, _ := uploadStore.GetSession(context.Background(), sess.ID)
if session == nil {
t.Fatal("session should exist")
}
if session.Status != "completed" {
t.Errorf("session status = %q, want completed", session.Status)
}
}