Files
hpc/internal/store/upload_store_test.go
dailz bf89de12f0 feat(store): add blob, file, folder, and upload stores
Add BlobStore (ref counting), FileStore (soft delete + pagination), FolderStore (materialized path), UploadStore (idempotent upsert), and update AutoMigrate.

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-openagent)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
2026-04-15 09:22:44 +08:00

330 lines
9.7 KiB
Go

package store
import (
"context"
"testing"
"time"
"gcy_hpc_server/internal/model"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
// setupUploadTestDB opens a fresh in-memory SQLite database and migrates
// the upload tables so every test starts from a clean schema.
func setupUploadTestDB(t *testing.T) *gorm.DB {
	t.Helper()
	cfg := &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	}
	db, err := gorm.Open(sqlite.Open(":memory:"), cfg)
	if err != nil {
		t.Fatalf("open sqlite: %v", err)
	}
	err = db.AutoMigrate(&model.UploadSession{}, &model.UploadChunk{})
	if err != nil {
		t.Fatalf("migrate: %v", err)
	}
	return db
}
// newTestSession builds an UploadSession fixture with fixed file metadata,
// taking only the status and expiry that vary between tests.
func newTestSession(status string, expiresAt time.Time) *model.UploadSession {
	sess := model.UploadSession{
		FileName:    "test.bin",
		FileSize:    100 * 1024 * 1024,
		ChunkSize:   16 << 20,
		TotalChunks: 7,
		SHA256:      "abc123",
		Status:      status,
		MinioPrefix: "uploads/1/",
		ExpiresAt:   expiresAt,
	}
	return &sess
}
// newTestChunk builds an UploadChunk fixture bound to the given session,
// with fixed key/hash/size values.
func newTestChunk(sessionID int64, index int, status string) *model.UploadChunk {
	chunk := model.UploadChunk{
		SessionID:  sessionID,
		ChunkIndex: index,
		MinioKey:   "uploads/1/chunk_00000",
		SHA256:     "chunk_hash_0",
		Size:       16 << 20,
		Status:     status,
	}
	return &chunk
}
func TestUploadStore_CreateSession(t *testing.T) {
db := setupUploadTestDB(t)
s := NewUploadStore(db)
session := newTestSession("pending", time.Now().Add(48*time.Hour))
if err := s.CreateSession(context.Background(), session); err != nil {
t.Fatalf("CreateSession() error = %v", err)
}
if session.ID <= 0 {
t.Errorf("ID = %d, want positive", session.ID)
}
if session.FileName != "test.bin" {
t.Errorf("FileName = %q, want %q", session.FileName, "test.bin")
}
if session.ExpiresAt.IsZero() {
t.Error("ExpiresAt is zero, want a real timestamp")
}
}
// TestUploadStore_GetSession verifies that a created session can be read
// back with its fields intact.
func TestUploadStore_GetSession(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	ctx := context.Background()
	session := newTestSession("pending", time.Now().Add(48*time.Hour))
	// Fail fast on setup so a broken insert doesn't masquerade as a
	// lookup bug in the assertions below.
	if err := s.CreateSession(ctx, session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	got, err := s.GetSession(ctx, session.ID)
	if err != nil {
		t.Fatalf("GetSession() error = %v", err)
	}
	if got == nil {
		t.Fatal("GetSession() returned nil")
	}
	if got.FileName != session.FileName {
		t.Errorf("FileName = %q, want %q", got.FileName, session.FileName)
	}
	if got.Status != "pending" {
		t.Errorf("Status = %q, want %q", got.Status, "pending")
	}
}
func TestUploadStore_GetSession_NotFound(t *testing.T) {
db := setupUploadTestDB(t)
s := NewUploadStore(db)
got, err := s.GetSession(context.Background(), 99999)
if err != nil {
t.Fatalf("GetSession() error = %v", err)
}
if got != nil {
t.Error("GetSession() expected nil for not-found")
}
}
// TestUploadStore_UpdateSessionStatus verifies that a session's status can
// be changed and the new value is visible on re-read.
func TestUploadStore_UpdateSessionStatus(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	ctx := context.Background()
	session := newTestSession("pending", time.Now().Add(48*time.Hour))
	// Check the setup error explicitly instead of discarding it.
	if err := s.CreateSession(ctx, session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	if err := s.UpdateSessionStatus(ctx, session.ID, "uploading"); err != nil {
		t.Fatalf("UpdateSessionStatus() error = %v", err)
	}
	// The original ignored this error and could nil-deref on got below.
	got, err := s.GetSession(ctx, session.ID)
	if err != nil {
		t.Fatalf("GetSession() error = %v", err)
	}
	if got == nil {
		t.Fatal("GetSession() returned nil after update")
	}
	if got.Status != "uploading" {
		t.Errorf("Status = %q, want %q", got.Status, "uploading")
	}
}
func TestUploadStore_UpdateSessionStatus_NotFound(t *testing.T) {
db := setupUploadTestDB(t)
s := NewUploadStore(db)
err := s.UpdateSessionStatus(context.Background(), 99999, "uploading")
if err == nil {
t.Fatal("UpdateSessionStatus() expected error for not-found, got nil")
}
}
// TestUploadStore_GetSessionWithChunks verifies that a session and its
// chunks load together, with chunks ordered by index (gaps preserved).
func TestUploadStore_GetSessionWithChunks(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	ctx := context.Background()
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	// The original discarded all setup errors; a failed insert here would
	// surface as a confusing chunk-count mismatch below.
	if err := s.CreateSession(ctx, session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	// Indices 0 and 2 only — index 1 deliberately missing.
	for _, c := range []*model.UploadChunk{
		{SessionID: session.ID, ChunkIndex: 0, MinioKey: "uploads/1/chunk_0", SHA256: "h0", Size: 16 << 20, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 2, MinioKey: "uploads/1/chunk_2", SHA256: "h2", Size: 16 << 20, Status: "uploaded"},
	} {
		if err := s.UpsertChunk(ctx, c); err != nil {
			t.Fatalf("UpsertChunk(index=%d) error = %v", c.ChunkIndex, err)
		}
	}
	gotSession, chunks, err := s.GetSessionWithChunks(ctx, session.ID)
	if err != nil {
		t.Fatalf("GetSessionWithChunks() error = %v", err)
	}
	if gotSession == nil {
		t.Fatal("GetSessionWithChunks() session is nil")
	}
	if len(chunks) != 2 {
		t.Fatalf("len(chunks) = %d, want 2", len(chunks))
	}
	if chunks[0].ChunkIndex != 0 {
		t.Errorf("chunks[0].ChunkIndex = %d, want 0", chunks[0].ChunkIndex)
	}
	if chunks[1].ChunkIndex != 2 {
		t.Errorf("chunks[1].ChunkIndex = %d, want 2", chunks[1].ChunkIndex)
	}
}
func TestUploadStore_GetSessionWithChunks_NotFound(t *testing.T) {
db := setupUploadTestDB(t)
s := NewUploadStore(db)
gotSession, chunks, err := s.GetSessionWithChunks(context.Background(), 99999)
if err != nil {
t.Fatalf("GetSessionWithChunks() error = %v", err)
}
if gotSession != nil {
t.Error("expected nil session for not-found")
}
if chunks != nil {
t.Error("expected nil chunks for not-found")
}
}
// TestUploadStore_UpsertChunk_Idempotent verifies that re-upserting the
// same (session, index) pair keeps a single row and updates its fields.
func TestUploadStore_UpsertChunk_Idempotent(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	ctx := context.Background()
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	if err := s.CreateSession(ctx, session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	chunk := &model.UploadChunk{
		SessionID: session.ID, ChunkIndex: 0, MinioKey: "uploads/1/chunk_0", SHA256: "hash_v1", Size: 1024, Status: "uploaded",
	}
	if err := s.UpsertChunk(ctx, chunk); err != nil {
		t.Fatalf("first UpsertChunk() error = %v", err)
	}
	// Same key (session, index 0) with new hash/size must update in place.
	chunk2 := &model.UploadChunk{
		SessionID: session.ID, ChunkIndex: 0, MinioKey: "uploads/1/chunk_0", SHA256: "hash_v2", Size: 2048, Status: "uploaded",
	}
	if err := s.UpsertChunk(ctx, chunk2); err != nil {
		t.Fatalf("second UpsertChunk() error = %v", err)
	}
	// The original discarded this error, which could hide a query failure.
	indices, err := s.GetUploadedChunkIndices(ctx, session.ID)
	if err != nil {
		t.Fatalf("GetUploadedChunkIndices() error = %v", err)
	}
	if len(indices) != 1 {
		t.Errorf("len(indices) = %d, want 1 (idempotent)", len(indices))
	}
	var got model.UploadChunk
	// On a failed query got would stay zero-valued and the SHA256 check
	// below would report a misleading mismatch; check the error first.
	if err := db.Where("session_id = ? AND chunk_index = ?", session.ID, 0).First(&got).Error; err != nil {
		t.Fatalf("query chunk row: %v", err)
	}
	if got.SHA256 != "hash_v2" {
		t.Errorf("SHA256 = %q, want %q (updated on conflict)", got.SHA256, "hash_v2")
	}
}
// TestUploadStore_GetUploadedChunkIndices verifies that only chunks with
// status "uploaded" are returned, sorted by index.
func TestUploadStore_GetUploadedChunkIndices(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	ctx := context.Background()
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	// The original ignored every setup error; fail fast instead.
	if err := s.CreateSession(ctx, session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	for _, c := range []*model.UploadChunk{
		{SessionID: session.ID, ChunkIndex: 0, MinioKey: "k0", Size: 100, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 1, MinioKey: "k1", Size: 100, Status: "pending"},
		{SessionID: session.ID, ChunkIndex: 2, MinioKey: "k2", Size: 100, Status: "uploaded"},
	} {
		if err := s.UpsertChunk(ctx, c); err != nil {
			t.Fatalf("UpsertChunk(index=%d) error = %v", c.ChunkIndex, err)
		}
	}
	indices, err := s.GetUploadedChunkIndices(ctx, session.ID)
	if err != nil {
		t.Fatalf("GetUploadedChunkIndices() error = %v", err)
	}
	if len(indices) != 2 {
		t.Fatalf("len(indices) = %d, want 2", len(indices))
	}
	if indices[0] != 0 || indices[1] != 2 {
		t.Errorf("indices = %v, want [0 2]", indices)
	}
}
// TestUploadStore_CountUploadedChunks verifies that only chunks with
// status "uploaded" are counted.
func TestUploadStore_CountUploadedChunks(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	ctx := context.Background()
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	// The original ignored every setup error; fail fast instead.
	if err := s.CreateSession(ctx, session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	for _, c := range []*model.UploadChunk{
		{SessionID: session.ID, ChunkIndex: 0, MinioKey: "k0", Size: 100, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 1, MinioKey: "k1", Size: 100, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 2, MinioKey: "k2", Size: 100, Status: "pending"},
	} {
		if err := s.UpsertChunk(ctx, c); err != nil {
			t.Fatalf("UpsertChunk(index=%d) error = %v", c.ChunkIndex, err)
		}
	}
	count, err := s.CountUploadedChunks(ctx, session.ID)
	if err != nil {
		t.Fatalf("CountUploadedChunks() error = %v", err)
	}
	if count != 2 {
		t.Errorf("count = %d, want 2", count)
	}
}
// TestUploadStore_ListExpiredSessions verifies that only sessions that are
// both past their expiry AND not completed are listed.
func TestUploadStore_ListExpiredSessions(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	ctx := context.Background()
	past := time.Now().Add(-1 * time.Hour)
	future := time.Now().Add(48 * time.Hour)
	expired := newTestSession("pending", past)
	expired.FileName = "expired.bin"
	active := newTestSession("uploading", future)
	active.FileName = "active.bin"
	// Expired but completed: must NOT be listed for cleanup.
	completed := newTestSession("completed", past)
	completed.FileName = "completed.bin"
	// The original ignored all three insert errors; fail fast instead.
	for _, sess := range []*model.UploadSession{expired, active, completed} {
		if err := s.CreateSession(ctx, sess); err != nil {
			t.Fatalf("CreateSession(%s) error = %v", sess.FileName, err)
		}
	}
	sessions, err := s.ListExpiredSessions(ctx)
	if err != nil {
		t.Fatalf("ListExpiredSessions() error = %v", err)
	}
	if len(sessions) != 1 {
		t.Fatalf("len(sessions) = %d, want 1", len(sessions))
	}
	if sessions[0].FileName != "expired.bin" {
		t.Errorf("FileName = %q, want %q", sessions[0].FileName, "expired.bin")
	}
}
// TestUploadStore_DeleteSession verifies that deleting a session removes
// both the session row and all of its chunk rows.
func TestUploadStore_DeleteSession(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	ctx := context.Background()
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	// The original ignored every setup error; fail fast instead.
	if err := s.CreateSession(ctx, session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	for _, c := range []*model.UploadChunk{
		{SessionID: session.ID, ChunkIndex: 0, MinioKey: "k0", Size: 100, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 1, MinioKey: "k1", Size: 100, Status: "uploaded"},
	} {
		if err := s.UpsertChunk(ctx, c); err != nil {
			t.Fatalf("UpsertChunk(index=%d) error = %v", c.ChunkIndex, err)
		}
	}
	if err := s.DeleteSession(ctx, session.ID); err != nil {
		t.Fatalf("DeleteSession() error = %v", err)
	}
	got, err := s.GetSession(ctx, session.ID)
	if err != nil {
		t.Fatalf("GetSession() error = %v", err)
	}
	if got != nil {
		t.Error("session still exists after delete")
	}
	var chunkCount int64
	// Check the count query's error so a broken query can't read as
	// "0 chunks remaining".
	if err := db.Model(&model.UploadChunk{}).Where("session_id = ?", session.ID).Count(&chunkCount).Error; err != nil {
		t.Fatalf("count chunks: %v", err)
	}
	if chunkCount != 0 {
		t.Errorf("chunkCount = %d, want 0 after delete", chunkCount)
	}
}