feat(store): add blob, file, folder, and upload stores

Add BlobStore (ref counting), FileStore (soft delete + pagination), FolderStore (materialized path), UploadStore (idempotent upsert), and update AutoMigrate.

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-openagent)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
This commit is contained in:
dailz
2026-04-15 09:22:44 +08:00
parent c861ff3adf
commit bf89de12f0
9 changed files with 1442 additions and 1 deletions

View File

@@ -0,0 +1,99 @@
package store
import (
"context"
"errors"
"gcy_hpc_server/internal/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// BlobStore manages physical file blobs with reference counting.
// ref_count tracks how many references a blob has (see IncrementRef /
// DecrementRef); a blob at zero is a candidate for physical cleanup.
type BlobStore struct {
	db *gorm.DB // shared GORM handle; every query derives from it
}
// NewBlobStore returns a BlobStore backed by the given GORM handle.
func NewBlobStore(db *gorm.DB) *BlobStore {
	store := BlobStore{db: db}
	return &store
}
// Create inserts a new FileBlob record.
func (s *BlobStore) Create(ctx context.Context, blob *model.FileBlob) error {
	err := s.db.WithContext(ctx).Create(blob).Error
	return err
}
// GetBySHA256 looks up the FileBlob whose sha256 column equals the given
// hash. A missing row is not an error: it returns (nil, nil).
func (s *BlobStore) GetBySHA256(ctx context.Context, sha256 string) (*model.FileBlob, error) {
	blob := new(model.FileBlob)
	switch err := s.db.WithContext(ctx).Where("sha256 = ?", sha256).First(blob).Error; {
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, nil
	case err != nil:
		return nil, err
	}
	return blob, nil
}
// IncrementRef atomically bumps ref_count by one for the blob with the
// given SHA256. Returns gorm.ErrRecordNotFound when no such blob exists.
func (s *BlobStore) IncrementRef(ctx context.Context, sha256 string) error {
	res := s.db.WithContext(ctx).Model(&model.FileBlob{}).
		Where("sha256 = ?", sha256).
		UpdateColumn("ref_count", gorm.Expr("ref_count + 1"))
	if err := res.Error; err != nil {
		return err
	}
	if res.RowsAffected > 0 {
		return nil
	}
	return gorm.ErrRecordNotFound
}
// DecrementRef atomically decrements ref_count for the blob with the given
// SHA256 and returns the new ref_count.
//
// The UPDATE only matches rows with ref_count > 0, so the counter can never
// go negative; gorm.ErrRecordNotFound is returned when the blob is missing
// or already at zero. The decrement and the follow-up read now run inside a
// single transaction: previously they were two independent statements, so a
// concurrent increment/decrement could land between them and the returned
// count would not reflect this call's effect.
func (s *BlobStore) DecrementRef(ctx context.Context, sha256 string) (int64, error) {
	var newCount int64
	err := s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		result := tx.Model(&model.FileBlob{}).
			Where("sha256 = ? AND ref_count > 0", sha256).
			UpdateColumn("ref_count", gorm.Expr("ref_count - 1"))
		if result.Error != nil {
			return result.Error
		}
		if result.RowsAffected == 0 {
			return gorm.ErrRecordNotFound
		}
		// Re-read within the same transaction so we observe our own write.
		var blob model.FileBlob
		if err := tx.Where("sha256 = ?", sha256).First(&blob).Error; err != nil {
			return err
		}
		newCount = int64(blob.RefCount)
		return nil
	})
	if err != nil {
		return 0, err
	}
	return newCount, nil
}
// Delete hard-deletes the FileBlob row with the given SHA256.
// Deleting a nonexistent blob is a no-op and returns nil.
func (s *BlobStore) Delete(ctx context.Context, sha256 string) error {
	return s.db.WithContext(ctx).
		Where("sha256 = ?", sha256).
		Delete(&model.FileBlob{}).
		Error
}
// GetBySHA256ForUpdate loads the FileBlob under a SELECT ... FOR UPDATE row
// lock inside the caller-supplied transaction tx.
// Returns (nil, nil) when the blob does not exist.
func (s *BlobStore) GetBySHA256ForUpdate(ctx context.Context, tx *gorm.DB, sha256 string) (*model.FileBlob, error) {
	locked := tx.WithContext(ctx).Clauses(clause.Locking{Strength: "UPDATE"})
	var blob model.FileBlob
	if err := locked.Where("sha256 = ?", sha256).First(&blob).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, err
	}
	return &blob, nil
}

View File

@@ -0,0 +1,146 @@
package store
import (
"context"
"testing"
"gcy_hpc_server/internal/model"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
// setupBlobTestDB opens an in-memory SQLite database with a silent logger
// and migrates the FileBlob schema for use in BlobStore tests.
func setupBlobTestDB(t *testing.T) *gorm.DB {
	t.Helper()
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		t.Fatalf("open sqlite: %v", err)
	}
	if err := db.AutoMigrate(&model.FileBlob{}); err != nil {
		t.Fatalf("migrate: %v", err)
	}
	return db
}
// TestBlobStore_Create verifies a blob row is inserted and assigned an ID.
func TestBlobStore_Create(t *testing.T) {
	db := setupBlobTestDB(t)
	store := NewBlobStore(db)
	ctx := context.Background()
	blob := &model.FileBlob{
		SHA256:   "abc123",
		MinioKey: "files/abc123",
		FileSize: 1024,
		MimeType: "application/octet-stream",
		RefCount: 0,
	}
	if err := store.Create(ctx, blob); err != nil {
		t.Fatalf("Create() error = %v", err)
	}
	if blob.ID == 0 {
		t.Error("Create() did not set ID")
	}
}

// TestBlobStore_GetBySHA256 verifies lookup by hash returns the stored row.
func TestBlobStore_GetBySHA256(t *testing.T) {
	db := setupBlobTestDB(t)
	store := NewBlobStore(db)
	ctx := context.Background()
	// Fail fast on fixture setup instead of silently ignoring the error.
	if err := store.Create(ctx, &model.FileBlob{SHA256: "abc", MinioKey: "files/abc", FileSize: 100, RefCount: 0}); err != nil {
		t.Fatalf("setup Create() error = %v", err)
	}
	blob, err := store.GetBySHA256(ctx, "abc")
	if err != nil {
		t.Fatalf("GetBySHA256() error = %v", err)
	}
	if blob == nil {
		t.Fatal("GetBySHA256() returned nil")
	}
	if blob.SHA256 != "abc" {
		t.Errorf("SHA256 = %q, want %q", blob.SHA256, "abc")
	}
	if blob.RefCount != 0 {
		t.Errorf("RefCount = %d, want 0", blob.RefCount)
	}
}

// TestBlobStore_GetBySHA256_NotFound checks the (nil, nil) not-found contract.
func TestBlobStore_GetBySHA256_NotFound(t *testing.T) {
	db := setupBlobTestDB(t)
	store := NewBlobStore(db)
	ctx := context.Background()
	blob, err := store.GetBySHA256(ctx, "nonexistent")
	if err != nil {
		t.Fatalf("GetBySHA256() error = %v", err)
	}
	if blob != nil {
		t.Error("GetBySHA256() should return nil for not found")
	}
}
// TestBlobStore_IncrementDecrementRef walks ref_count 0→1→2→1→0, checking
// both the stored value and DecrementRef's returned count at each step.
// All previously ignored store errors are now checked.
func TestBlobStore_IncrementDecrementRef(t *testing.T) {
	db := setupBlobTestDB(t)
	store := NewBlobStore(db)
	ctx := context.Background()
	if err := store.Create(ctx, &model.FileBlob{SHA256: "abc", MinioKey: "files/abc", FileSize: 100, RefCount: 0}); err != nil {
		t.Fatalf("setup Create() error = %v", err)
	}
	if err := store.IncrementRef(ctx, "abc"); err != nil {
		t.Fatalf("IncrementRef() error = %v", err)
	}
	blob, err := store.GetBySHA256(ctx, "abc")
	if err != nil {
		t.Fatalf("GetBySHA256() error = %v", err)
	}
	if blob.RefCount != 1 {
		t.Errorf("RefCount after 1st increment = %d, want 1", blob.RefCount)
	}
	if err := store.IncrementRef(ctx, "abc"); err != nil {
		t.Fatalf("IncrementRef() error = %v", err)
	}
	blob, err = store.GetBySHA256(ctx, "abc")
	if err != nil {
		t.Fatalf("GetBySHA256() error = %v", err)
	}
	if blob.RefCount != 2 {
		t.Errorf("RefCount after 2nd increment = %d, want 2", blob.RefCount)
	}
	refCount, err := store.DecrementRef(ctx, "abc")
	if err != nil {
		t.Fatalf("DecrementRef() error = %v", err)
	}
	if refCount != 1 {
		t.Errorf("DecrementRef() returned %d, want 1", refCount)
	}
	refCount, err = store.DecrementRef(ctx, "abc")
	if err != nil {
		t.Fatalf("DecrementRef() error = %v", err)
	}
	if refCount != 0 {
		t.Errorf("DecrementRef() returned %d, want 0", refCount)
	}
}

// TestBlobStore_Delete verifies hard delete removes the row.
func TestBlobStore_Delete(t *testing.T) {
	db := setupBlobTestDB(t)
	store := NewBlobStore(db)
	ctx := context.Background()
	if err := store.Create(ctx, &model.FileBlob{SHA256: "abc", MinioKey: "files/abc", FileSize: 100, RefCount: 0}); err != nil {
		t.Fatalf("setup Create() error = %v", err)
	}
	if err := store.Delete(ctx, "abc"); err != nil {
		t.Fatalf("Delete() error = %v", err)
	}
	blob, err := store.GetBySHA256(ctx, "abc")
	if err != nil {
		t.Fatalf("GetBySHA256() error = %v", err)
	}
	if blob != nil {
		t.Error("Delete() did not remove blob")
	}
}

// TestBlobStore_SHA256_UniqueConstraint expects a duplicate SHA256 insert
// to fail on the unique index.
func TestBlobStore_SHA256_UniqueConstraint(t *testing.T) {
	db := setupBlobTestDB(t)
	store := NewBlobStore(db)
	ctx := context.Background()
	if err := store.Create(ctx, &model.FileBlob{SHA256: "dup", MinioKey: "files/dup1", FileSize: 100}); err != nil {
		t.Fatalf("setup Create() error = %v", err)
	}
	err := store.Create(ctx, &model.FileBlob{SHA256: "dup", MinioKey: "files/dup2", FileSize: 200})
	if err == nil {
		t.Error("expected error for duplicate SHA256, got nil")
	}
}

View File

@@ -0,0 +1,99 @@
package store
import (
"context"
"errors"
"gcy_hpc_server/internal/model"
"gorm.io/gorm"
)
// FileStore persists logical file metadata (model.File). Per the commit
// description and tests, File rows are soft-deleted, so reads automatically
// exclude deleted rows.
type FileStore struct {
	db *gorm.DB // shared GORM handle
}
// NewFileStore returns a FileStore backed by the given GORM handle.
func NewFileStore(db *gorm.DB) *FileStore {
	store := FileStore{db: db}
	return &store
}
// Create inserts a new File record.
func (s *FileStore) Create(ctx context.Context, file *model.File) error {
	err := s.db.WithContext(ctx).Create(file).Error
	return err
}
// GetByID fetches a file by primary key. Returns (nil, nil) when no file
// with that id exists (or it was soft-deleted).
func (s *FileStore) GetByID(ctx context.Context, id int64) (*model.File, error) {
	file := new(model.File)
	switch err := s.db.WithContext(ctx).First(file, id).Error; {
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, nil
	case err != nil:
		return nil, err
	}
	return file, nil
}
// List returns one page of files in the given folder (nil folderID selects
// the root, i.e. folder_id IS NULL), ordered by id DESC, plus the total
// number of matching rows. page is 1-based; pages below 1 are clamped so
// the query never runs with a negative OFFSET.
func (s *FileStore) List(ctx context.Context, folderID *int64, page, pageSize int) ([]model.File, int64, error) {
	query := s.db.WithContext(ctx).Model(&model.File{})
	if folderID == nil {
		query = query.Where("folder_id IS NULL")
	} else {
		query = query.Where("folder_id = ?", *folderID)
	}
	return paginateFiles(query, page, pageSize)
}

// Search returns one page of files whose name contains queryStr (substring
// match via LIKE), ordered by id DESC, plus the total match count.
// NOTE(review): LIKE metacharacters ("%", "_") in queryStr act as wildcards;
// escape them if literal matching ever matters.
func (s *FileStore) Search(ctx context.Context, queryStr string, page, pageSize int) ([]model.File, int64, error) {
	query := s.db.WithContext(ctx).Model(&model.File{}).Where("name LIKE ?", "%"+queryStr+"%")
	return paginateFiles(query, page, pageSize)
}

// paginateFiles runs the count + page query shared by List and Search,
// removing the duplicated pagination logic the two methods used to carry.
func paginateFiles(query *gorm.DB, page, pageSize int) ([]model.File, int64, error) {
	if page < 1 {
		page = 1 // page is 1-based; guard against a negative OFFSET
	}
	var total int64
	if err := query.Count(&total).Error; err != nil {
		return nil, 0, err
	}
	var files []model.File
	offset := (page - 1) * pageSize
	if err := query.Order("id DESC").Limit(pageSize).Offset(offset).Find(&files).Error; err != nil {
		return nil, 0, err
	}
	return files, total, nil
}
// Delete removes the file with the given id via GORM Delete (a soft delete
// per the File model — see the tests). Deleting a missing id is a no-op.
func (s *FileStore) Delete(ctx context.Context, id int64) error {
	return s.db.WithContext(ctx).Delete(&model.File{}, id).Error
}
// CountByBlobSHA256 returns how many (non-deleted) files reference the blob
// with the given hash.
func (s *FileStore) CountByBlobSHA256(ctx context.Context, blobSHA256 string) (int64, error) {
	var n int64
	if err := s.db.WithContext(ctx).
		Model(&model.File{}).
		Where("blob_sha256 = ?", blobSHA256).
		Count(&n).Error; err != nil {
		return 0, err
	}
	return n, nil
}
// GetBlobSHA256ByID returns the blob_sha256 of the file with the given id.
// A missing file yields ("", nil) — indistinguishable from an empty hash,
// so callers should treat "" as "no blob".
func (s *FileStore) GetBlobSHA256ByID(ctx context.Context, id int64) (string, error) {
	var file model.File
	switch err := s.db.WithContext(ctx).Select("blob_sha256").First(&file, id).Error; {
	case errors.Is(err, gorm.ErrRecordNotFound):
		return "", nil
	case err != nil:
		return "", err
	}
	return file.BlobSHA256, nil
}

View File

@@ -0,0 +1,245 @@
package store
import (
"context"
"testing"
"gcy_hpc_server/internal/model"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
// setupFileTestDB opens an in-memory SQLite database and migrates the File
// and FileBlob schemas for use in FileStore tests.
func setupFileTestDB(t *testing.T) *gorm.DB {
	t.Helper()
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		t.Fatalf("open sqlite: %v", err)
	}
	if err := db.AutoMigrate(&model.File{}, &model.FileBlob{}); err != nil {
		t.Fatalf("migrate: %v", err)
	}
	return db
}
// TestFileStore_CreateAndGetByID round-trips a file through Create and GetByID.
func TestFileStore_CreateAndGetByID(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	file := &model.File{
		Name:       "test.bin",
		BlobSHA256: "abc123",
	}
	if err := store.Create(ctx, file); err != nil {
		t.Fatalf("Create() error = %v", err)
	}
	if file.ID == 0 {
		t.Fatal("Create() did not set ID")
	}
	got, err := store.GetByID(ctx, file.ID)
	if err != nil {
		t.Fatalf("GetByID() error = %v", err)
	}
	if got == nil {
		t.Fatal("GetByID() returned nil")
	}
	if got.Name != "test.bin" {
		t.Errorf("Name = %q, want %q", got.Name, "test.bin")
	}
	if got.BlobSHA256 != "abc123" {
		t.Errorf("BlobSHA256 = %q, want %q", got.BlobSHA256, "abc123")
	}
}

// TestFileStore_GetByID_NotFound checks the (nil, nil) not-found contract.
func TestFileStore_GetByID_NotFound(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	got, err := store.GetByID(ctx, 999)
	if err != nil {
		t.Fatalf("GetByID() error = %v", err)
	}
	if got != nil {
		t.Error("GetByID() should return nil for not found")
	}
}
// TestFileStore_ListByFolder lists only the files inside a given folder.
// NOTE(review): fixture Create errors are ignored in these tests; a setup
// failure would only surface indirectly via the count assertions.
func TestFileStore_ListByFolder(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	folderID := int64(1)
	store.Create(ctx, &model.File{Name: "f1.bin", BlobSHA256: "a1", FolderID: &folderID})
	store.Create(ctx, &model.File{Name: "f2.bin", BlobSHA256: "a2", FolderID: &folderID})
	store.Create(ctx, &model.File{Name: "root.bin", BlobSHA256: "a3"}) // root (folder_id=nil)
	files, total, err := store.List(ctx, &folderID, 1, 10)
	if err != nil {
		t.Fatalf("List() error = %v", err)
	}
	if total != 2 {
		t.Errorf("total = %d, want 2", total)
	}
	if len(files) != 2 {
		t.Errorf("len(files) = %d, want 2", len(files))
	}
}

// TestFileStore_ListRootFolder verifies nil folderID selects root files only.
func TestFileStore_ListRootFolder(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	store.Create(ctx, &model.File{Name: "root.bin", BlobSHA256: "a1"})
	folderID := int64(1)
	store.Create(ctx, &model.File{Name: "sub.bin", BlobSHA256: "a2", FolderID: &folderID})
	files, total, err := store.List(ctx, nil, 1, 10)
	if err != nil {
		t.Fatalf("List() error = %v", err)
	}
	if total != 1 {
		t.Errorf("total = %d, want 1", total)
	}
	if len(files) != 1 {
		t.Errorf("len(files) = %d, want 1", len(files))
	}
	if files[0].Name != "root.bin" {
		t.Errorf("files[0].Name = %q, want %q", files[0].Name, "root.bin")
	}
}

// TestFileStore_Pagination checks page size and the short final page
// across 25 rows with pageSize 10.
func TestFileStore_Pagination(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	for i := 0; i < 25; i++ {
		store.Create(ctx, &model.File{Name: "file.bin", BlobSHA256: "hash"})
	}
	files, total, err := store.List(ctx, nil, 1, 10)
	if err != nil {
		t.Fatalf("List() error = %v", err)
	}
	if total != 25 {
		t.Errorf("total = %d, want 25", total)
	}
	if len(files) != 10 {
		t.Errorf("page 1 len = %d, want 10", len(files))
	}
	files, _, _ = store.List(ctx, nil, 3, 10)
	if len(files) != 5 {
		t.Errorf("page 3 len = %d, want 5", len(files))
	}
}
// TestFileStore_Search verifies substring matching on the name column.
func TestFileStore_Search(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	store.Create(ctx, &model.File{Name: "experiment_results.csv", BlobSHA256: "a1"})
	store.Create(ctx, &model.File{Name: "training_log.txt", BlobSHA256: "a2"})
	store.Create(ctx, &model.File{Name: "model_weights.bin", BlobSHA256: "a3"})
	files, total, err := store.Search(ctx, "results", 1, 10)
	if err != nil {
		t.Fatalf("Search() error = %v", err)
	}
	if total != 1 {
		t.Errorf("total = %d, want 1", total)
	}
	if len(files) != 1 || files[0].Name != "experiment_results.csv" {
		t.Errorf("expected experiment_results.csv, got %v", files)
	}
}

// TestFileStore_Delete_SoftDelete verifies deleted files disappear from
// both GetByID and List (GORM soft delete).
func TestFileStore_Delete_SoftDelete(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	file := &model.File{Name: "deleteme.bin", BlobSHA256: "abc"}
	store.Create(ctx, file)
	if err := store.Delete(ctx, file.ID); err != nil {
		t.Fatalf("Delete() error = %v", err)
	}
	// GetByID should return nil (soft deleted)
	got, err := store.GetByID(ctx, file.ID)
	if err != nil {
		t.Fatalf("GetByID() error = %v", err)
	}
	if got != nil {
		t.Error("GetByID() should return nil after soft delete")
	}
	// List should not include soft deleted
	_, total, _ := store.List(ctx, nil, 1, 10)
	if total != 0 {
		t.Errorf("total after delete = %d, want 0", total)
	}
}
// TestFileStore_CountByBlobSHA256 verifies reference counting by blob hash,
// including that soft-deleted files stop being counted. The delete now uses
// the actually-created row's ID instead of a hardcoded 1, and all store
// errors are checked.
func TestFileStore_CountByBlobSHA256(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	first := &model.File{Name: "a.bin", BlobSHA256: "shared_hash"}
	if err := store.Create(ctx, first); err != nil {
		t.Fatalf("Create(a.bin) error = %v", err)
	}
	for _, name := range []string{"b.bin", "c.bin"} {
		if err := store.Create(ctx, &model.File{Name: name, BlobSHA256: "shared_hash"}); err != nil {
			t.Fatalf("Create(%s) error = %v", name, err)
		}
	}
	count, err := store.CountByBlobSHA256(ctx, "shared_hash")
	if err != nil {
		t.Fatalf("CountByBlobSHA256() error = %v", err)
	}
	if count != 3 {
		t.Errorf("count = %d, want 3", count)
	}
	// Soft delete one file — use the created ID, not a hardcoded primary key.
	if err := store.Delete(ctx, first.ID); err != nil {
		t.Fatalf("Delete() error = %v", err)
	}
	count, err = store.CountByBlobSHA256(ctx, "shared_hash")
	if err != nil {
		t.Fatalf("CountByBlobSHA256() after delete error = %v", err)
	}
	if count != 2 {
		t.Errorf("count after soft delete = %d, want 2", count)
	}
}
// TestFileStore_GetBlobSHA256ByID verifies the blob hash is returned for an
// existing file.
func TestFileStore_GetBlobSHA256ByID(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	file := &model.File{Name: "test.bin", BlobSHA256: "my_hash"}
	store.Create(ctx, file)
	sha256, err := store.GetBlobSHA256ByID(ctx, file.ID)
	if err != nil {
		t.Fatalf("GetBlobSHA256ByID() error = %v", err)
	}
	if sha256 != "my_hash" {
		t.Errorf("sha256 = %q, want %q", sha256, "my_hash")
	}
}

// TestFileStore_GetBlobSHA256ByID_NotFound checks the ("", nil) not-found
// contract.
func TestFileStore_GetBlobSHA256ByID_NotFound(t *testing.T) {
	db := setupFileTestDB(t)
	store := NewFileStore(db)
	ctx := context.Background()
	sha256, err := store.GetBlobSHA256ByID(ctx, 999)
	if err != nil {
		t.Fatalf("GetBlobSHA256ByID() error = %v", err)
	}
	if sha256 != "" {
		t.Errorf("sha256 = %q, want empty for not found", sha256)
	}
}

View File

@@ -0,0 +1,105 @@
package store
import (
	"context"
	"errors"
	"fmt"
	"strings"

	"gcy_hpc_server/internal/model"

	"gorm.io/gorm"
)
// FolderStore persists folder metadata organized as a materialized-path
// tree: each row stores its full path (e.g. "/data/results/"), which
// GetSubTree uses for prefix queries.
type FolderStore struct {
	db *gorm.DB // shared GORM handle
}
// NewFolderStore returns a FolderStore backed by the given GORM handle.
func NewFolderStore(db *gorm.DB) *FolderStore {
	store := FolderStore{db: db}
	return &store
}
// Create inserts a new Folder record.
func (s *FolderStore) Create(ctx context.Context, folder *model.Folder) error {
	err := s.db.WithContext(ctx).Create(folder).Error
	return err
}
// GetByID fetches a folder by primary key. Returns (nil, nil) when no
// folder with that id exists.
func (s *FolderStore) GetByID(ctx context.Context, id int64) (*model.Folder, error) {
	folder := new(model.Folder)
	switch err := s.db.WithContext(ctx).First(folder, id).Error; {
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, nil
	case err != nil:
		return nil, err
	}
	return folder, nil
}
// GetByPath fetches a folder by its exact materialized path.
// Returns (nil, nil) when no folder has that path.
func (s *FolderStore) GetByPath(ctx context.Context, path string) (*model.Folder, error) {
	folder := new(model.Folder)
	switch err := s.db.WithContext(ctx).Where("path = ?", path).First(folder).Error; {
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, nil
	case err != nil:
		return nil, err
	}
	return folder, nil
}
// ListByParentID lists the direct children of a folder, sorted by name.
// A nil parentID selects the root folders (parent_id IS NULL).
func (s *FolderStore) ListByParentID(ctx context.Context, parentID *int64) ([]model.Folder, error) {
	query := s.db.WithContext(ctx)
	if parentID != nil {
		query = query.Where("parent_id = ?", *parentID)
	} else {
		query = query.Where("parent_id IS NULL")
	}
	var folders []model.Folder
	err := query.Order("name ASC").Find(&folders).Error
	if err != nil {
		return nil, err
	}
	return folders, nil
}
// GetSubTree returns every folder whose materialized path starts with the
// given prefix (by convention paths end with "/", e.g. "/data/", so this
// selects the folder itself plus all descendants).
//
// LIKE metacharacters in path ("%", "_") previously acted as wildcards,
// turning the prefix match into an unintended pattern match. They are now
// escaped with a '!' escape character (supported by SQLite and MySQL) so
// the match is a literal prefix.
func (s *FolderStore) GetSubTree(ctx context.Context, path string) ([]model.Folder, error) {
	// Escape the escape char first, then LIKE's wildcards.
	escaped := strings.NewReplacer("!", "!!", "%", "!%", "_", "!_").Replace(path)
	var folders []model.Folder
	if err := s.db.WithContext(ctx).
		Where("path LIKE ? ESCAPE '!'", escaped+"%").
		Find(&folders).Error; err != nil {
		return nil, err
	}
	return folders, nil
}
// HasChildren reports whether the folder has any sub-folders or files.
// A nonexistent folder reports (false, nil).
func (s *FolderStore) HasChildren(ctx context.Context, id int64) (bool, error) {
	folder, err := s.GetByID(ctx, id)
	if err != nil || folder == nil {
		return false, err
	}
	// Sub-folders first: any direct child folder means "has children".
	var n int64
	if err := s.db.WithContext(ctx).Model(&model.Folder{}).Where("parent_id = ?", id).Count(&n).Error; err != nil {
		return false, fmt.Errorf("count sub-folders: %w", err)
	}
	if n > 0 {
		return true, nil
	}
	// Otherwise fall back to checking for files in the folder.
	n = 0
	if err := s.db.WithContext(ctx).Model(&model.File{}).Where("folder_id = ?", id).Count(&n).Error; err != nil {
		return false, fmt.Errorf("count files: %w", err)
	}
	return n > 0, nil
}
// Delete removes the folder with the given id via GORM Delete (a soft
// delete per the Folder model — see the tests). Deleting a missing id is a
// no-op and returns nil (idempotent).
func (s *FolderStore) Delete(ctx context.Context, id int64) error {
	return s.db.WithContext(ctx).Delete(&model.Folder{}, id).Error
}

View File

@@ -0,0 +1,294 @@
package store
import (
"context"
"testing"
"gcy_hpc_server/internal/model"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
// setupFolderTestDB opens an in-memory SQLite database with a silent logger
// and migrates the Folder and File schemas for use in FolderStore tests.
func setupFolderTestDB(t *testing.T) *gorm.DB {
	t.Helper()
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		t.Fatalf("open sqlite: %v", err)
	}
	if err := db.AutoMigrate(&model.Folder{}, &model.File{}); err != nil {
		t.Fatalf("migrate: %v", err)
	}
	return db
}
// TestFolderStore_CreateAndGetByID round-trips a folder through Create and
// GetByID.
func TestFolderStore_CreateAndGetByID(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	folder := &model.Folder{
		Name: "data",
		Path: "/data/",
	}
	if err := s.Create(context.Background(), folder); err != nil {
		t.Fatalf("Create() error = %v", err)
	}
	if folder.ID <= 0 {
		t.Fatalf("Create() id = %d, want positive", folder.ID)
	}
	got, err := s.GetByID(context.Background(), folder.ID)
	if err != nil {
		t.Fatalf("GetByID() error = %v", err)
	}
	if got == nil {
		t.Fatal("GetByID() returned nil")
	}
	if got.Name != "data" {
		t.Errorf("Name = %q, want %q", got.Name, "data")
	}
	if got.Path != "/data/" {
		t.Errorf("Path = %q, want %q", got.Path, "/data/")
	}
}

// TestFolderStore_GetByID_NotFound checks the (nil, nil) not-found contract.
func TestFolderStore_GetByID_NotFound(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	got, err := s.GetByID(context.Background(), 99999)
	if err != nil {
		t.Fatalf("GetByID() error = %v", err)
	}
	if got != nil {
		t.Error("GetByID() expected nil for not-found")
	}
}

// TestFolderStore_GetByPath verifies lookup by exact path, plus the
// (nil, nil) contract for a nonexistent path.
func TestFolderStore_GetByPath(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	folder := &model.Folder{
		Name: "data",
		Path: "/data/",
	}
	if err := s.Create(context.Background(), folder); err != nil {
		t.Fatalf("Create() error = %v", err)
	}
	got, err := s.GetByPath(context.Background(), "/data/")
	if err != nil {
		t.Fatalf("GetByPath() error = %v", err)
	}
	if got == nil {
		t.Fatal("GetByPath() returned nil")
	}
	if got.ID != folder.ID {
		t.Errorf("ID = %d, want %d", got.ID, folder.ID)
	}
	got, err = s.GetByPath(context.Background(), "/nonexistent/")
	if err != nil {
		t.Fatalf("GetByPath() nonexistent error = %v", err)
	}
	if got != nil {
		t.Error("GetByPath() expected nil for nonexistent path")
	}
}
// TestFolderStore_ListByParentID verifies root listing (nil parent),
// alphabetical ordering, and child listing by parent id.
func TestFolderStore_ListByParentID(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	root1 := &model.Folder{Name: "alpha", Path: "/alpha/"}
	root2 := &model.Folder{Name: "beta", Path: "/beta/"}
	if err := s.Create(context.Background(), root1); err != nil {
		t.Fatalf("Create root1: %v", err)
	}
	if err := s.Create(context.Background(), root2); err != nil {
		t.Fatalf("Create root2: %v", err)
	}
	sub := &model.Folder{Name: "sub", Path: "/alpha/sub/", ParentID: &root1.ID}
	if err := s.Create(context.Background(), sub); err != nil {
		t.Fatalf("Create sub: %v", err)
	}
	roots, err := s.ListByParentID(context.Background(), nil)
	if err != nil {
		t.Fatalf("ListByParentID(nil) error = %v", err)
	}
	if len(roots) != 2 {
		t.Fatalf("root folders = %d, want 2", len(roots))
	}
	if roots[0].Name != "alpha" {
		t.Errorf("roots[0].Name = %q, want %q (alphabetical)", roots[0].Name, "alpha")
	}
	children, err := s.ListByParentID(context.Background(), &root1.ID)
	if err != nil {
		t.Fatalf("ListByParentID(root1) error = %v", err)
	}
	if len(children) != 1 {
		t.Fatalf("children = %d, want 1", len(children))
	}
	if children[0].Name != "sub" {
		t.Errorf("children[0].Name = %q, want %q", children[0].Name, "sub")
	}
}

// TestFolderStore_GetSubTree verifies the path-prefix query returns the
// folder and its descendants but not unrelated siblings.
func TestFolderStore_GetSubTree(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	data := &model.Folder{Name: "data", Path: "/data/"}
	if err := s.Create(context.Background(), data); err != nil {
		t.Fatalf("Create data: %v", err)
	}
	results := &model.Folder{Name: "results", Path: "/data/results/", ParentID: &data.ID}
	if err := s.Create(context.Background(), results); err != nil {
		t.Fatalf("Create results: %v", err)
	}
	other := &model.Folder{Name: "other", Path: "/other/"}
	if err := s.Create(context.Background(), other); err != nil {
		t.Fatalf("Create other: %v", err)
	}
	subtree, err := s.GetSubTree(context.Background(), "/data/")
	if err != nil {
		t.Fatalf("GetSubTree() error = %v", err)
	}
	if len(subtree) != 2 {
		t.Fatalf("subtree = %d, want 2", len(subtree))
	}
}
// TestFolderStore_HasChildren_WithSubFolders: a folder with a sub-folder
// reports true.
func TestFolderStore_HasChildren_WithSubFolders(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	parent := &model.Folder{Name: "parent", Path: "/parent/"}
	if err := s.Create(context.Background(), parent); err != nil {
		t.Fatalf("Create parent: %v", err)
	}
	child := &model.Folder{Name: "child", Path: "/parent/child/", ParentID: &parent.ID}
	if err := s.Create(context.Background(), child); err != nil {
		t.Fatalf("Create child: %v", err)
	}
	has, err := s.HasChildren(context.Background(), parent.ID)
	if err != nil {
		t.Fatalf("HasChildren() error = %v", err)
	}
	if !has {
		t.Error("HasChildren() = false, want true (has sub-folders)")
	}
}

// TestFolderStore_HasChildren_WithFiles: a folder containing a file (but no
// sub-folders) reports true.
func TestFolderStore_HasChildren_WithFiles(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	folder := &model.Folder{Name: "docs", Path: "/docs/"}
	if err := s.Create(context.Background(), folder); err != nil {
		t.Fatalf("Create folder: %v", err)
	}
	file := &model.File{
		Name:       "readme.txt",
		FolderID:   &folder.ID,
		BlobSHA256: "abc123",
	}
	if err := db.Create(file).Error; err != nil {
		t.Fatalf("Create file: %v", err)
	}
	has, err := s.HasChildren(context.Background(), folder.ID)
	if err != nil {
		t.Fatalf("HasChildren() error = %v", err)
	}
	if !has {
		t.Error("HasChildren() = false, want true (has files)")
	}
}

// TestFolderStore_HasChildren_Empty: an empty folder reports false.
func TestFolderStore_HasChildren_Empty(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	folder := &model.Folder{Name: "empty", Path: "/empty/"}
	if err := s.Create(context.Background(), folder); err != nil {
		t.Fatalf("Create folder: %v", err)
	}
	has, err := s.HasChildren(context.Background(), folder.ID)
	if err != nil {
		t.Fatalf("HasChildren() error = %v", err)
	}
	if has {
		t.Error("HasChildren() = true, want false (empty folder)")
	}
}

// TestFolderStore_HasChildren_NotFound: a nonexistent folder reports
// (false, nil) rather than an error.
func TestFolderStore_HasChildren_NotFound(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	has, err := s.HasChildren(context.Background(), 99999)
	if err != nil {
		t.Fatalf("HasChildren() error = %v", err)
	}
	if has {
		t.Error("HasChildren() = true for nonexistent, want false")
	}
}
// TestFolderStore_Delete verifies a deleted folder disappears from GetByID.
func TestFolderStore_Delete(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	folder := &model.Folder{Name: "temp", Path: "/temp/"}
	if err := s.Create(context.Background(), folder); err != nil {
		t.Fatalf("Create() error = %v", err)
	}
	if err := s.Delete(context.Background(), folder.ID); err != nil {
		t.Fatalf("Delete() error = %v", err)
	}
	got, err := s.GetByID(context.Background(), folder.ID)
	if err != nil {
		t.Fatalf("GetByID() after delete error = %v", err)
	}
	if got != nil {
		t.Error("GetByID() after delete returned non-nil, expected soft-deleted")
	}
}

// TestFolderStore_Delete_Idempotent: deleting a missing id returns nil.
func TestFolderStore_Delete_Idempotent(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	if err := s.Delete(context.Background(), 99999); err != nil {
		t.Fatalf("Delete() non-existent error = %v, want nil (idempotent)", err)
	}
}

// TestFolderStore_Path_UniqueConstraint expects a duplicate path insert to
// fail on the unique index.
func TestFolderStore_Path_UniqueConstraint(t *testing.T) {
	db := setupFolderTestDB(t)
	s := NewFolderStore(db)
	f1 := &model.Folder{Name: "data", Path: "/data/"}
	if err := s.Create(context.Background(), f1); err != nil {
		t.Fatalf("Create first: %v", err)
	}
	f2 := &model.Folder{Name: "data2", Path: "/data/"}
	if err := s.Create(context.Background(), f2); err == nil {
		t.Fatal("expected error for duplicate path, got nil")
	}
}

View File

@@ -40,5 +40,12 @@ func NewGormDB(dsn string, zapLogger *zap.Logger, gormLevel string) (*gorm.DB, e
// AutoMigrate runs GORM auto-migration for all models.
func AutoMigrate(db *gorm.DB) error {
	return db.AutoMigrate(
		&model.Application{},
		&model.FileBlob{},
		&model.File{},
		&model.Folder{},
		&model.UploadSession{},
		&model.UploadChunk{},
	)
}

View File

@@ -0,0 +1,117 @@
package store
import (
"context"
"errors"
"fmt"
"time"
"gcy_hpc_server/internal/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// UploadStore manages upload sessions and chunks with idempotent upsert
// support (UpsertChunk), enabling resumable, chunked uploads.
type UploadStore struct {
	db *gorm.DB // shared GORM handle; every query derives from it
}
// NewUploadStore returns an UploadStore backed by the given GORM handle.
func NewUploadStore(db *gorm.DB) *UploadStore {
	store := UploadStore{db: db}
	return &store
}
// CreateSession inserts a new upload session.
func (s *UploadStore) CreateSession(ctx context.Context, session *model.UploadSession) error {
	err := s.db.WithContext(ctx).Create(session).Error
	return err
}
// GetSession fetches an upload session by id. Returns (nil, nil) when no
// session with that id exists.
func (s *UploadStore) GetSession(ctx context.Context, id int64) (*model.UploadSession, error) {
	session := new(model.UploadSession)
	switch err := s.db.WithContext(ctx).First(session, id).Error; {
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, nil
	case err != nil:
		return nil, err
	}
	return session, nil
}
// GetSessionWithChunks loads a session plus its chunks ordered by
// chunk_index. Returns (nil, nil, nil) when the session does not exist.
func (s *UploadStore) GetSessionWithChunks(ctx context.Context, id int64) (*model.UploadSession, []model.UploadChunk, error) {
	session, err := s.GetSession(ctx, id)
	if err != nil || session == nil {
		return nil, nil, err
	}
	var chunks []model.UploadChunk
	err = s.db.WithContext(ctx).
		Where("session_id = ?", id).
		Order("chunk_index ASC").
		Find(&chunks).Error
	if err != nil {
		return nil, nil, err
	}
	return session, chunks, nil
}
// UpdateSessionStatus sets the status column of the given session.
// Returns gorm.ErrRecordNotFound when no session matched the id.
func (s *UploadStore) UpdateSessionStatus(ctx context.Context, id int64, status string) error {
	res := s.db.WithContext(ctx).
		Model(&model.UploadSession{}).
		Where("id = ?", id).
		Update("status", status)
	switch {
	case res.Error != nil:
		return res.Error
	case res.RowsAffected == 0:
		return gorm.ErrRecordNotFound
	}
	return nil
}
// ListExpiredSessions returns every session whose expires_at has passed and
// whose status is not terminal ("completed", "cancelled", "expired").
func (s *UploadStore) ListExpiredSessions(ctx context.Context) ([]model.UploadSession, error) {
	terminal := []string{"completed", "cancelled", "expired"}
	var sessions []model.UploadSession
	err := s.db.WithContext(ctx).
		Where("expires_at < ?", time.Now()).
		Where("status NOT IN ?", terminal).
		Find(&sessions).Error
	if err != nil {
		return nil, err
	}
	return sessions, nil
}
// DeleteSession removes a session and all of its chunks.
//
// The two deletes previously ran as independent statements, so a failure
// after the chunk delete left the session row behind (and vice versa could
// orphan chunks on retry paths). They now run in a single transaction:
// either everything is removed or nothing is. Deleting a nonexistent
// session remains a no-op.
func (s *UploadStore) DeleteSession(ctx context.Context, id int64) error {
	return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		if err := tx.Where("session_id = ?", id).Delete(&model.UploadChunk{}).Error; err != nil {
			return fmt.Errorf("delete chunks: %w", err)
		}
		if err := tx.Delete(&model.UploadSession{}, id).Error; err != nil {
			return fmt.Errorf("delete session: %w", err)
		}
		return nil
	})
}
// UpsertChunk inserts a chunk, or refreshes its mutable columns when a row
// with the same (session_id, chunk_index) already exists. clause.OnConflict
// keeps the upsert dialect-neutral (works with both SQLite and MySQL).
func (s *UploadStore) UpsertChunk(ctx context.Context, chunk *model.UploadChunk) error {
	onConflict := clause.OnConflict{
		Columns:   []clause.Column{{Name: "session_id"}, {Name: "chunk_index"}},
		DoUpdates: clause.AssignmentColumns([]string{"minio_key", "sha256", "size", "status", "updated_at"}),
	}
	return s.db.WithContext(ctx).Clauses(onConflict).Create(chunk).Error
}
// GetUploadedChunkIndices returns the chunk_index values of every chunk in
// the session whose status is "uploaded".
func (s *UploadStore) GetUploadedChunkIndices(ctx context.Context, sessionID int64) ([]int, error) {
	var indices []int
	if err := s.db.WithContext(ctx).
		Model(&model.UploadChunk{}).
		Where("session_id = ? AND status = ?", sessionID, "uploaded").
		Pluck("chunk_index", &indices).Error; err != nil {
		return nil, err
	}
	return indices, nil
}
// CountUploadedChunks returns how many chunks in the session have status
// "uploaded".
func (s *UploadStore) CountUploadedChunks(ctx context.Context, sessionID int64) (int, error) {
	var n int64
	if err := s.db.WithContext(ctx).
		Model(&model.UploadChunk{}).
		Where("session_id = ? AND status = ?", sessionID, "uploaded").
		Count(&n).Error; err != nil {
		return 0, err
	}
	return int(n), nil
}

View File

@@ -0,0 +1,329 @@
package store
import (
"context"
"testing"
"time"
"gcy_hpc_server/internal/model"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
// setupUploadTestDB opens an in-memory SQLite database with a silent logger
// and migrates the UploadSession and UploadChunk schemas for UploadStore tests.
func setupUploadTestDB(t *testing.T) *gorm.DB {
	t.Helper()
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		t.Fatalf("open sqlite: %v", err)
	}
	if err := db.AutoMigrate(&model.UploadSession{}, &model.UploadChunk{}); err != nil {
		t.Fatalf("migrate: %v", err)
	}
	return db
}
// newTestSession builds an UploadSession fixture with the given status and
// expiry; all other fields are fixed representative values.
func newTestSession(status string, expiresAt time.Time) *model.UploadSession {
	return &model.UploadSession{
		FileName:    "test.bin",
		FileSize:    100 * 1024 * 1024, // 100 MiB
		ChunkSize:   16 << 20,          // 16 MiB
		TotalChunks: 7,
		SHA256:      "abc123",
		Status:      status,
		MinioPrefix: "uploads/1/",
		ExpiresAt:   expiresAt,
	}
}

// newTestChunk builds an UploadChunk fixture for the given session and index.
// NOTE(review): MinioKey and SHA256 are constant regardless of index — fine
// for the current tests, but confirm if per-index keys ever matter.
func newTestChunk(sessionID int64, index int, status string) *model.UploadChunk {
	return &model.UploadChunk{
		SessionID:  sessionID,
		ChunkIndex: index,
		MinioKey:   "uploads/1/chunk_00000",
		SHA256:     "chunk_hash_0",
		Size:       16 << 20, // 16 MiB
		Status:     status,
	}
}
// TestUploadStore_CreateSession verifies a session row is inserted with an
// ID and its fields persisted.
func TestUploadStore_CreateSession(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	session := newTestSession("pending", time.Now().Add(48*time.Hour))
	if err := s.CreateSession(context.Background(), session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	if session.ID <= 0 {
		t.Errorf("ID = %d, want positive", session.ID)
	}
	if session.FileName != "test.bin" {
		t.Errorf("FileName = %q, want %q", session.FileName, "test.bin")
	}
	if session.ExpiresAt.IsZero() {
		t.Error("ExpiresAt is zero, want a real timestamp")
	}
}
func TestUploadStore_GetSession(t *testing.T) {
db := setupUploadTestDB(t)
s := NewUploadStore(db)
session := newTestSession("pending", time.Now().Add(48*time.Hour))
s.CreateSession(context.Background(), session)
got, err := s.GetSession(context.Background(), session.ID)
if err != nil {
t.Fatalf("GetSession() error = %v", err)
}
if got == nil {
t.Fatal("GetSession() returned nil")
}
if got.FileName != session.FileName {
t.Errorf("FileName = %q, want %q", got.FileName, session.FileName)
}
if got.Status != "pending" {
t.Errorf("Status = %q, want %q", got.Status, "pending")
}
}
func TestUploadStore_GetSession_NotFound(t *testing.T) {
db := setupUploadTestDB(t)
s := NewUploadStore(db)
got, err := s.GetSession(context.Background(), 99999)
if err != nil {
t.Fatalf("GetSession() error = %v", err)
}
if got != nil {
t.Error("GetSession() expected nil for not-found")
}
}
// TestUploadStore_UpdateSessionStatus verifies that a session's status can
// be transitioned and that the new value is persisted.
func TestUploadStore_UpdateSessionStatus(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	session := newTestSession("pending", time.Now().Add(48*time.Hour))
	if err := s.CreateSession(context.Background(), session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	if err := s.UpdateSessionStatus(context.Background(), session.ID, "uploading"); err != nil {
		t.Fatalf("UpdateSessionStatus() error = %v", err)
	}
	// Check the read-back error and nil result explicitly; the original
	// discarded the error and could nil-pointer panic on got.Status.
	got, err := s.GetSession(context.Background(), session.ID)
	if err != nil {
		t.Fatalf("GetSession() error = %v", err)
	}
	if got == nil {
		t.Fatal("GetSession() returned nil after update")
	}
	if got.Status != "uploading" {
		t.Errorf("Status = %q, want %q", got.Status, "uploading")
	}
}
func TestUploadStore_UpdateSessionStatus_NotFound(t *testing.T) {
db := setupUploadTestDB(t)
s := NewUploadStore(db)
err := s.UpdateSessionStatus(context.Background(), 99999, "uploading")
if err == nil {
t.Fatal("UpdateSessionStatus() expected error for not-found, got nil")
}
}
// TestUploadStore_GetSessionWithChunks verifies that a session is returned
// together with its chunks, ordered by chunk index.
func TestUploadStore_GetSessionWithChunks(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	if err := s.CreateSession(context.Background(), session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	// Seed two non-contiguous chunks; setup failures abort the test
	// instead of being silently ignored as in the original.
	seed := []*model.UploadChunk{
		{SessionID: session.ID, ChunkIndex: 0, MinioKey: "uploads/1/chunk_0", SHA256: "h0", Size: 16 << 20, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 2, MinioKey: "uploads/1/chunk_2", SHA256: "h2", Size: 16 << 20, Status: "uploaded"},
	}
	for _, c := range seed {
		if err := s.UpsertChunk(context.Background(), c); err != nil {
			t.Fatalf("UpsertChunk(%d) error = %v", c.ChunkIndex, err)
		}
	}
	gotSession, chunks, err := s.GetSessionWithChunks(context.Background(), session.ID)
	if err != nil {
		t.Fatalf("GetSessionWithChunks() error = %v", err)
	}
	if gotSession == nil {
		t.Fatal("GetSessionWithChunks() session is nil")
	}
	if len(chunks) != 2 {
		t.Fatalf("len(chunks) = %d, want 2", len(chunks))
	}
	if chunks[0].ChunkIndex != 0 {
		t.Errorf("chunks[0].ChunkIndex = %d, want 0", chunks[0].ChunkIndex)
	}
	if chunks[1].ChunkIndex != 2 {
		t.Errorf("chunks[1].ChunkIndex = %d, want 2", chunks[1].ChunkIndex)
	}
}
func TestUploadStore_GetSessionWithChunks_NotFound(t *testing.T) {
db := setupUploadTestDB(t)
s := NewUploadStore(db)
gotSession, chunks, err := s.GetSessionWithChunks(context.Background(), 99999)
if err != nil {
t.Fatalf("GetSessionWithChunks() error = %v", err)
}
if gotSession != nil {
t.Error("expected nil session for not-found")
}
if chunks != nil {
t.Error("expected nil chunks for not-found")
}
}
// TestUploadStore_UpsertChunk_Idempotent verifies that upserting the same
// (session, chunk_index) twice keeps a single row and updates its fields
// on conflict.
func TestUploadStore_UpsertChunk_Idempotent(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	if err := s.CreateSession(context.Background(), session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	chunk := &model.UploadChunk{
		SessionID: session.ID, ChunkIndex: 0, MinioKey: "uploads/1/chunk_0", SHA256: "hash_v1", Size: 1024, Status: "uploaded",
	}
	if err := s.UpsertChunk(context.Background(), chunk); err != nil {
		t.Fatalf("first UpsertChunk() error = %v", err)
	}
	chunk2 := &model.UploadChunk{
		SessionID: session.ID, ChunkIndex: 0, MinioKey: "uploads/1/chunk_0", SHA256: "hash_v2", Size: 2048, Status: "uploaded",
	}
	if err := s.UpsertChunk(context.Background(), chunk2); err != nil {
		t.Fatalf("second UpsertChunk() error = %v", err)
	}
	// The original discarded these errors; a failing query would have
	// produced misleading assertion output instead of a clear failure.
	indices, err := s.GetUploadedChunkIndices(context.Background(), session.ID)
	if err != nil {
		t.Fatalf("GetUploadedChunkIndices() error = %v", err)
	}
	if len(indices) != 1 {
		t.Errorf("len(indices) = %d, want 1 (idempotent)", len(indices))
	}
	var got model.UploadChunk
	if err := db.Where("session_id = ? AND chunk_index = ?", session.ID, 0).First(&got).Error; err != nil {
		t.Fatalf("load chunk row: %v", err)
	}
	if got.SHA256 != "hash_v2" {
		t.Errorf("SHA256 = %q, want %q (updated on conflict)", got.SHA256, "hash_v2")
	}
}
// TestUploadStore_GetUploadedChunkIndices verifies that only chunks with
// status "uploaded" are returned, in ascending index order.
func TestUploadStore_GetUploadedChunkIndices(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	if err := s.CreateSession(context.Background(), session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	// Mix uploaded and pending chunks; setup failures abort the test
	// instead of being silently ignored as in the original.
	seed := []*model.UploadChunk{
		{SessionID: session.ID, ChunkIndex: 0, MinioKey: "k0", Size: 100, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 1, MinioKey: "k1", Size: 100, Status: "pending"},
		{SessionID: session.ID, ChunkIndex: 2, MinioKey: "k2", Size: 100, Status: "uploaded"},
	}
	for _, c := range seed {
		if err := s.UpsertChunk(context.Background(), c); err != nil {
			t.Fatalf("UpsertChunk(%d) error = %v", c.ChunkIndex, err)
		}
	}
	indices, err := s.GetUploadedChunkIndices(context.Background(), session.ID)
	if err != nil {
		t.Fatalf("GetUploadedChunkIndices() error = %v", err)
	}
	if len(indices) != 2 {
		t.Fatalf("len(indices) = %d, want 2", len(indices))
	}
	if indices[0] != 0 || indices[1] != 2 {
		t.Errorf("indices = %v, want [0 2]", indices)
	}
}
// TestUploadStore_CountUploadedChunks verifies that only chunks with
// status "uploaded" are counted.
func TestUploadStore_CountUploadedChunks(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	if err := s.CreateSession(context.Background(), session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	// Two uploaded chunks plus one pending; setup failures abort the
	// test instead of being silently ignored as in the original.
	seed := []*model.UploadChunk{
		{SessionID: session.ID, ChunkIndex: 0, MinioKey: "k0", Size: 100, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 1, MinioKey: "k1", Size: 100, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 2, MinioKey: "k2", Size: 100, Status: "pending"},
	}
	for _, c := range seed {
		if err := s.UpsertChunk(context.Background(), c); err != nil {
			t.Fatalf("UpsertChunk(%d) error = %v", c.ChunkIndex, err)
		}
	}
	count, err := s.CountUploadedChunks(context.Background(), session.ID)
	if err != nil {
		t.Fatalf("CountUploadedChunks() error = %v", err)
	}
	if count != 2 {
		t.Errorf("count = %d, want 2", count)
	}
}
// TestUploadStore_ListExpiredSessions verifies that only sessions that are
// past their expiry AND not completed are listed.
func TestUploadStore_ListExpiredSessions(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	past := time.Now().Add(-1 * time.Hour)
	future := time.Now().Add(48 * time.Hour)
	// Seed one expired-pending, one active, and one expired-but-completed
	// session; setup failures abort the test instead of being ignored.
	expired := newTestSession("pending", past)
	expired.FileName = "expired.bin"
	if err := s.CreateSession(context.Background(), expired); err != nil {
		t.Fatalf("CreateSession(expired) error = %v", err)
	}
	active := newTestSession("uploading", future)
	active.FileName = "active.bin"
	if err := s.CreateSession(context.Background(), active); err != nil {
		t.Fatalf("CreateSession(active) error = %v", err)
	}
	completed := newTestSession("completed", past)
	completed.FileName = "completed.bin"
	if err := s.CreateSession(context.Background(), completed); err != nil {
		t.Fatalf("CreateSession(completed) error = %v", err)
	}
	sessions, err := s.ListExpiredSessions(context.Background())
	if err != nil {
		t.Fatalf("ListExpiredSessions() error = %v", err)
	}
	if len(sessions) != 1 {
		t.Fatalf("len(sessions) = %d, want 1", len(sessions))
	}
	if sessions[0].FileName != "expired.bin" {
		t.Errorf("FileName = %q, want %q", sessions[0].FileName, "expired.bin")
	}
}
// TestUploadStore_DeleteSession verifies that deleting a session removes
// both the session row and all of its chunk rows.
func TestUploadStore_DeleteSession(t *testing.T) {
	db := setupUploadTestDB(t)
	s := NewUploadStore(db)
	session := newTestSession("uploading", time.Now().Add(48*time.Hour))
	if err := s.CreateSession(context.Background(), session); err != nil {
		t.Fatalf("CreateSession() error = %v", err)
	}
	// Seed two chunks; setup failures abort the test instead of being
	// silently ignored as in the original.
	seed := []*model.UploadChunk{
		{SessionID: session.ID, ChunkIndex: 0, MinioKey: "k0", Size: 100, Status: "uploaded"},
		{SessionID: session.ID, ChunkIndex: 1, MinioKey: "k1", Size: 100, Status: "uploaded"},
	}
	for _, c := range seed {
		if err := s.UpsertChunk(context.Background(), c); err != nil {
			t.Fatalf("UpsertChunk(%d) error = %v", c.ChunkIndex, err)
		}
	}
	if err := s.DeleteSession(context.Background(), session.ID); err != nil {
		t.Fatalf("DeleteSession() error = %v", err)
	}
	got, err := s.GetSession(context.Background(), session.ID)
	if err != nil {
		t.Fatalf("GetSession() error = %v", err)
	}
	if got != nil {
		t.Error("session still exists after delete")
	}
	var chunkCount int64
	if err := db.Model(&model.UploadChunk{}).Where("session_id = ?", session.ID).Count(&chunkCount).Error; err != nil {
		t.Fatalf("count chunks: %v", err)
	}
	if chunkCount != 0 {
		t.Errorf("chunkCount = %d, want 0 after delete", chunkCount)
	}
}