- mockminio: in-memory ObjectStorage implementing all 11 methods; thread-safe, SHA-256 ETags, Range support
- mockslurm: httptest server exposing 11 Slurm REST API endpoints, with job eviction from the active queue to the history queue
- testenv: one-line test-environment factory (SQLite + MockSlurm + MockMinIO + all stores/services/handlers + httptest server)
- integration tests: 37 tests covering Jobs (5), Cluster (5), App (6), Upload (5), File (4), Folder (4), Task (4), E2E (1)
- no external dependencies; no existing files modified
406 lines
12 KiB
Go
406 lines
12 KiB
Go
// Package testenv provides a complete test environment factory that wires up
|
|
// SQLite DB + MockSlurm + MockMinIO + all Stores/Services/Handlers + httptest Server.
|
|
package testenv
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"crypto/sha256"
|
|
"database/sql"
|
|
"encoding/hex"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"math/rand"
|
|
"net/http"
|
|
"net/http/httptest"
|
|
"os"
|
|
"time"
|
|
|
|
"gcy_hpc_server/internal/app"
|
|
"gcy_hpc_server/internal/config"
|
|
"gcy_hpc_server/internal/handler"
|
|
"gcy_hpc_server/internal/model"
|
|
"gcy_hpc_server/internal/server"
|
|
"gcy_hpc_server/internal/service"
|
|
"gcy_hpc_server/internal/slurm"
|
|
"gcy_hpc_server/internal/storage"
|
|
"gcy_hpc_server/internal/store"
|
|
"gcy_hpc_server/internal/testutil/mockminio"
|
|
"gcy_hpc_server/internal/testutil/mockslurm"
|
|
|
|
"go.uber.org/zap"
|
|
"gorm.io/driver/sqlite"
|
|
"gorm.io/gorm"
|
|
)
|
|
|
|
// TestEnv holds a fully wired test environment with all dependencies.
// Construct it with NewTestEnv; cleanup is registered via t.Cleanup, so
// callers normally do not release anything themselves. Exported fields are
// the test doubles tests interact with directly; everything else is wired
// internally and reached through the helper methods.
type TestEnv struct {
	DB        *gorm.DB                   // SQLite in-memory database (1 open conn, see NewTestEnv)
	MockSlurm *mockslurm.MockSlurm       // in-memory Slurm REST API double
	MockMinIO *mockminio.InMemoryStorage // in-memory object-storage double

	// Internal servers and background workers.
	mockSlurmSrv *httptest.Server // serves MockSlurm's REST endpoints
	srv          *httptest.Server // serves the application router under test
	poller       *app.TaskPoller  // polls task status on a short test interval

	// Stores (one per persisted model).
	appStore    *store.ApplicationStore
	taskStore   *store.TaskStore
	fileStore   *store.FileStore
	blobStore   *store.BlobStore
	uploadStore *store.UploadStore
	folderStore *store.FolderStore

	// Services (created in dependency order by NewTestEnv).
	jobSvc     *service.JobService
	clusterSvc *service.ClusterService
	folderSvc  *service.FolderService
	stagingSvc *service.FileStagingService
	taskSvc    *service.TaskService
	appSvc     *service.ApplicationService
	uploadSvc  *service.UploadService
	fileSvc    *service.FileService

	// Handlers (one per API group, mounted on the router).
	jobH     *handler.JobHandler
	clusterH *handler.ClusterHandler
	appH     *handler.ApplicationHandler
	uploadH  *handler.UploadHandler
	fileH    *handler.FileHandler
	folderH  *handler.FolderHandler
	taskH    *handler.TaskHandler

	workDir string      // scratch directory for job scripts/outputs
	logger  *zap.Logger // zap.NewNop unless WithDebugLogging was given
}
|
|
|
|
// Option configures a TestEnv during construction.
// Options are applied in order by NewTestEnv before any wiring happens.
type Option func(*testEnvConfig)
|
|
|
|
// testEnvConfig accumulates the effect of all Options before construction.
type testEnvConfig struct {
	// workDir overrides the scratch directory; empty means "create a temp dir".
	workDir string
	// debugLog switches the logger from zap.NewNop to zap.NewExample.
	debugLog bool
}
|
|
|
|
// WithWorkDir overrides the default temporary work directory.
|
|
func WithWorkDir(path string) Option {
|
|
return func(c *testEnvConfig) { c.workDir = path }
|
|
}
|
|
|
|
// WithDebugLogging switches from zap.NewNop() to zap.NewExample().
|
|
func WithDebugLogging() Option {
|
|
return func(c *testEnvConfig) { c.debugLog = true }
|
|
}
|
|
|
|
// NewTestEnv creates a fully wired test environment.
|
|
// t.Cleanup is registered for all resources — callers do not need to clean up.
|
|
func NewTestEnv(t interface {
|
|
Fatalf(format string, args ...interface{})
|
|
}, opts ...Option) *TestEnv {
|
|
cfg := &testEnvConfig{}
|
|
for _, o := range opts {
|
|
o(cfg)
|
|
}
|
|
|
|
var logger *zap.Logger
|
|
if cfg.debugLog {
|
|
logger = zap.NewExample()
|
|
} else {
|
|
logger = zap.NewNop()
|
|
}
|
|
|
|
// 1. SQLite in-memory DB + AutoMigrate
|
|
dbName := fmt.Sprintf("file:testenv-%d?mode=memory&cache=shared", rand.Int63())
|
|
db, err := gorm.Open(sqlite.Open(dbName), &gorm.Config{})
|
|
if err != nil {
|
|
t.Fatalf("failed to open SQLite: %v", err)
|
|
}
|
|
sqlDB, err := db.DB()
|
|
if err != nil {
|
|
t.Fatalf("failed to get underlying sql.DB: %v", err)
|
|
}
|
|
sqlDB.SetMaxOpenConns(1)
|
|
if err := db.AutoMigrate(
|
|
&model.Application{},
|
|
&model.FileBlob{},
|
|
&model.File{},
|
|
&model.Folder{},
|
|
&model.UploadSession{},
|
|
&model.UploadChunk{},
|
|
&model.Task{},
|
|
); err != nil {
|
|
t.Fatalf("failed to auto-migrate: %v", err)
|
|
}
|
|
|
|
// 2. Temp work directory
|
|
workDir := cfg.workDir
|
|
if workDir == "" {
|
|
wd, err := os.MkdirTemp("", "testenv-workdir-*")
|
|
if err != nil {
|
|
t.Fatalf("failed to create temp dir: %v", err)
|
|
}
|
|
workDir = wd
|
|
}
|
|
|
|
// 3. MockSlurm httptest server
|
|
mockSlurmSrv, mockSlurm := mockslurm.NewMockSlurmServer()
|
|
|
|
// 4. MockMinIO
|
|
mockMinIO := mockminio.NewInMemoryStorage()
|
|
|
|
// 5. All 6 Store instances
|
|
appStore := store.NewApplicationStore(db)
|
|
taskStore := store.NewTaskStore(db)
|
|
fileStore := store.NewFileStore(db)
|
|
blobStore := store.NewBlobStore(db)
|
|
uploadStore := store.NewUploadStore(db)
|
|
folderStore := store.NewFolderStore(db)
|
|
|
|
// 6. Slurm client
|
|
slurmClient, err := slurm.NewClientWithOpts(mockSlurmSrv.URL, slurm.WithHTTPClient(mockSlurmSrv.Client()))
|
|
if err != nil {
|
|
t.Fatalf("failed to create slurm client: %v", err)
|
|
}
|
|
|
|
// 7. MinioConfig
|
|
minioCfg := config.MinioConfig{
|
|
ChunkSize: 16 << 20,
|
|
MaxFileSize: 50 << 30,
|
|
MinChunkSize: 5 << 20,
|
|
SessionTTL: 48,
|
|
Bucket: "files",
|
|
}
|
|
|
|
// 8. All Service instances (dependency order)
|
|
jobSvc := service.NewJobService(slurmClient, logger)
|
|
clusterSvc := service.NewClusterService(slurmClient, logger)
|
|
folderSvc := service.NewFolderService(folderStore, fileStore, logger)
|
|
stagingSvc := service.NewFileStagingService(fileStore, blobStore, mockMinIO, minioCfg.Bucket, logger)
|
|
taskSvc := service.NewTaskService(taskStore, appStore, fileStore, blobStore, stagingSvc, jobSvc, workDir, logger)
|
|
appSvc := service.NewApplicationService(appStore, jobSvc, workDir, logger, taskSvc)
|
|
uploadSvc := service.NewUploadService(mockMinIO, blobStore, fileStore, uploadStore, minioCfg, db, logger)
|
|
fileSvc := service.NewFileService(mockMinIO, blobStore, fileStore, minioCfg.Bucket, db, logger)
|
|
|
|
// 9. All 7 Handler instances
|
|
jobH := handler.NewJobHandler(jobSvc, logger)
|
|
clusterH := handler.NewClusterHandler(clusterSvc, logger)
|
|
appH := handler.NewApplicationHandler(appSvc, logger)
|
|
uploadH := handler.NewUploadHandler(uploadSvc, logger)
|
|
fileH := handler.NewFileHandler(fileSvc, logger)
|
|
folderH := handler.NewFolderHandler(folderSvc, logger)
|
|
taskH := handler.NewTaskHandler(taskSvc, logger)
|
|
|
|
// 10. Router
|
|
router := server.NewRouter(jobH, clusterH, appH, uploadH, fileH, folderH, taskH, logger)
|
|
|
|
// 11. HTTP test server
|
|
srv := httptest.NewServer(router)
|
|
|
|
// 12. Start TaskProcessor
|
|
ctx := context.Background()
|
|
taskSvc.StartProcessor(ctx)
|
|
|
|
// 13. Start TaskPoller (100ms interval for tests)
|
|
poller := app.NewTaskPoller(taskSvc, 100*time.Millisecond, logger)
|
|
poller.Start(ctx)
|
|
|
|
env := &TestEnv{
|
|
DB: db,
|
|
MockSlurm: mockSlurm,
|
|
MockMinIO: mockMinIO,
|
|
mockSlurmSrv: mockSlurmSrv,
|
|
srv: srv,
|
|
poller: poller,
|
|
appStore: appStore,
|
|
taskStore: taskStore,
|
|
fileStore: fileStore,
|
|
blobStore: blobStore,
|
|
uploadStore: uploadStore,
|
|
folderStore: folderStore,
|
|
jobSvc: jobSvc,
|
|
clusterSvc: clusterSvc,
|
|
folderSvc: folderSvc,
|
|
stagingSvc: stagingSvc,
|
|
taskSvc: taskSvc,
|
|
appSvc: appSvc,
|
|
uploadSvc: uploadSvc,
|
|
fileSvc: fileSvc,
|
|
jobH: jobH,
|
|
clusterH: clusterH,
|
|
appH: appH,
|
|
uploadH: uploadH,
|
|
fileH: fileH,
|
|
folderH: folderH,
|
|
taskH: taskH,
|
|
workDir: workDir,
|
|
logger: logger,
|
|
}
|
|
|
|
// Cleanup registration (LIFO — last registered runs first).
|
|
// We use a *testing.T-compatible interface; callers using *testing.T
|
|
// will have t.Cleanup work correctly.
|
|
if ct, ok := t.(interface{ Cleanup(func()) }); ok {
|
|
ct.Cleanup(func() { os.RemoveAll(workDir) })
|
|
ct.Cleanup(srv.Close)
|
|
ct.Cleanup(poller.Stop)
|
|
ct.Cleanup(taskSvc.StopProcessor)
|
|
}
|
|
|
|
return env
|
|
}
|
|
|
|
// URL returns the base URL of the test HTTP server.
|
|
func (env *TestEnv) URL() string {
|
|
return env.srv.URL
|
|
}
|
|
|
|
// DoRequest sends an HTTP request to the test server.
|
|
// When body is non-nil, Content-Type: application/json is set.
|
|
func (env *TestEnv) DoRequest(method, path string, body io.Reader) *http.Response {
|
|
req, err := http.NewRequest(method, env.srv.URL+path, body)
|
|
if err != nil {
|
|
panic(fmt.Sprintf("failed to create request: %v", err))
|
|
}
|
|
if body != nil {
|
|
req.Header.Set("Content-Type", "application/json")
|
|
}
|
|
resp, err := http.DefaultClient.Do(req)
|
|
if err != nil {
|
|
panic(fmt.Sprintf("failed to send request: %v", err))
|
|
}
|
|
return resp
|
|
}
|
|
|
|
// DecodeResponse decodes an API response into its components.
|
|
// Returns success flag, raw data, and any error from decoding.
|
|
func (env *TestEnv) DecodeResponse(resp *http.Response) (bool, json.RawMessage, error) {
|
|
body, err := io.ReadAll(resp.Body)
|
|
if err != nil {
|
|
return false, nil, fmt.Errorf("read body: %w", err)
|
|
}
|
|
resp.Body.Close()
|
|
|
|
var result struct {
|
|
Success bool `json:"success"`
|
|
Data json.RawMessage `json:"data"`
|
|
Error string `json:"error"`
|
|
}
|
|
if err := json.Unmarshal(body, &result); err != nil {
|
|
return false, nil, fmt.Errorf("unmarshal response: %w (body: %s)", err, string(body))
|
|
}
|
|
return result.Success, result.Data, nil
|
|
}
|
|
|
|
// CreateApp creates an application directly via the service and returns the app ID.
|
|
func (env *TestEnv) CreateApp(name, scriptTemplate string, params json.RawMessage) (int64, error) {
|
|
ctx := context.Background()
|
|
appID, err := env.appSvc.CreateApplication(ctx, &model.CreateApplicationRequest{
|
|
Name: name,
|
|
ScriptTemplate: scriptTemplate,
|
|
Parameters: params,
|
|
})
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
return appID, nil
|
|
}
|
|
|
|
// UploadTestData uploads content to MockMinIO and creates FileBlob + File records.
|
|
// Returns (fileID, blobID).
|
|
func (env *TestEnv) UploadTestData(name string, content []byte) (int64, int64) {
|
|
ctx := context.Background()
|
|
|
|
h := sha256.Sum256(content)
|
|
sha256Key := hex.EncodeToString(h[:])
|
|
|
|
_, err := env.MockMinIO.PutObject(ctx, "files", sha256Key, bytes.NewReader(content), int64(len(content)), storage.PutObjectOptions{})
|
|
if err != nil {
|
|
panic(fmt.Sprintf("PutObject failed: %v", err))
|
|
}
|
|
|
|
blob, err := env.blobStore.GetBySHA256(ctx, sha256Key)
|
|
if err != nil {
|
|
panic(fmt.Sprintf("get blob by sha256: %v", err))
|
|
}
|
|
if blob == nil {
|
|
blob = &model.FileBlob{
|
|
SHA256: sha256Key,
|
|
MinioKey: sha256Key,
|
|
FileSize: int64(len(content)),
|
|
RefCount: 1,
|
|
}
|
|
if err := env.blobStore.Create(ctx, blob); err != nil {
|
|
panic(fmt.Sprintf("create blob: %v", err))
|
|
}
|
|
}
|
|
|
|
file := &model.File{
|
|
Name: name,
|
|
BlobSHA256: sha256Key,
|
|
}
|
|
if err := env.fileStore.Create(ctx, file); err != nil {
|
|
panic(fmt.Sprintf("create file failed: %v", err))
|
|
}
|
|
|
|
return file.ID, blob.ID
|
|
}
|
|
|
|
// CreateFile uploads test data and returns only the file ID.
|
|
func (env *TestEnv) CreateFile(name string, content []byte) (int64, error) {
|
|
fileID, _ := env.UploadTestData(name, content)
|
|
return fileID, nil
|
|
}
|
|
|
|
// WaitForTaskStatus polls the DB until the task reaches the target status or timeout.
|
|
func (env *TestEnv) WaitForTaskStatus(taskID int64, status string, timeout time.Duration) error {
|
|
deadline := time.Now().Add(timeout)
|
|
ctx := context.Background()
|
|
for time.Now().Before(deadline) {
|
|
task, err := env.taskStore.GetByID(ctx, taskID)
|
|
if err != nil {
|
|
return fmt.Errorf("get task: %w", err)
|
|
}
|
|
if task == nil {
|
|
return fmt.Errorf("task %d not found", taskID)
|
|
}
|
|
if task.Status == status {
|
|
return nil
|
|
}
|
|
time.Sleep(100 * time.Millisecond)
|
|
}
|
|
task, _ := env.taskStore.GetByID(ctx, taskID)
|
|
if task != nil {
|
|
return fmt.Errorf("timeout waiting for task %d to reach status %q, current: %q", taskID, status, task.Status)
|
|
}
|
|
return fmt.Errorf("timeout waiting for task %d to reach status %q", taskID, status)
|
|
}
|
|
|
|
// MakeTaskStale sets the task's updated_at to 31 seconds ago via raw SQL,
|
|
// bypassing GORM's AutoUpdateTime.
|
|
func (env *TestEnv) MakeTaskStale(taskID int64) error {
|
|
sqlDB, err := env.DB.DB()
|
|
if err != nil {
|
|
return fmt.Errorf("get sql.DB: %w", err)
|
|
}
|
|
_, err = sqlDB.Exec("UPDATE hpc_tasks SET updated_at = ? WHERE id = ?", time.Now().Add(-31*time.Second), taskID)
|
|
return err
|
|
}
|
|
|
|
// GetTaskSlurmJobID returns the slurm_job_id for the given task via raw SQL.
|
|
func (env *TestEnv) GetTaskSlurmJobID(taskID int64) (int32, error) {
|
|
sqlDB, err := env.DB.DB()
|
|
if err != nil {
|
|
return 0, fmt.Errorf("get sql.DB: %w", err)
|
|
}
|
|
var jobID sql.NullInt32
|
|
err = sqlDB.QueryRow("SELECT slurm_job_id FROM hpc_tasks WHERE id = ?", taskID).Scan(&jobID)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
if !jobID.Valid {
|
|
return 0, fmt.Errorf("task %d has no slurm_job_id", taskID)
|
|
}
|
|
return jobID.Int32, nil
|
|
}
|