feat(testutil): add MockSlurm, MockMinIO, TestEnv and 37 integration tests

- mockminio: in-memory ObjectStorage with all 11 methods, thread-safe, SHA256 ETag, Range support
- mockslurm: httptest server with 11 Slurm REST API endpoints, job eviction from active to history queue
- testenv: one-line test environment factory (SQLite + MockSlurm + MockMinIO + all stores/services/handlers + httptest server)
- integration tests: 37 tests covering Jobs(5), Cluster(5), App(6), Upload(5), File(4), Folder(4), Task(4), E2E(1)
- no external dependencies, no existing files modified
This commit is contained in:
dailz
2026-04-16 13:23:27 +08:00
parent 73504f9fdb
commit b9b2f0d9b4
16 changed files with 4685 additions and 0 deletions

View File

@@ -0,0 +1,239 @@
// Package mockminio provides an in-memory implementation of storage.ObjectStorage
// for use in tests. It is thread-safe and supports Range reads.
package mockminio
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"gcy_hpc_server/internal/storage"
)
// Compile-time interface check: the build fails here if InMemoryStorage ever
// drifts from the storage.ObjectStorage contract.
var _ storage.ObjectStorage = (*InMemoryStorage)(nil)
// objectMeta holds metadata for a stored object.
type objectMeta struct {
	size         int64     // payload length in bytes
	etag         string    // hex-encoded SHA-256 of the payload
	lastModified time.Time // time of the write that produced this version
	contentType  string    // Content-Type supplied at upload; may be empty
}
// InMemoryStorage is a thread-safe, in-memory implementation of
// storage.ObjectStorage. All data is kept in memory; no network or disk I/O
// is performed.
type InMemoryStorage struct {
	mu      sync.RWMutex          // guards objects, meta and buckets
	objects map[string][]byte     // object key -> raw payload
	meta    map[string]objectMeta // object key -> metadata
	buckets map[string]bool       // bucket name -> exists
}
// NewInMemoryStorage constructs an empty InMemoryStorage with all internal
// maps initialized, ready for immediate use.
func NewInMemoryStorage() *InMemoryStorage {
	store := new(InMemoryStorage)
	store.objects = make(map[string][]byte)
	store.meta = make(map[string]objectMeta)
	store.buckets = make(map[string]bool)
	return store
}
// PutObject buffers the full contents of reader in memory and records it
// under key. The returned ETag is the SHA-256 hash of the payload, hex
// encoded. The bucket name and declared size arguments are ignored by this
// in-memory implementation.
func (s *InMemoryStorage) PutObject(_ context.Context, _, key string, reader io.Reader, _ int64, opts storage.PutObjectOptions) (storage.UploadInfo, error) {
	payload, err := io.ReadAll(reader)
	if err != nil {
		return storage.UploadInfo{}, fmt.Errorf("read all: %w", err)
	}
	sum := sha256.Sum256(payload)
	tag := hex.EncodeToString(sum[:])
	size := int64(len(payload))

	s.mu.Lock()
	defer s.mu.Unlock()
	s.objects[key] = payload
	s.meta[key] = objectMeta{
		size:         size,
		etag:         tag,
		lastModified: time.Now(),
		contentType:  opts.ContentType,
	}
	return storage.UploadInfo{ETag: tag, Size: size}, nil
}
// GetObject retrieves an object. opts.Start and opts.End control byte-range
// reads. Four cases are supported:
//  1. No range (both nil) → return entire object
//  2. Start only (End nil) → from start to end of object
//  3. End only (Start nil) → from byte 0 to end
//  4. Start + End → standard byte range
//
// The returned ObjectInfo always carries the total object size, not the
// range length.
func (s *InMemoryStorage) GetObject(_ context.Context, _, key string, opts storage.GetOptions) (io.ReadCloser, storage.ObjectInfo, error) {
	s.mu.RLock()
	payload, exists := s.objects[key]
	md := s.meta[key]
	s.mu.RUnlock()

	if !exists {
		return nil, storage.ObjectInfo{}, fmt.Errorf("object %s not found", key)
	}
	total := int64(len(payload))
	info := storage.ObjectInfo{
		Key:          key,
		Size:         total,
		ETag:         md.etag,
		LastModified: md.lastModified,
		ContentType:  md.contentType,
	}

	// Fast path: no range requested → serve the whole payload.
	if opts.Start == nil && opts.End == nil {
		return io.NopCloser(bytes.NewReader(payload)), info, nil
	}

	// Resolve the requested window, defaulting each missing bound.
	var from int64
	if opts.Start != nil {
		from = *opts.Start
	}
	to := total - 1
	if opts.End != nil {
		to = *opts.End
	}
	if to >= total {
		to = total - 1 // clamp to the last byte
	}
	if from > to || from < 0 {
		return nil, storage.ObjectInfo{}, fmt.Errorf("invalid range: start=%d, end=%d, size=%d", from, to, total)
	}
	window := io.NewSectionReader(bytes.NewReader(payload), from, to-from+1)
	return io.NopCloser(window), info, nil
}
// ComposeObject concatenates the named source objects (in order) into dst and
// records fresh metadata for the combined payload. All sources must exist.
func (s *InMemoryStorage) ComposeObject(_ context.Context, _, dst string, sources []string) (storage.UploadInfo, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	var merged []byte
	for _, name := range sources {
		chunk, ok := s.objects[name]
		if !ok {
			return storage.UploadInfo{}, fmt.Errorf("source object %s not found", name)
		}
		merged = append(merged, chunk...)
	}
	sum := sha256.Sum256(merged)
	tag := hex.EncodeToString(sum[:])
	s.objects[dst] = merged
	s.meta[dst] = objectMeta{
		size:         int64(len(merged)),
		etag:         tag,
		lastModified: time.Now(),
		// contentType intentionally left empty for composed objects.
	}
	return storage.UploadInfo{ETag: tag, Size: int64(len(merged))}, nil
}
// RemoveObject deletes a single object. Removing a missing key is a no-op
// and still returns nil.
func (s *InMemoryStorage) RemoveObject(_ context.Context, _, key string, _ storage.RemoveObjectOptions) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.objects, key)
	delete(s.meta, key)
	return nil
}
// RemoveObjects deletes multiple objects by key under a single lock
// acquisition. Missing keys are silently skipped.
func (s *InMemoryStorage) RemoveObjects(_ context.Context, _ string, keys []string, _ storage.RemoveObjectsOptions) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, key := range keys {
		delete(s.objects, key)
		delete(s.meta, key)
	}
	return nil
}
// ListObjects returns info for every object whose key starts with prefix,
// sorted ascending by key. The recursive flag is ignored: listing is always
// flat since keys are plain strings here.
func (s *InMemoryStorage) ListObjects(_ context.Context, _, prefix string, _ bool) ([]storage.ObjectInfo, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	var infos []storage.ObjectInfo
	for key, md := range s.meta {
		if !strings.HasPrefix(key, prefix) {
			continue
		}
		infos = append(infos, storage.ObjectInfo{
			Key:          key,
			Size:         md.size,
			ETag:         md.etag,
			LastModified: md.lastModified,
			ContentType:  md.contentType,
		})
	}
	// Map iteration order is random; sort for deterministic output.
	sort.Slice(infos, func(a, b int) bool { return infos[a].Key < infos[b].Key })
	return infos, nil
}
// BucketExists reports whether the named bucket was previously created via
// MakeBucket. It never returns an error.
func (s *InMemoryStorage) BucketExists(_ context.Context, bucket string) (bool, error) {
	s.mu.RLock()
	exists := s.buckets[bucket]
	s.mu.RUnlock()
	return exists, nil
}
// MakeBucket creates (or idempotently re-creates) a bucket. The region in
// the options is ignored by this in-memory implementation.
func (s *InMemoryStorage) MakeBucket(_ context.Context, bucket string, _ storage.MakeBucketOptions) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.buckets[bucket] = true
	return nil
}
// StatObject returns metadata about an object without touching its payload.
// It fails with a "not found" error when the key is unknown.
func (s *InMemoryStorage) StatObject(_ context.Context, _, key string, _ storage.StatObjectOptions) (storage.ObjectInfo, error) {
	s.mu.RLock()
	md, exists := s.meta[key]
	s.mu.RUnlock()

	if !exists {
		return storage.ObjectInfo{}, fmt.Errorf("object %s not found", key)
	}
	info := storage.ObjectInfo{
		Key:          key,
		Size:         md.size,
		ETag:         md.etag,
		LastModified: md.lastModified,
		ContentType:  md.contentType,
	}
	return info, nil
}
// AbortMultipartUpload is a no-op for the in-memory implementation; no
// multipart state is kept, so there is nothing to abort and it always
// succeeds.
func (s *InMemoryStorage) AbortMultipartUpload(_ context.Context, _, _, _ string) error {
	return nil
}
// RemoveIncompleteUpload is a no-op for the in-memory implementation; no
// partial-upload state exists, so it always succeeds.
func (s *InMemoryStorage) RemoveIncompleteUpload(_ context.Context, _, _ string) error {
	return nil
}

View File

@@ -0,0 +1,378 @@
package mockminio
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"sync"
"testing"
"gcy_hpc_server/internal/storage"
)
// sha256Hex returns the lowercase hex encoding of the SHA-256 digest of data,
// matching the ETag format produced by InMemoryStorage.
func sha256Hex(data []byte) string {
	digest := sha256.Sum256(data)
	out := make([]byte, hex.EncodedLen(len(digest)))
	hex.Encode(out, digest[:])
	return string(out)
}
// TestNewInMemoryStorage_ReturnsInitialized checks that the constructor
// returns a usable, non-nil storage.
func TestNewInMemoryStorage_ReturnsInitialized(t *testing.T) {
	s := NewInMemoryStorage()
	if s == nil {
		t.Fatal("expected non-nil storage")
	}
}

// TestPutObject_StoresData verifies that the upload info returned by
// PutObject carries the SHA-256 ETag and the exact payload size.
func TestPutObject_StoresData(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	data := []byte("hello world")
	info, err := s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{ContentType: "text/plain"})
	if err != nil {
		t.Fatalf("PutObject: %v", err)
	}
	wantETag := sha256Hex(data)
	if info.ETag != wantETag {
		t.Errorf("ETag = %q, want %q", info.ETag, wantETag)
	}
	if info.Size != int64(len(data)) {
		t.Errorf("Size = %d, want %d", info.Size, len(data))
	}
}
// TestGetObject_FullObject round-trips a payload through Put/Get with no
// range options and checks both body and reported size.
func TestGetObject_FullObject(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	data := []byte("hello world")
	s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
	rc, info, err := s.GetObject(ctx, "bucket", "key1", storage.GetOptions{})
	if err != nil {
		t.Fatalf("GetObject: %v", err)
	}
	defer rc.Close()
	got, err := io.ReadAll(rc)
	if err != nil {
		t.Fatalf("ReadAll: %v", err)
	}
	if !bytes.Equal(got, data) {
		t.Errorf("got %q, want %q", got, data)
	}
	if info.Size != int64(len(data)) {
		t.Errorf("info.Size = %d, want %d", info.Size, len(data))
	}
}

// TestGetObject_RangeStartOnly exercises range case 2: Start set, End nil →
// suffix from Start to the end of the object.
func TestGetObject_RangeStartOnly(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	data := []byte("0123456789")
	s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
	start := int64(5)
	rc, _, err := s.GetObject(ctx, "bucket", "key1", storage.GetOptions{Start: &start})
	if err != nil {
		t.Fatalf("GetObject: %v", err)
	}
	defer rc.Close()
	got, err := io.ReadAll(rc)
	if err != nil {
		t.Fatalf("ReadAll: %v", err)
	}
	want := data[5:]
	if !bytes.Equal(got, want) {
		t.Errorf("got %q, want %q", got, want)
	}
}
// TestGetObject_RangeEndOnly exercises range case 3: End set, Start nil →
// bytes 0 through End inclusive (NOT an HTTP suffix-range).
func TestGetObject_RangeEndOnly(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	data := []byte("0123456789")
	s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
	end := int64(4)
	rc, _, err := s.GetObject(ctx, "bucket", "key1", storage.GetOptions{End: &end})
	if err != nil {
		t.Fatalf("GetObject: %v", err)
	}
	defer rc.Close()
	got, err := io.ReadAll(rc)
	if err != nil {
		t.Fatalf("ReadAll: %v", err)
	}
	want := data[:5]
	if !bytes.Equal(got, want) {
		t.Errorf("got %q, want %q", got, want)
	}
}

// TestGetObject_RangeStartAndEnd exercises range case 4: both bounds set,
// inclusive on both ends.
func TestGetObject_RangeStartAndEnd(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	data := []byte("0123456789")
	s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
	start := int64(2)
	end := int64(5)
	rc, _, err := s.GetObject(ctx, "bucket", "key1", storage.GetOptions{Start: &start, End: &end})
	if err != nil {
		t.Fatalf("GetObject: %v", err)
	}
	defer rc.Close()
	got, err := io.ReadAll(rc)
	if err != nil {
		t.Fatalf("ReadAll: %v", err)
	}
	want := data[2:6]
	if !bytes.Equal(got, want) {
		t.Errorf("got %q, want %q", got, want)
	}
}

// TestGetObject_NotFound checks the error path for an unknown key.
func TestGetObject_NotFound(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	_, _, err := s.GetObject(ctx, "bucket", "nonexistent", storage.GetOptions{})
	if err == nil {
		t.Fatal("expected error for missing object")
	}
}
// TestComposeObject_ConcatenatesSources verifies source order is preserved,
// the ETag covers the combined payload, and the result is readable.
func TestComposeObject_ConcatenatesSources(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	s.PutObject(ctx, "bucket", "part1", bytes.NewReader([]byte("hello ")), 6, storage.PutObjectOptions{})
	s.PutObject(ctx, "bucket", "part2", bytes.NewReader([]byte("world")), 5, storage.PutObjectOptions{})
	info, err := s.ComposeObject(ctx, "bucket", "combined", []string{"part1", "part2"})
	if err != nil {
		t.Fatalf("ComposeObject: %v", err)
	}
	want := []byte("hello world")
	if info.Size != int64(len(want)) {
		t.Errorf("Size = %d, want %d", info.Size, len(want))
	}
	wantETag := sha256Hex(want)
	if info.ETag != wantETag {
		t.Errorf("ETag = %q, want %q", info.ETag, wantETag)
	}
	rc, _, err := s.GetObject(ctx, "bucket", "combined", storage.GetOptions{})
	if err != nil {
		t.Fatalf("GetObject combined: %v", err)
	}
	defer rc.Close()
	got, _ := io.ReadAll(rc)
	if !bytes.Equal(got, want) {
		t.Errorf("got %q, want %q", got, want)
	}
}

// TestComposeObject_MissingSource checks the error path when any source is
// absent.
func TestComposeObject_MissingSource(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	_, err := s.ComposeObject(ctx, "bucket", "dst", []string{"missing"})
	if err == nil {
		t.Fatal("expected error for missing source")
	}
}

// TestRemoveObject confirms a removed object is no longer retrievable.
func TestRemoveObject(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	s.PutObject(ctx, "bucket", "key1", bytes.NewReader([]byte("data")), 4, storage.PutObjectOptions{})
	err := s.RemoveObject(ctx, "bucket", "key1", storage.RemoveObjectOptions{})
	if err != nil {
		t.Fatalf("RemoveObject: %v", err)
	}
	_, _, err = s.GetObject(ctx, "bucket", "key1", storage.GetOptions{})
	if err == nil {
		t.Fatal("expected error after removal")
	}
}

// TestRemoveObjects deletes a subset of keys and checks the remainder via
// ListObjects.
func TestRemoveObjects(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	for i := 0; i < 5; i++ {
		key := fmt.Sprintf("key%d", i)
		s.PutObject(ctx, "bucket", key, bytes.NewReader([]byte(key)), int64(len(key)), storage.PutObjectOptions{})
	}
	err := s.RemoveObjects(ctx, "bucket", []string{"key1", "key3"}, storage.RemoveObjectsOptions{})
	if err != nil {
		t.Fatalf("RemoveObjects: %v", err)
	}
	objects, _ := s.ListObjects(ctx, "bucket", "", true)
	if len(objects) != 3 {
		t.Errorf("got %d objects, want 3", len(objects))
	}
}
// TestListObjects verifies prefix filtering and key-sorted output order.
func TestListObjects(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	s.PutObject(ctx, "bucket", "dir/a", bytes.NewReader([]byte("a")), 1, storage.PutObjectOptions{})
	s.PutObject(ctx, "bucket", "dir/b", bytes.NewReader([]byte("bb")), 2, storage.PutObjectOptions{})
	s.PutObject(ctx, "bucket", "other/c", bytes.NewReader([]byte("ccc")), 3, storage.PutObjectOptions{})
	objects, err := s.ListObjects(ctx, "bucket", "dir/", true)
	if err != nil {
		t.Fatalf("ListObjects: %v", err)
	}
	if len(objects) != 2 {
		t.Fatalf("got %d objects, want 2", len(objects))
	}
	if objects[0].Key != "dir/a" || objects[1].Key != "dir/b" {
		t.Errorf("keys = %v, want [dir/a dir/b]", []string{objects[0].Key, objects[1].Key})
	}
	if objects[0].Size != 1 || objects[1].Size != 2 {
		t.Errorf("sizes = %v, want [1 2]", []int64{objects[0].Size, objects[1].Size})
	}
}

// TestBucketExists checks the before/after MakeBucket transitions.
func TestBucketExists(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	ok, _ := s.BucketExists(ctx, "mybucket")
	if ok {
		t.Error("bucket should not exist yet")
	}
	s.MakeBucket(ctx, "mybucket", storage.MakeBucketOptions{})
	ok, _ = s.BucketExists(ctx, "mybucket")
	if !ok {
		t.Error("bucket should exist after MakeBucket")
	}
}

// TestMakeBucket confirms bucket creation succeeds (the Region option is
// accepted but ignored by the in-memory implementation).
func TestMakeBucket(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	err := s.MakeBucket(ctx, "test-bucket", storage.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		t.Fatalf("MakeBucket: %v", err)
	}
	ok, _ := s.BucketExists(ctx, "test-bucket")
	if !ok {
		t.Error("bucket should exist")
	}
}
// TestStatObject verifies every metadata field returned by StatObject.
func TestStatObject(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	data := []byte("test data")
	s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{ContentType: "text/plain"})
	info, err := s.StatObject(ctx, "bucket", "key1", storage.StatObjectOptions{})
	if err != nil {
		t.Fatalf("StatObject: %v", err)
	}
	wantETag := sha256Hex(data)
	if info.Key != "key1" {
		t.Errorf("Key = %q, want %q", info.Key, "key1")
	}
	if info.Size != int64(len(data)) {
		t.Errorf("Size = %d, want %d", info.Size, len(data))
	}
	if info.ETag != wantETag {
		t.Errorf("ETag = %q, want %q", info.ETag, wantETag)
	}
	if info.ContentType != "text/plain" {
		t.Errorf("ContentType = %q, want %q", info.ContentType, "text/plain")
	}
	if info.LastModified.IsZero() {
		t.Error("LastModified should not be zero")
	}
}

// TestStatObject_NotFound checks the error path for an unknown key.
func TestStatObject_NotFound(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	_, err := s.StatObject(ctx, "bucket", "nonexistent", storage.StatObjectOptions{})
	if err == nil {
		t.Fatal("expected error for missing object")
	}
}

// TestAbortMultipartUpload confirms the no-op method never errors.
func TestAbortMultipartUpload(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	err := s.AbortMultipartUpload(ctx, "bucket", "key", "upload-id")
	if err != nil {
		t.Fatalf("AbortMultipartUpload: %v", err)
	}
}

// TestRemoveIncompleteUpload confirms the no-op method never errors.
func TestRemoveIncompleteUpload(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	err := s.RemoveIncompleteUpload(ctx, "bucket", "key")
	if err != nil {
		t.Fatalf("RemoveIncompleteUpload: %v", err)
	}
}
// TestConcurrentAccess hammers PutObject and GetObject from 100 goroutines
// over 10 shared keys. It asserts nothing beyond completion: run with
// `go test -race` to surface data races in the storage implementation.
func TestConcurrentAccess(t *testing.T) {
	s := NewInMemoryStorage()
	ctx := context.Background()
	const goroutines = 50
	var wg sync.WaitGroup
	wg.Add(goroutines * 2)
	for i := 0; i < goroutines; i++ {
		go func(i int) {
			defer wg.Done()
			key := fmt.Sprintf("key%d", i%10)
			data := []byte(fmt.Sprintf("data-%d", i))
			s.PutObject(ctx, "bucket", key, bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
		}(i)
		go func(i int) {
			defer wg.Done()
			key := fmt.Sprintf("key%d", i%10)
			// The Get may legitimately miss if no Put for this key has landed yet.
			rc, _, _ := s.GetObject(ctx, "bucket", key, storage.GetOptions{})
			if rc != nil {
				rc.Close()
			}
		}(i)
	}
	wg.Wait()
}

View File

@@ -0,0 +1,544 @@
// Package mockslurm provides a complete HTTP mock server for the Slurm REST API.
// It supports all 11 endpoints (P0: 4 job + P1: 7 cluster/history) and includes
// job eviction from active to history queue on terminal states.
package mockslurm
import (
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"sync"
"time"
"gcy_hpc_server/internal/slurm"
)
// MockJob represents a job tracked by the mock server.
type MockJob struct {
	JobID      int32
	Name       string
	State      string // single state string for internal tracking
	Script     string
	Partition  string
	WorkDir    string
	SubmitTime time.Time
	StartTime  *time.Time // set when SetJobState moves the job to RUNNING
	EndTime    *time.Time // set when the job reaches a terminal state
	ExitCode   *int32     // 0 for COMPLETED, 1 for other terminal states
}
// MockNode represents a node tracked by the mock server. Only the name is
// modeled; node detail fields are not needed by the mock endpoints.
type MockNode struct {
	Name string
}
// MockPartition represents a partition tracked by the mock server. As with
// MockNode, only the name is modeled.
type MockPartition struct {
	Name string
}
// MockSlurm is the mock Slurm API server controller.
type MockSlurm struct {
	mu          sync.RWMutex       // guards activeJobs, historyJobs and nextID
	activeJobs  map[int32]*MockJob // non-terminal jobs, keyed by job ID
	historyJobs map[int32]*MockJob // terminal jobs evicted from activeJobs
	nodes       []MockNode         // fixed inventory, set once at construction
	partitions  []MockPartition    // fixed inventory, set once at construction
	nextID      int32              // next job ID handed out on submit
	server      *httptest.Server
}
// NewMockSlurmServer creates and starts a mock Slurm REST API server.
// Returns the httptest.Server and the MockSlurm controller. The server comes
// pre-seeded with three nodes and two partitions; the caller is responsible
// for closing the server.
func NewMockSlurmServer() (*httptest.Server, *MockSlurm) {
	m := &MockSlurm{
		activeJobs:  make(map[int32]*MockJob),
		historyJobs: make(map[int32]*MockJob),
		nodes: []MockNode{
			{Name: "node01"},
			{Name: "node02"},
			{Name: "node03"},
		},
		partitions: []MockPartition{
			{Name: "normal"},
			{Name: "gpu"},
		},
		nextID: 1,
	}
	mux := http.NewServeMux()
	// P0: Exact paths FIRST (before prefix paths). ServeMux prefers the most
	// specific pattern, so /job/submit must be registered explicitly or the
	// /job/ prefix handler would swallow it.
	mux.HandleFunc("/slurm/v0.0.40/job/submit", m.handleJobSubmit)
	mux.HandleFunc("/slurm/v0.0.40/jobs", m.handleGetJobs)
	// P0: Prefix path for /job/{id} — GET and DELETE
	mux.HandleFunc("/slurm/v0.0.40/job/", m.handleJobByID)
	// P1: Cluster endpoints
	mux.HandleFunc("/slurm/v0.0.40/nodes", m.handleGetNodes)
	mux.HandleFunc("/slurm/v0.0.40/node/", m.handleGetNode)
	mux.HandleFunc("/slurm/v0.0.40/partitions", m.handleGetPartitions)
	mux.HandleFunc("/slurm/v0.0.40/partition/", m.handleGetPartition)
	mux.HandleFunc("/slurm/v0.0.40/diag", m.handleDiag)
	// P1: SlurmDB endpoints
	mux.HandleFunc("/slurmdb/v0.0.40/jobs", m.handleSlurmdbJobs)
	mux.HandleFunc("/slurmdb/v0.0.40/job/", m.handleSlurmdbJob)
	srv := httptest.NewServer(mux)
	m.server = srv
	return srv, m
}
// Server returns the underlying httptest.Server, letting callers read its
// URL or close it explicitly.
func (m *MockSlurm) Server() *httptest.Server {
	return m.server
}
// ---------------------------------------------------------------------------
// Controller methods
// ---------------------------------------------------------------------------
// SetJobState transitions a job to the given state.
// Terminal states (COMPLETED/FAILED/CANCELLED/TIMEOUT) evict the job from
// activeJobs into historyJobs. RUNNING sets StartTime and stays active.
// PENDING stays in activeJobs. Unknown states and unknown job IDs are
// silently ignored.
func (m *MockSlurm) SetJobState(id int32, state string) {
	m.mu.Lock()
	defer m.mu.Unlock()

	job, found := m.activeJobs[id]
	if !found {
		return
	}
	now := time.Now()
	switch state {
	case "PENDING":
		job.State = state
	case "RUNNING":
		job.State = state
		job.StartTime = &now
	case "COMPLETED", "FAILED", "CANCELLED", "TIMEOUT":
		job.State = state
		job.EndTime = &now
		// COMPLETED exits 0; every other terminal state exits 1.
		code := int32(1)
		if state == "COMPLETED" {
			code = 0
		}
		job.ExitCode = &code
		delete(m.activeJobs, id)
		m.historyJobs[id] = job
	}
}
// GetJobState returns the current state of the job with the given ID,
// checking the active queue first and then the history queue.
// Returns empty string if the job is not found in either.
func (m *MockSlurm) GetJobState(id int32) string {
	m.mu.RLock()
	defer m.mu.RUnlock()
	for _, queue := range []map[int32]*MockJob{m.activeJobs, m.historyJobs} {
		if job, ok := queue[id]; ok {
			return job.State
		}
	}
	return ""
}
// GetAllActiveJobs returns all jobs currently in the active queue, in
// unspecified (map iteration) order.
func (m *MockSlurm) GetAllActiveJobs() []*MockJob {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]*MockJob, 0, len(m.activeJobs))
	for _, job := range m.activeJobs {
		out = append(out, job)
	}
	return out
}
// GetAllHistoryJobs returns all jobs in the history queue, in unspecified
// (map iteration) order.
func (m *MockSlurm) GetAllHistoryJobs() []*MockJob {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]*MockJob, 0, len(m.historyJobs))
	for _, job := range m.historyJobs {
		out = append(out, job)
	}
	return out
}
// ---------------------------------------------------------------------------
// P0: Job Core Endpoints
// ---------------------------------------------------------------------------
// handleJobSubmit serves POST /slurm/v0.0.40/job/submit: it decodes the
// submit request, allocates the next job ID, stores a PENDING MockJob and
// responds with a submit payload carrying the new ID.
func (m *MockSlurm) handleJobSubmit(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	var req slurm.JobSubmitReq
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	m.mu.Lock()
	id := m.nextID
	m.nextID++
	job := &MockJob{
		JobID:      id,
		State:      "PENDING", // MUST be non-empty for mapSlurmStateToTaskStatus
		SubmitTime: time.Now(),
	}
	// Top-level script, optionally overridden by the nested job description.
	if req.Script != nil {
		job.Script = *req.Script
	}
	if desc := req.Job; desc != nil {
		if desc.Name != nil {
			job.Name = *desc.Name
		}
		if desc.Partition != nil {
			job.Partition = *desc.Partition
		}
		if desc.CurrentWorkingDirectory != nil {
			job.WorkDir = *desc.CurrentWorkingDirectory
		}
		if desc.Script != nil {
			job.Script = *desc.Script
		}
	}
	m.activeJobs[id] = job
	m.mu.Unlock()

	writeJSON(w, http.StatusOK, NewSubmitResponse(id))
}
// handleGetJobs serves GET /slurm/v0.0.40/jobs, listing every job in the
// active queue (history jobs are only visible via the slurmdb endpoints).
func (m *MockSlurm) handleGetJobs(w http.ResponseWriter, r *http.Request) {
	m.mu.RLock()
	infos := make([]slurm.JobInfo, 0, len(m.activeJobs))
	for _, job := range m.activeJobs {
		infos = append(infos, m.mockJobToJobInfo(job))
	}
	m.mu.RUnlock()
	writeJSON(w, http.StatusOK, NewJobInfoResponse(infos))
}
// handleJobByID serves GET and DELETE on /slurm/v0.0.40/job/{id}. It parses
// the trailing path segment as the job ID and dispatches on method.
func (m *MockSlurm) handleJobByID(w http.ResponseWriter, r *http.Request) {
	// /slurm/v0.0.40/job/{id} → [""] ["slurm"] ["v0.0.40"] ["job"] [id]
	parts := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
	if len(parts) < 5 {
		m.writeError(w, http.StatusBadRequest, "missing job id")
		return
	}
	tail := parts[4]
	// Safety net: if "submit" leaks through prefix match, forward to submit handler.
	if tail == "submit" {
		m.handleJobSubmit(w, r)
		return
	}
	parsed, err := strconv.ParseInt(tail, 10, 32)
	if err != nil {
		m.writeError(w, http.StatusBadRequest, "invalid job id")
		return
	}
	jobID := int32(parsed)
	switch r.Method {
	case http.MethodGet:
		m.handleGetJobByID(w, jobID)
	case http.MethodDelete:
		m.handleDeleteJobByID(w, jobID)
	default:
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	}
}
// handleGetJobByID writes a single-job JobInfo response for an active job,
// or a 404 error body if the ID is not in the active queue.
func (m *MockSlurm) handleGetJobByID(w http.ResponseWriter, jobID int32) {
	m.mu.RLock()
	job, found := m.activeJobs[jobID]
	m.mu.RUnlock()
	if !found {
		m.writeError(w, http.StatusNotFound, "job not found")
		return
	}
	writeJSON(w, http.StatusOK, NewJobInfoResponse([]slurm.JobInfo{m.mockJobToJobInfo(job)}))
}
// handleDeleteJobByID cancels the job (moving it into the history queue via
// SetJobState) and always responds with a success payload, matching Slurm's
// fire-and-forget cancel semantics.
func (m *MockSlurm) handleDeleteJobByID(w http.ResponseWriter, jobID int32) {
	m.SetJobState(jobID, "CANCELLED")
	writeJSON(w, http.StatusOK, NewDeleteResponse())
}
// ---------------------------------------------------------------------------
// P1: Cluster/History Endpoints
// ---------------------------------------------------------------------------
// handleGetNodes serves GET /slurm/v0.0.40/nodes. No lock is taken: the node
// inventory is written only in NewMockSlurmServer and never mutated after.
func (m *MockSlurm) handleGetNodes(w http.ResponseWriter, r *http.Request) {
	out := make([]slurm.Node, 0, len(m.nodes))
	for _, n := range m.nodes {
		out = append(out, slurm.Node{Name: slurm.Ptr(n.Name)})
	}
	writeJSON(w, http.StatusOK, NewNodeResponse(out))
}
// handleGetNode serves GET /slurm/v0.0.40/node/{name}, returning the single
// matching node or a 404 error body.
func (m *MockSlurm) handleGetNode(w http.ResponseWriter, r *http.Request) {
	parts := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
	if len(parts) < 5 {
		m.writeError(w, http.StatusBadRequest, "missing node name")
		return
	}
	want := parts[4]
	for _, n := range m.nodes {
		if n.Name == want {
			writeJSON(w, http.StatusOK, NewNodeResponse([]slurm.Node{{Name: slurm.Ptr(n.Name)}}))
			return
		}
	}
	m.writeError(w, http.StatusNotFound, "node not found")
}
// handleGetPartitions serves GET /slurm/v0.0.40/partitions. As with nodes,
// the partition inventory is immutable after construction, so no lock is
// needed.
func (m *MockSlurm) handleGetPartitions(w http.ResponseWriter, r *http.Request) {
	out := make([]slurm.PartitionInfo, 0, len(m.partitions))
	for _, p := range m.partitions {
		out = append(out, slurm.PartitionInfo{Name: slurm.Ptr(p.Name)})
	}
	writeJSON(w, http.StatusOK, NewPartitionResponse(out))
}
// handleGetPartition serves GET /slurm/v0.0.40/partition/{name}, returning
// the single matching partition or a 404 error body.
func (m *MockSlurm) handleGetPartition(w http.ResponseWriter, r *http.Request) {
	parts := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
	if len(parts) < 5 {
		m.writeError(w, http.StatusBadRequest, "missing partition name")
		return
	}
	want := parts[4]
	for _, p := range m.partitions {
		if p.Name == want {
			writeJSON(w, http.StatusOK, NewPartitionResponse([]slurm.PartitionInfo{{Name: slurm.Ptr(p.Name)}}))
			return
		}
	}
	m.writeError(w, http.StatusNotFound, "partition not found")
}
// handleDiag serves GET /slurm/v0.0.40/diag with a canned diagnostics
// payload built by NewDiagResponse.
func (m *MockSlurm) handleDiag(w http.ResponseWriter, r *http.Request) {
	resp := NewDiagResponse()
	writeJSON(w, http.StatusOK, resp)
}
// handleSlurmdbJobs serves GET /slurmdb/v0.0.40/jobs over the history queue,
// with optional filter params:
//   - job_name:   exact name match
//   - start_time: job start must be >= this unix timestamp
//   - end_time:   job end must be <= this unix timestamp
//
// Unparseable timestamp filters are ignored, matching the previous behavior.
func (m *MockSlurm) handleSlurmdbJobs(w http.ResponseWriter, r *http.Request) {
	// Parse the query string once up front instead of re-parsing it (via
	// r.URL.Query()) for every job inside the loop.
	query := r.URL.Query()
	nameFilter := query.Get("job_name")
	var startFilter, endFilter *int64
	if raw := query.Get("start_time"); raw != "" {
		if v, err := strconv.ParseInt(raw, 10, 64); err == nil {
			startFilter = &v
		}
	}
	if raw := query.Get("end_time"); raw != "" {
		if v, err := strconv.ParseInt(raw, 10, 64); err == nil {
			endFilter = &v
		}
	}

	m.mu.RLock()
	defer m.mu.RUnlock()
	jobs := make([]slurm.Job, 0)
	for _, mj := range m.historyJobs {
		if nameFilter != "" && mj.Name != nameFilter {
			continue
		}
		// start_time filter: jobs that never started cannot match.
		if startFilter != nil && (mj.StartTime == nil || mj.StartTime.Unix() < *startFilter) {
			continue
		}
		// end_time filter: jobs that never ended cannot match.
		if endFilter != nil && (mj.EndTime == nil || mj.EndTime.Unix() > *endFilter) {
			continue
		}
		jobs = append(jobs, m.mockJobToSlurmDBJob(mj))
	}
	resp := NewJobHistoryResponse(jobs)
	writeJSON(w, http.StatusOK, resp)
}
// handleSlurmdbJob serves GET /slurmdb/v0.0.40/job/{id}, returning the
// matching history job wrapped in a jobs-array response
// (OpenapiSlurmdbdJobsResp). Any parse failure or miss yields a 404 body.
func (m *MockSlurm) handleSlurmdbJob(w http.ResponseWriter, r *http.Request) {
	parts := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
	if len(parts) < 5 {
		m.writeError(w, http.StatusNotFound, "job not found")
		return
	}
	parsed, err := strconv.ParseInt(parts[4], 10, 32)
	if err != nil {
		m.writeError(w, http.StatusNotFound, "job not found")
		return
	}
	m.mu.RLock()
	job, found := m.historyJobs[int32(parsed)]
	m.mu.RUnlock()
	if !found {
		m.writeError(w, http.StatusNotFound, "job not found")
		return
	}
	writeJSON(w, http.StatusOK, NewJobHistoryResponse([]slurm.Job{m.mockJobToSlurmDBJob(job)}))
}
// ---------------------------------------------------------------------------
// Conversion helpers
// ---------------------------------------------------------------------------
// mockJobToJobInfo converts a MockJob to an active-endpoint JobInfo.
// Uses buildActiveJobState for flat []string state format: ["RUNNING"].
func (m *MockSlurm) mockJobToJobInfo(mj *MockJob) slurm.JobInfo {
	ji := slurm.JobInfo{
		JobID:                   slurm.Ptr(mj.JobID),
		JobState:                buildActiveJobState(mj.State), // MUST be non-empty []string
		Name:                    slurm.Ptr(mj.Name),
		Partition:               slurm.Ptr(mj.Partition),
		CurrentWorkingDirectory: slurm.Ptr(mj.WorkDir),
		SubmitTime:              &slurm.Uint64NoVal{Number: slurm.Ptr(mj.SubmitTime.Unix())},
	}
	// Optional fields: present only once the job has started/ended/exited.
	if mj.StartTime != nil {
		ji.StartTime = &slurm.Uint64NoVal{Number: slurm.Ptr(mj.StartTime.Unix())}
	}
	if mj.EndTime != nil {
		ji.EndTime = &slurm.Uint64NoVal{Number: slurm.Ptr(mj.EndTime.Unix())}
	}
	if mj.ExitCode != nil {
		ji.ExitCode = &slurm.ProcessExitCodeVerbose{
			ReturnCode: &slurm.Uint32NoVal{Number: slurm.Ptr(int64(*mj.ExitCode))},
		}
	}
	return ji
}
// mockJobToSlurmDBJob converts a MockJob to a SlurmDB history Job.
// Uses buildHistoryJobState for nested state format: {current: ["COMPLETED"], reason: ""}.
func (m *MockSlurm) mockJobToSlurmDBJob(mj *MockJob) slurm.Job {
	dbJob := slurm.Job{
		JobID:            slurm.Ptr(mj.JobID),
		Name:             slurm.Ptr(mj.Name),
		Partition:        slurm.Ptr(mj.Partition),
		WorkingDirectory: slurm.Ptr(mj.WorkDir),
		Script:           slurm.Ptr(mj.Script),
		State:            buildHistoryJobState(mj.State),
		Time: &slurm.JobTime{
			Submission: slurm.Ptr(mj.SubmitTime.Unix()),
		},
	}
	// Optional timestamps/exit code, mirroring mockJobToJobInfo.
	if mj.StartTime != nil {
		dbJob.Time.Start = slurm.Ptr(mj.StartTime.Unix())
	}
	if mj.EndTime != nil {
		dbJob.Time.End = slurm.Ptr(mj.EndTime.Unix())
	}
	if mj.ExitCode != nil {
		dbJob.ExitCode = &slurm.ProcessExitCodeVerbose{
			ReturnCode: &slurm.Uint32NoVal{Number: slurm.Ptr(int64(*mj.ExitCode))},
		}
	}
	return dbJob
}
// ---------------------------------------------------------------------------
// Error helpers
// ---------------------------------------------------------------------------
// writeJSON writes a JSON response with the given status code. The Encode
// error is intentionally dropped: this is a test double and the header has
// already been written, so there is nothing useful to do on failure.
func writeJSON(w http.ResponseWriter, code int, v interface{}) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	json.NewEncoder(w).Encode(v)
}
// writeError writes an HTTP error with an OpenapiResp body containing
// meta and errors fields. This is critical for CheckResponse/IsNotFound
// to work correctly — the response body must be parseable as OpenapiResp.
func (m *MockSlurm) writeError(w http.ResponseWriter, statusCode int, message string) {
	// Minimal but well-formed meta block mimicking a Slurm 24.05 server.
	meta := slurm.OpenapiMeta{
		Plugin: &slurm.MetaPlugin{
			Type: slurm.Ptr("openapi/v0.0.40"),
			Name: slurm.Ptr(""),
		},
		Slurm: &slurm.MetaSlurm{
			Version: &slurm.MetaSlurmVersion{
				Major: slurm.Ptr("24"),
				Micro: slurm.Ptr("0"),
				Minor: slurm.Ptr("5"),
			},
			Release: slurm.Ptr("24.05.0"),
		},
	}
	resp := slurm.OpenapiResp{
		Meta: &meta,
		Errors: slurm.OpenapiErrors{
			{
				ErrorNumber: slurm.Ptr(int32(0)),
				Error:       slurm.Ptr(message),
			},
		},
		Warnings: slurm.OpenapiWarnings{},
	}
	writeJSON(w, statusCode, resp)
}

View File

@@ -0,0 +1,679 @@
package mockslurm
import (
"context"
"encoding/json"
"strconv"
"strings"
"testing"
"gcy_hpc_server/internal/slurm"
)
// setupTestClient starts a mock Slurm server, registers its shutdown with
// the test cleanup, and returns a slurm.Client pointed at it together with
// the mock controller.
func setupTestClient(t *testing.T) (*slurm.Client, *MockSlurm) {
	t.Helper()
	srv, mock := NewMockSlurmServer()
	t.Cleanup(srv.Close)
	client, err := slurm.NewClientWithOpts(srv.URL, slurm.WithHTTPClient(srv.Client()))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	return client, mock
}
// submitTestJob submits a job with the given attributes through the real
// client API and returns the assigned job ID, failing the test on any error.
func submitTestJob(t *testing.T, client *slurm.Client, name, partition, workDir, script string) int32 {
	t.Helper()
	ctx := context.Background()
	resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
		Script: slurm.Ptr(script),
		Job: &slurm.JobDescMsg{
			Name:                    slurm.Ptr(name),
			Partition:               slurm.Ptr(partition),
			CurrentWorkingDirectory: slurm.Ptr(workDir),
		},
	})
	if err != nil {
		t.Fatalf("SubmitJob failed: %v", err)
	}
	if resp.JobID == nil {
		t.Fatal("SubmitJob returned nil JobID")
	}
	return *resp.JobID
}
// ---------------------------------------------------------------------------
// P0: Submit Job
// ---------------------------------------------------------------------------
func TestSubmitJob(t *testing.T) {
client, _ := setupTestClient(t)
ctx := context.Background()
resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
Script: slurm.Ptr("#!/bin/bash\necho hello"),
Job: &slurm.JobDescMsg{
Name: slurm.Ptr("test-job"),
Partition: slurm.Ptr("normal"),
CurrentWorkingDirectory: slurm.Ptr("/tmp/work"),
},
})
if err != nil {
t.Fatalf("SubmitJob failed: %v", err)
}
if resp.JobID == nil || *resp.JobID != 1 {
t.Errorf("JobID = %v, want 1", resp.JobID)
}
if resp.StepID == nil || *resp.StepID != "Scalar" {
t.Errorf("StepID = %v, want Scalar", resp.StepID)
}
if resp.Result == nil || resp.Result.JobID == nil || *resp.Result.JobID != 1 {
t.Errorf("Result.JobID = %v, want 1", resp.Result)
}
}
// TestSubmitJobAutoIncrement verifies consecutive submissions receive
// sequential job IDs starting at 1.
func TestSubmitJobAutoIncrement(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()
	for want := int32(1); want <= 3; want++ {
		script := "#!/bin/bash\necho " + strconv.Itoa(int(want))
		resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
			Script: slurm.Ptr(script),
		})
		if err != nil {
			t.Fatalf("SubmitJob %d failed: %v", want, err)
		}
		if resp.JobID == nil || *resp.JobID != want {
			t.Errorf("job %d: JobID = %v, want %d", want, resp.JobID, want)
		}
	}
}
// ---------------------------------------------------------------------------
// P0: Get All Jobs
// ---------------------------------------------------------------------------
// TestGetJobsEmpty verifies a fresh mock reports no active jobs.
func TestGetJobsEmpty(t *testing.T) {
	client, _ := setupTestClient(t)
	resp, _, err := client.Jobs.GetJobs(context.Background(), nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if n := len(resp.Jobs); n != 0 {
		t.Errorf("len(Jobs) = %d, want 0", n)
	}
}
// TestGetJobsWithSubmitted submits two jobs and verifies both come back from
// the active listing in PENDING state with their names intact.
func TestGetJobsWithSubmitted(t *testing.T) {
	client, _ := setupTestClient(t)
	submitTestJob(t, client, "job-a", "normal", "/tmp/a", "#!/bin/bash\ntrue")
	submitTestJob(t, client, "job-b", "gpu", "/tmp/b", "#!/bin/bash\nfalse")
	resp, _, err := client.Jobs.GetJobs(context.Background(), nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) != 2 {
		t.Fatalf("len(Jobs) = %d, want 2", len(resp.Jobs))
	}
	seen := make(map[string]bool, len(resp.Jobs))
	for _, job := range resp.Jobs {
		if job.Name != nil {
			seen[*job.Name] = true
		}
		if len(job.JobState) == 0 || job.JobState[0] != "PENDING" {
			t.Errorf("JobState = %v, want [PENDING]", job.JobState)
		}
	}
	if !seen["job-a"] || !seen["job-b"] {
		t.Errorf("expected job-a and job-b, got %v", seen)
	}
}
// ---------------------------------------------------------------------------
// P0: Get Job By ID
// ---------------------------------------------------------------------------
// TestGetJobByID verifies that GetJob returns exactly one job whose ID, name,
// partition, working directory, and submit time all match the submission.
func TestGetJobByID(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()
	jobID := submitTestJob(t, client, "single-job", "normal", "/tmp/work", "#!/bin/bash\necho hi")
	// GetJob takes the job ID as a string path parameter.
	resp, _, err := client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob failed: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
	}
	job := resp.Jobs[0]
	// The generated API types use pointer fields throughout, so nil-check
	// before every dereference.
	if job.JobID == nil || *job.JobID != jobID {
		t.Errorf("JobID = %v, want %d", job.JobID, jobID)
	}
	if job.Name == nil || *job.Name != "single-job" {
		t.Errorf("Name = %v, want single-job", job.Name)
	}
	if job.Partition == nil || *job.Partition != "normal" {
		t.Errorf("Partition = %v, want normal", job.Partition)
	}
	if job.CurrentWorkingDirectory == nil || *job.CurrentWorkingDirectory != "/tmp/work" {
		t.Errorf("CurrentWorkingDirectory = %v, want /tmp/work", job.CurrentWorkingDirectory)
	}
	// SubmitTime is a number-wrapper type; only presence is asserted here.
	if job.SubmitTime == nil || job.SubmitTime.Number == nil {
		t.Error("SubmitTime should be non-nil")
	}
}
// TestGetJobByIDNotFound verifies that fetching an unknown job ID yields a
// not-found (404) error.
func TestGetJobByIDNotFound(t *testing.T) {
	client, _ := setupTestClient(t)
	_, _, err := client.Jobs.GetJob(context.Background(), "999", nil)
	switch {
	case err == nil:
		t.Fatal("expected error for unknown job ID, got nil")
	case !slurm.IsNotFound(err):
		t.Errorf("error type = %T, want SlurmAPIError with 404", err)
	}
}
// ---------------------------------------------------------------------------
// P0: Delete Job (triggers eviction)
// ---------------------------------------------------------------------------
// TestDeleteJob cancels a submitted job and verifies the mock evicts it from
// the active queue into history with state CANCELLED.
func TestDeleteJob(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()
	jobID := submitTestJob(t, client, "cancel-me", "normal", "/tmp", "#!/bin/bash\nsleep 99")
	resp, _, err := client.Jobs.DeleteJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("DeleteJob failed: %v", err)
	}
	if resp == nil {
		t.Fatal("DeleteJob returned nil response")
	}
	if active := mock.GetAllActiveJobs(); len(active) != 0 {
		t.Error("active jobs should be empty after delete")
	}
	if history := mock.GetAllHistoryJobs(); len(history) != 1 {
		t.Error("history should contain 1 job after delete")
	}
	if state := mock.GetJobState(jobID); state != "CANCELLED" {
		t.Errorf("job state = %q, want CANCELLED", state)
	}
}
// TestDeleteJobEvictsFromActive verifies a deleted job can no longer be
// fetched from the active endpoint (404).
func TestDeleteJobEvictsFromActive(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()
	jobID := submitTestJob(t, client, "to-delete", "normal", "/tmp", "#!/bin/bash\ntrue")
	id := strconv.Itoa(int(jobID))
	if _, _, err := client.Jobs.DeleteJob(ctx, id, nil); err != nil {
		t.Fatalf("DeleteJob failed: %v", err)
	}
	_, _, err := client.Jobs.GetJob(ctx, id, nil)
	if err == nil {
		t.Fatal("expected 404 after delete, got nil error")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error = %v, want not-found", err)
	}
}
// ---------------------------------------------------------------------------
// P0: Job State format ([]string, not bare string)
// ---------------------------------------------------------------------------
// TestJobStateIsStringArray guards the wire format of the active-jobs
// job_state field: it must be a non-empty JSON string array (["PENDING"]),
// not a bare string, or downstream state mapping silently fails.
func TestJobStateIsStringArray(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()
	submitTestJob(t, client, "state-test", "normal", "/tmp", "#!/bin/bash\necho")
	resp, _, err := client.Jobs.GetJobs(ctx, nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) == 0 {
		t.Fatal("expected at least one job")
	}
	job := resp.Jobs[0]
	if len(job.JobState) == 0 {
		t.Fatal("JobState is empty — must be non-empty []string to avoid mapSlurmStateToTaskStatus silent failure")
	}
	if job.JobState[0] != "PENDING" {
		t.Errorf("JobState[0] = %q, want %q", job.JobState[0], "PENDING")
	}
	// Re-marshal the decoded job and inspect the raw JSON to pin the array
	// encoding, not just the decoded Go value.
	raw, err := json.Marshal(job)
	if err != nil {
		t.Fatalf("Marshal job: %v", err)
	}
	if !strings.Contains(string(raw), `"job_state":["PENDING"]`) {
		t.Errorf("JobState JSON = %s, want array format [\"PENDING\"]", string(raw))
	}
}
// ---------------------------------------------------------------------------
// P0: Full Job Lifecycle
// ---------------------------------------------------------------------------
// TestJobLifecycle drives one job through PENDING -> RUNNING -> COMPLETED and
// verifies the mock's eviction behavior: reaching a terminal state moves the
// job from the active queue into the slurmdb history queue.
func TestJobLifecycle(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()
	jobID := submitTestJob(t, client, "lifecycle", "normal", "/tmp/lc", "#!/bin/bash\necho lifecycle")
	// Verify PENDING in active
	resp, _, err := client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob PENDING: %v", err)
	}
	if resp.Jobs[0].JobState[0] != "PENDING" {
		t.Errorf("initial state = %v, want PENDING", resp.Jobs[0].JobState)
	}
	if len(mock.GetAllActiveJobs()) != 1 {
		t.Error("should have 1 active job")
	}
	// Transition to RUNNING
	mock.SetJobState(jobID, "RUNNING")
	resp, _, err = client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob RUNNING: %v", err)
	}
	if resp.Jobs[0].JobState[0] != "RUNNING" {
		t.Errorf("running state = %v, want RUNNING", resp.Jobs[0].JobState)
	}
	// Entering RUNNING should stamp a start time on the job.
	if resp.Jobs[0].StartTime == nil || resp.Jobs[0].StartTime.Number == nil {
		t.Error("StartTime should be set for RUNNING job")
	}
	if len(mock.GetAllActiveJobs()) != 1 {
		t.Error("should still have 1 active job after RUNNING")
	}
	// Transition to COMPLETED — triggers eviction
	mock.SetJobState(jobID, "COMPLETED")
	_, _, err = client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err == nil {
		t.Fatal("expected 404 after COMPLETED (evicted from active)")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error = %v, want not-found", err)
	}
	if len(mock.GetAllActiveJobs()) != 0 {
		t.Error("active jobs should be empty after COMPLETED")
	}
	if len(mock.GetAllHistoryJobs()) != 1 {
		t.Error("history should contain 1 job after COMPLETED")
	}
	if mock.GetJobState(jobID) != "COMPLETED" {
		t.Errorf("state = %q, want COMPLETED", mock.GetJobState(jobID))
	}
	// Verify history endpoint returns the job
	histResp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
	if err != nil {
		t.Fatalf("SlurmdbJobs.GetJob: %v", err)
	}
	if len(histResp.Jobs) != 1 {
		t.Fatalf("history jobs = %d, want 1", len(histResp.Jobs))
	}
	histJob := histResp.Jobs[0]
	// History entries use the nested state object {current: [...], reason}.
	if histJob.State == nil || len(histJob.State.Current) == 0 || histJob.State.Current[0] != "COMPLETED" {
		t.Errorf("history state = %v, want current=[COMPLETED]", histJob.State)
	}
	// COMPLETED jobs are expected to carry exit code 0.
	if histJob.ExitCode == nil || histJob.ExitCode.ReturnCode == nil || histJob.ExitCode.ReturnCode.Number == nil {
		t.Error("history ExitCode should be set")
	} else if *histJob.ExitCode.ReturnCode.Number != 0 {
		t.Errorf("exit code = %d, want 0 for COMPLETED", *histJob.ExitCode.ReturnCode.Number)
	}
}
// ---------------------------------------------------------------------------
// P1: Nodes
// ---------------------------------------------------------------------------
// TestGetNodes verifies the mock exposes the three default nodes
// node01..node03.
func TestGetNodes(t *testing.T) {
	// The mock handle was unused; drop the dead `_ = mock` assignment.
	client, _ := setupTestClient(t)
	ctx := context.Background()
	resp, _, err := client.Nodes.GetNodes(ctx, nil)
	if err != nil {
		t.Fatalf("GetNodes failed: %v", err)
	}
	if resp.Nodes == nil {
		t.Fatal("Nodes is nil")
	}
	if len(*resp.Nodes) != 3 {
		t.Errorf("len(Nodes) = %d, want 3", len(*resp.Nodes))
	}
	// Collect names into a set for O(1) membership checks instead of the
	// original nested-loop scan.
	names := make(map[string]bool, len(*resp.Nodes))
	for i, n := range *resp.Nodes {
		if n.Name == nil {
			t.Errorf("node %d: Name is nil", i)
			continue
		}
		names[*n.Name] = true
	}
	for _, expected := range []string{"node01", "node02", "node03"} {
		if !names[expected] {
			t.Errorf("missing node %q in %v", expected, names)
		}
	}
}
// TestGetNode fetches a single node by name and checks it round-trips.
func TestGetNode(t *testing.T) {
	client, _ := setupTestClient(t)
	resp, _, err := client.Nodes.GetNode(context.Background(), "node02", nil)
	if err != nil {
		t.Fatalf("GetNode failed: %v", err)
	}
	if resp.Nodes == nil || len(*resp.Nodes) != 1 {
		t.Fatalf("expected 1 node, got %v", resp.Nodes)
	}
	node := (*resp.Nodes)[0]
	if node.Name == nil || *node.Name != "node02" {
		t.Errorf("Name = %v, want node02", node.Name)
	}
}
// TestGetNodeNotFound verifies an unknown node name yields a not-found error.
func TestGetNodeNotFound(t *testing.T) {
	client, _ := setupTestClient(t)
	_, _, err := client.Nodes.GetNode(context.Background(), "nonexistent", nil)
	switch {
	case err == nil:
		t.Fatal("expected error for unknown node, got nil")
	case !slurm.IsNotFound(err):
		t.Errorf("error = %v, want not-found", err)
	}
}
// ---------------------------------------------------------------------------
// P1: Partitions
// ---------------------------------------------------------------------------
// TestGetPartitions verifies the mock exposes the "normal" and "gpu"
// partitions.
func TestGetPartitions(t *testing.T) {
	client, _ := setupTestClient(t)
	resp, _, err := client.Partitions.GetPartitions(context.Background(), nil)
	if err != nil {
		t.Fatalf("GetPartitions failed: %v", err)
	}
	if resp.Partitions == nil {
		t.Fatal("Partitions is nil")
	}
	parts := *resp.Partitions
	if len(parts) != 2 {
		t.Errorf("len(Partitions) = %d, want 2", len(parts))
	}
	seen := map[string]bool{}
	for _, part := range parts {
		if part.Name != nil {
			seen[*part.Name] = true
		}
	}
	if !seen["normal"] || !seen["gpu"] {
		t.Errorf("expected normal and gpu partitions, got %v", seen)
	}
}
// TestGetPartition fetches the "gpu" partition by name.
func TestGetPartition(t *testing.T) {
	client, _ := setupTestClient(t)
	resp, _, err := client.Partitions.GetPartition(context.Background(), "gpu", nil)
	if err != nil {
		t.Fatalf("GetPartition failed: %v", err)
	}
	if resp.Partitions == nil || len(*resp.Partitions) != 1 {
		t.Fatalf("expected 1 partition, got %v", resp.Partitions)
	}
	part := (*resp.Partitions)[0]
	if part.Name == nil || *part.Name != "gpu" {
		t.Errorf("Name = %v, want gpu", part.Name)
	}
}
// TestGetPartitionNotFound verifies an unknown partition name yields a
// not-found error.
func TestGetPartitionNotFound(t *testing.T) {
	client, _ := setupTestClient(t)
	_, _, err := client.Partitions.GetPartition(context.Background(), "nonexistent", nil)
	switch {
	case err == nil:
		t.Fatal("expected error for unknown partition, got nil")
	case !slurm.IsNotFound(err):
		t.Errorf("error = %v, want not-found", err)
	}
}
// ---------------------------------------------------------------------------
// P1: Diag
// ---------------------------------------------------------------------------
// TestGetDiag checks the fixed diagnostics the mock reports (3 server
// threads, empty agent queue).
func TestGetDiag(t *testing.T) {
	client, _ := setupTestClient(t)
	resp, _, err := client.Diag.GetDiag(context.Background())
	if err != nil {
		t.Fatalf("GetDiag failed: %v", err)
	}
	stats := resp.Statistics
	if stats == nil {
		t.Fatal("Statistics is nil")
	}
	if stats.ServerThreadCount == nil || *stats.ServerThreadCount != 3 {
		t.Errorf("ServerThreadCount = %v, want 3", stats.ServerThreadCount)
	}
	if stats.AgentQueueSize == nil || *stats.AgentQueueSize != 0 {
		t.Errorf("AgentQueueSize = %v, want 0", stats.AgentQueueSize)
	}
}
// ---------------------------------------------------------------------------
// P1: SlurmDB Jobs
// ---------------------------------------------------------------------------
// TestSlurmdbGetJobsEmpty verifies the history endpoint starts out empty.
func TestSlurmdbGetJobsEmpty(t *testing.T) {
	client, _ := setupTestClient(t)
	resp, _, err := client.SlurmdbJobs.GetJobs(context.Background(), nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if n := len(resp.Jobs); n != 0 {
		t.Errorf("len(Jobs) = %d, want 0 (no history)", n)
	}
}
// TestSlurmdbGetJobsAfterEviction completes a job and verifies it appears in
// the history listing with the nested COMPLETED state and a submission time.
func TestSlurmdbGetJobsAfterEviction(t *testing.T) {
	client, mock := setupTestClient(t)
	jobID := submitTestJob(t, client, "hist-job", "normal", "/tmp/h", "#!/bin/bash\necho hist")
	mock.SetJobState(jobID, "RUNNING")
	mock.SetJobState(jobID, "COMPLETED")
	resp, _, err := client.SlurmdbJobs.GetJobs(context.Background(), nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
	}
	histJob := resp.Jobs[0]
	if histJob.Name == nil || *histJob.Name != "hist-job" {
		t.Errorf("Name = %v, want hist-job", histJob.Name)
	}
	if histJob.State == nil || len(histJob.State.Current) == 0 || histJob.State.Current[0] != "COMPLETED" {
		t.Errorf("State = %v, want current=[COMPLETED]", histJob.State)
	}
	if histJob.Time == nil || histJob.Time.Submission == nil {
		t.Error("Time.Submission should be set")
	}
}
// TestSlurmdbGetJobByID fetches a FAILED job from history and checks its ID,
// nested state, and non-zero exit code.
func TestSlurmdbGetJobByID(t *testing.T) {
	client, mock := setupTestClient(t)
	jobID := submitTestJob(t, client, "single-hist", "normal", "/tmp/sh", "#!/bin/bash\nexit 1")
	mock.SetJobState(jobID, "FAILED")
	resp, _, err := client.SlurmdbJobs.GetJob(context.Background(), strconv.Itoa(int(jobID)))
	if err != nil {
		t.Fatalf("GetJob failed: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
	}
	histJob := resp.Jobs[0]
	if histJob.JobID == nil || *histJob.JobID != jobID {
		t.Errorf("JobID = %v, want %d", histJob.JobID, jobID)
	}
	if histJob.State == nil || len(histJob.State.Current) == 0 || histJob.State.Current[0] != "FAILED" {
		t.Errorf("State = %v, want current=[FAILED]", histJob.State)
	}
	exitCode := histJob.ExitCode
	if exitCode == nil || exitCode.ReturnCode == nil || exitCode.ReturnCode.Number == nil {
		t.Error("ExitCode should be set")
	} else if *exitCode.ReturnCode.Number != 1 {
		t.Errorf("exit code = %d, want 1 for FAILED", *exitCode.ReturnCode.Number)
	}
}
// TestSlurmdbGetJobNotFound verifies an unknown history job ID yields a
// not-found error.
func TestSlurmdbGetJobNotFound(t *testing.T) {
	client, _ := setupTestClient(t)
	_, _, err := client.SlurmdbJobs.GetJob(context.Background(), "999")
	switch {
	case err == nil:
		t.Fatal("expected error for unknown history job, got nil")
	case !slurm.IsNotFound(err):
		t.Errorf("error = %v, want not-found", err)
	}
}
// TestSlurmdbJobStateIsNested pins the history endpoint's state wire format:
// a nested object {current: [...], reason: ""} rather than a flat array.
func TestSlurmdbJobStateIsNested(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()
	jobID := submitTestJob(t, client, "nested-state", "gpu", "/tmp/ns", "#!/bin/bash\ntrue")
	mock.SetJobState(jobID, "COMPLETED")
	resp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
	if err != nil {
		t.Fatalf("GetJob failed: %v", err)
	}
	// Guard the slice access: the original indexed resp.Jobs[0] without a
	// length check and would panic instead of failing cleanly.
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
	}
	job := resp.Jobs[0]
	if job.State == nil {
		t.Fatal("State is nil — must be nested {current: [...], reason: \"\"}")
	}
	if len(job.State.Current) == 0 {
		t.Fatal("State.Current is empty")
	}
	if job.State.Current[0] != "COMPLETED" {
		t.Errorf("State.Current[0] = %q, want COMPLETED", job.State.Current[0])
	}
	if job.State.Reason == nil || *job.State.Reason != "" {
		t.Errorf("State.Reason = %v, want empty string", job.State.Reason)
	}
	// Re-marshal and check the raw JSON to pin the nested object encoding.
	raw, err := json.Marshal(job)
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}
	rawStr := string(raw)
	if !strings.Contains(rawStr, `"state":{"current":["COMPLETED"]`) {
		t.Errorf("state JSON should use nested format, got: %s", rawStr)
	}
}
// TestSlurmdbJobsFilterByName verifies the JobName filter restricts the
// history listing to matching jobs only.
func TestSlurmdbJobsFilterByName(t *testing.T) {
	client, mock := setupTestClient(t)
	matchID := submitTestJob(t, client, "match-me", "normal", "/tmp", "#!/bin/bash\ntrue")
	otherID := submitTestJob(t, client, "other-job", "normal", "/tmp", "#!/bin/bash\ntrue")
	mock.SetJobState(matchID, "COMPLETED")
	mock.SetJobState(otherID, "COMPLETED")
	opts := &slurm.GetSlurmdbJobsOptions{JobName: slurm.Ptr("match-me")}
	resp, _, err := client.SlurmdbJobs.GetJobs(context.Background(), opts)
	if err != nil {
		t.Fatalf("GetJobs with filter: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1 (filtered by name)", len(resp.Jobs))
	}
	if resp.Jobs[0].Name == nil || *resp.Jobs[0].Name != "match-me" {
		t.Errorf("Name = %v, want match-me", resp.Jobs[0].Name)
	}
}
// ---------------------------------------------------------------------------
// SetJobState terminal state exit codes
// ---------------------------------------------------------------------------
// TestSetJobStateExitCodes verifies the exit code the mock records for each
// terminal state: 0 for COMPLETED, 1 for FAILED/CANCELLED/TIMEOUT.
func TestSetJobStateExitCodes(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()
	cases := []struct {
		state    string
		wantExit int64
	}{
		{"COMPLETED", 0},
		{"FAILED", 1},
		{"CANCELLED", 1},
		{"TIMEOUT", 1},
	}
	for i, tc := range cases {
		jobID := submitTestJob(t, client, "exit-"+strconv.Itoa(i), "normal", "/tmp", "#!/bin/bash\ntrue")
		mock.SetJobState(jobID, tc.state)
		resp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
		if err != nil {
			t.Fatalf("GetJob(%d) %s: %v", jobID, tc.state, err)
		}
		// Guard the index: the original accessed resp.Jobs[0] without a
		// length check and would panic on an empty result.
		if len(resp.Jobs) == 0 {
			t.Errorf("%s: no history jobs returned", tc.state)
			continue
		}
		job := resp.Jobs[0]
		if job.ExitCode == nil || job.ExitCode.ReturnCode == nil || job.ExitCode.ReturnCode.Number == nil {
			t.Errorf("%s: ExitCode not set", tc.state)
			continue
		}
		if *job.ExitCode.ReturnCode.Number != tc.wantExit {
			t.Errorf("%s: exit code = %d, want %d", tc.state, *job.ExitCode.ReturnCode.Number, tc.wantExit)
		}
	}
}

View File

@@ -0,0 +1,134 @@
// Package mockslurm provides response builder helpers that generate JSON
// matching Openapi* types from the internal/slurm package.
package mockslurm
import (
"gcy_hpc_server/internal/slurm"
)
// newMeta returns the standard OpenapiMeta block attached to every mock
// response: plugin type "openapi/v0.0.40", data parser "json/v0.0.40", and
// Slurm release 24.05.0 (major "24", minor "5", micro "0").
func newMeta() slurm.OpenapiMeta {
	return slurm.OpenapiMeta{
		Plugin: &slurm.MetaPlugin{
			Type:       slurm.Ptr("openapi/v0.0.40"),
			Name:       slurm.Ptr("slurmrestd"),
			DataParser: slurm.Ptr("json/v0.0.40"),
		},
		Slurm: &slurm.MetaSlurm{
			Version: &slurm.MetaSlurmVersion{
				Major: slurm.Ptr("24"),
				Micro: slurm.Ptr("0"),
				Minor: slurm.Ptr("5"),
			},
			Release: slurm.Ptr("24.05.0"),
		},
	}
}
// NewSubmitResponse builds an OpenapiJobSubmitResponse with the given jobID
// set both at the top level and in the nested Result message.
func NewSubmitResponse(jobID int32) slurm.OpenapiJobSubmitResponse {
	// Use the shared newMeta() builder for consistency with every other
	// response builder in this file (was an empty &slurm.OpenapiMeta{}).
	meta := newMeta()
	return slurm.OpenapiJobSubmitResponse{
		Result: &slurm.JobSubmitResponseMsg{
			JobID: slurm.Ptr(jobID),
		},
		JobID:    slurm.Ptr(jobID),
		StepID:   slurm.Ptr("Scalar"),
		Meta:     &meta,
		Errors:   slurm.OpenapiErrors{},
		Warnings: slurm.OpenapiWarnings{},
	}
}
// NewJobInfoResponse wraps the given jobs in an OpenapiJobInfoResp with
// standard meta and empty errors/warnings.
func NewJobInfoResponse(jobs []slurm.JobInfo) slurm.OpenapiJobInfoResp {
	meta := newMeta()
	var resp slurm.OpenapiJobInfoResp
	resp.Jobs = jobs
	resp.Meta = &meta
	resp.Errors = slurm.OpenapiErrors{}
	resp.Warnings = slurm.OpenapiWarnings{}
	return resp
}
// NewJobListResponse is an alias for NewJobInfoResponse, kept for call-site
// readability where "list" semantics read more naturally.
func NewJobListResponse(jobs []slurm.JobInfo) slurm.OpenapiJobInfoResp {
	return NewJobInfoResponse(jobs)
}
// NewDeleteResponse builds an OpenapiResp carrying standard meta and empty
// errors/warnings, as returned by the delete-job endpoint.
func NewDeleteResponse() slurm.OpenapiResp {
	meta := newMeta()
	var resp slurm.OpenapiResp
	resp.Meta = &meta
	resp.Errors = slurm.OpenapiErrors{}
	resp.Warnings = slurm.OpenapiWarnings{}
	return resp
}
// NewNodeResponse wraps the given nodes in an OpenapiNodesResp with standard
// meta and empty errors/warnings.
func NewNodeResponse(nodes []slurm.Node) slurm.OpenapiNodesResp {
	meta := newMeta()
	wrapped := slurm.Nodes(nodes)
	var resp slurm.OpenapiNodesResp
	resp.Nodes = &wrapped
	resp.Meta = &meta
	resp.Errors = slurm.OpenapiErrors{}
	resp.Warnings = slurm.OpenapiWarnings{}
	return resp
}
// NewPartitionResponse wraps the given partitions in an OpenapiPartitionResp
// with standard meta and empty errors/warnings.
func NewPartitionResponse(partitions []slurm.PartitionInfo) slurm.OpenapiPartitionResp {
	meta := newMeta()
	wrapped := slurm.PartitionInfoMsg(partitions)
	var resp slurm.OpenapiPartitionResp
	resp.Partitions = &wrapped
	resp.Meta = &meta
	resp.Errors = slurm.OpenapiErrors{}
	resp.Warnings = slurm.OpenapiWarnings{}
	return resp
}
// NewDiagResponse builds an OpenapiDiagResp with fixed statistics (3 server
// threads, empty queues) and standard meta.
func NewDiagResponse() slurm.OpenapiDiagResp {
	meta := newMeta()
	stats := &slurm.StatsMsg{
		ServerThreadCount:   slurm.Ptr(int32(3)),
		AgentQueueSize:      slurm.Ptr(int32(0)),
		JobsRunning:         slurm.Ptr(int32(0)),
		JobsPending:         slurm.Ptr(int32(0)),
		ScheduleQueueLength: slurm.Ptr(int32(0)),
	}
	return slurm.OpenapiDiagResp{
		Statistics: stats,
		Meta:       &meta,
		Errors:     slurm.OpenapiErrors{},
		Warnings:   slurm.OpenapiWarnings{},
	}
}
// NewJobHistoryResponse wraps the given SlurmDBD jobs in an
// OpenapiSlurmdbdJobsResp with standard meta and empty errors/warnings.
func NewJobHistoryResponse(jobs []slurm.Job) slurm.OpenapiSlurmdbdJobsResp {
	meta := newMeta()
	var resp slurm.OpenapiSlurmdbdJobsResp
	resp.Jobs = jobs
	resp.Meta = &meta
	resp.Errors = slurm.OpenapiErrors{}
	resp.Warnings = slurm.OpenapiWarnings{}
	return resp
}
// buildActiveJobState builds the flat job_state string array used by the
// active jobs endpoint (e.g. ["RUNNING"]). A nil input stays nil so the JSON
// encoding is unchanged.
func buildActiveJobState(states ...string) []string {
	if len(states) == 0 {
		return states
	}
	result := make([]string, len(states))
	copy(result, states)
	return result
}
// buildHistoryJobState builds the nested state object used by the SlurmDB
// history endpoint, e.g. {current: ["COMPLETED"], reason: ""}.
func buildHistoryJobState(states ...string) *slurm.JobState {
	state := slurm.JobState{Current: states}
	state.Reason = slurm.Ptr("")
	return &state
}

View File

@@ -0,0 +1,291 @@
package mockslurm
import (
"encoding/json"
"testing"
"gcy_hpc_server/internal/slurm"
)
// TestNewSubmitResponse round-trips a submit response through JSON and checks
// both the top-level and nested job IDs plus the snake_case field names.
func TestNewSubmitResponse(t *testing.T) {
	resp := NewSubmitResponse(42)
	data, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var got slurm.OpenapiJobSubmitResponse
	if err := json.Unmarshal(data, &got); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	if got.Result == nil || got.Result.JobID == nil || *got.Result.JobID != 42 {
		t.Errorf("result.job_id = %v, want 42", got.Result)
	}
	if got.JobID == nil || *got.JobID != 42 {
		t.Errorf("job_id = %v, want 42", got.JobID)
	}
	var raw map[string]interface{}
	// Check the Unmarshal error instead of silently ignoring it.
	if err := json.Unmarshal(data, &raw); err != nil {
		t.Fatalf("unmarshal raw: %v", err)
	}
	if _, ok := raw["job_id"]; !ok {
		t.Error("missing snake_case field 'job_id'")
	}
	if _, ok := raw["result"]; !ok {
		t.Error("missing field 'result'")
	}
}
// TestNewJobInfoResponse round-trips a two-job response and pins the
// snake_case job_state array encoding in the raw JSON.
func TestNewJobInfoResponse(t *testing.T) {
	jobs := []slurm.JobInfo{
		{JobID: slurm.Ptr(int32(1)), Name: slurm.Ptr("test1"), JobState: []string{"RUNNING"}},
		{JobID: slurm.Ptr(int32(2)), Name: slurm.Ptr("test2"), JobState: []string{"PENDING"}},
	}
	resp := NewJobInfoResponse(jobs)
	data, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var got slurm.OpenapiJobInfoResp
	if err := json.Unmarshal(data, &got); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	if len(got.Jobs) != 2 {
		t.Fatalf("len(jobs) = %d, want 2", len(got.Jobs))
	}
	if *got.Jobs[0].JobID != 1 {
		t.Errorf("jobs[0].job_id = %d, want 1", *got.Jobs[0].JobID)
	}
	if got.Jobs[0].JobState[0] != "RUNNING" {
		t.Errorf("jobs[0].job_state = %v, want [RUNNING]", got.Jobs[0].JobState)
	}
	// Walk the raw JSON with checked type assertions; the original ignored
	// the Unmarshal error and used unchecked assertions that could panic.
	var raw map[string]interface{}
	if err := json.Unmarshal(data, &raw); err != nil {
		t.Fatalf("unmarshal raw: %v", err)
	}
	jobsRaw, ok := raw["jobs"].([]interface{})
	if !ok || len(jobsRaw) == 0 {
		t.Fatalf("jobs field = %v, want non-empty array", raw["jobs"])
	}
	job0, ok := jobsRaw[0].(map[string]interface{})
	if !ok {
		t.Fatalf("jobs[0] = %v, want object", jobsRaw[0])
	}
	stateRaw, ok := job0["job_state"]
	if !ok {
		t.Error("missing snake_case field 'job_state'")
	}
	stateArr, ok := stateRaw.([]interface{})
	if !ok || len(stateArr) != 1 {
		t.Fatalf("job_state = %v, want one-element array", stateRaw)
	}
	if s, ok := stateArr[0].(string); !ok || s != "RUNNING" {
		t.Errorf("job_state = %v, want array [\"RUNNING\"]", stateRaw)
	}
}
func TestNewJobListResponse(t *testing.T) {
jobs := []slurm.JobInfo{
{JobID: slurm.Ptr(int32(10)), Name: slurm.Ptr("listjob")},
}
resp := NewJobListResponse(jobs)
data, err := json.Marshal(resp)
if err != nil {
t.Fatalf("marshal: %v", err)
}
var got slurm.OpenapiJobInfoResp
if err := json.Unmarshal(data, &got); err != nil {
t.Fatalf("unmarshal: %v", err)
}
if len(got.Jobs) != 1 || *got.Jobs[0].JobID != 10 {
t.Errorf("jobs = %+v, want single job with id 10", got.Jobs)
}
}
// TestNewDeleteResponse round-trips the delete response and checks meta is
// populated while errors/warnings stay empty.
func TestNewDeleteResponse(t *testing.T) {
	resp := NewDeleteResponse()
	data, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var got slurm.OpenapiResp
	if err := json.Unmarshal(data, &got); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	// Fatal, not Error: the original continued to dereference got.Meta.Plugin
	// below after only a non-fatal check, which would panic on nil Meta.
	if got.Meta == nil {
		t.Fatal("meta is nil")
	}
	if len(got.Errors) != 0 {
		t.Errorf("errors = %v, want empty", got.Errors)
	}
	if len(got.Warnings) != 0 {
		t.Errorf("warnings = %v, want empty", got.Warnings)
	}
	if got.Meta.Plugin == nil || got.Meta.Plugin.Type == nil || *got.Meta.Plugin.Type != "openapi/v0.0.40" {
		t.Error("meta.plugin.type missing or wrong")
	}
}
func TestNewNodeResponse(t *testing.T) {
nodes := []slurm.Node{
{Name: slurm.Ptr("node1"), State: []string{"IDLE"}},
{Name: slurm.Ptr("node2"), State: []string{"ALLOCATED"}},
}
resp := NewNodeResponse(nodes)
data, err := json.Marshal(resp)
if err != nil {
t.Fatalf("marshal: %v", err)
}
var got slurm.OpenapiNodesResp
if err := json.Unmarshal(data, &got); err != nil {
t.Fatalf("unmarshal: %v", err)
}
if got.Nodes == nil || len(*got.Nodes) != 2 {
t.Fatalf("nodes = %v, want 2 nodes", got.Nodes)
}
gotNodes := *got.Nodes
if *gotNodes[0].Name != "node1" {
t.Errorf("nodes[0].name = %s, want node1", *gotNodes[0].Name)
}
if gotNodes[0].State[0] != "IDLE" {
t.Errorf("nodes[0].state = %v, want [IDLE]", gotNodes[0].State)
}
}
// TestNewPartitionResponse round-trips a partition response and checks the
// snake_case field name in the raw JSON.
func TestNewPartitionResponse(t *testing.T) {
	partitions := []slurm.PartitionInfo{
		{Name: slurm.Ptr("normal")},
		{Name: slurm.Ptr("debug")},
	}
	resp := NewPartitionResponse(partitions)
	data, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var got slurm.OpenapiPartitionResp
	if err := json.Unmarshal(data, &got); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	if got.Partitions == nil || len(*got.Partitions) != 2 {
		t.Fatalf("partitions = %v, want 2", got.Partitions)
	}
	gotParts := *got.Partitions
	if *gotParts[0].Name != "normal" {
		t.Errorf("partitions[0].name = %s, want normal", *gotParts[0].Name)
	}
	var raw map[string]interface{}
	// Check the Unmarshal error instead of silently ignoring it.
	if err := json.Unmarshal(data, &raw); err != nil {
		t.Fatalf("unmarshal raw: %v", err)
	}
	if _, ok := raw["partitions"]; !ok {
		t.Error("missing snake_case field 'partitions'")
	}
}
// TestNewDiagResponse round-trips the diagnostics response and checks the
// fixed statistics and meta presence.
func TestNewDiagResponse(t *testing.T) {
	data, err := json.Marshal(NewDiagResponse())
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var decoded slurm.OpenapiDiagResp
	if err := json.Unmarshal(data, &decoded); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	stats := decoded.Statistics
	if stats == nil {
		t.Fatal("statistics is nil")
	}
	if stats.ServerThreadCount == nil || *stats.ServerThreadCount != 3 {
		t.Errorf("statistics.server_thread_count = %v, want 3", stats.ServerThreadCount)
	}
	if decoded.Meta == nil {
		t.Error("meta is nil")
	}
}
// TestNewJobHistoryResponse round-trips a slurmdb history response and pins
// the nested state object's JSON shape.
func TestNewJobHistoryResponse(t *testing.T) {
	jobs := []slurm.Job{
		{JobID: slurm.Ptr(int32(100)), Name: slurm.Ptr("histjob"), State: &slurm.JobState{Current: []string{"COMPLETED"}}},
	}
	resp := NewJobHistoryResponse(jobs)
	data, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var got slurm.OpenapiSlurmdbdJobsResp
	if err := json.Unmarshal(data, &got); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	if len(got.Jobs) != 1 {
		t.Fatalf("len(jobs) = %d, want 1", len(got.Jobs))
	}
	if *got.Jobs[0].JobID != 100 {
		t.Errorf("jobs[0].job_id = %d, want 100", *got.Jobs[0].JobID)
	}
	if got.Jobs[0].State == nil || len(got.Jobs[0].State.Current) != 1 || got.Jobs[0].State.Current[0] != "COMPLETED" {
		t.Errorf("jobs[0].state = %+v, want current=[COMPLETED]", got.Jobs[0].State)
	}
	// Walk the raw JSON with checked type assertions so malformed output
	// fails the test rather than panicking (the original ignored the
	// Unmarshal error and chained unchecked assertions).
	var raw map[string]interface{}
	if err := json.Unmarshal(data, &raw); err != nil {
		t.Fatalf("unmarshal raw: %v", err)
	}
	jobsRaw, ok := raw["jobs"].([]interface{})
	if !ok || len(jobsRaw) == 0 {
		t.Fatalf("jobs field = %v, want non-empty array", raw["jobs"])
	}
	job0, ok := jobsRaw[0].(map[string]interface{})
	if !ok {
		t.Fatalf("jobs[0] = %v, want object", jobsRaw[0])
	}
	stateRaw, ok := job0["state"].(map[string]interface{})
	if !ok {
		t.Fatalf("state = %v, want object", job0["state"])
	}
	currentRaw, ok := stateRaw["current"].([]interface{})
	if !ok || len(currentRaw) == 0 {
		t.Fatalf("state.current = %v, want non-empty array", stateRaw["current"])
	}
	if s, ok := currentRaw[0].(string); !ok || s != "COMPLETED" {
		t.Errorf("state.current = %v, want [COMPLETED]", currentRaw)
	}
}
// TestBuildActiveJobState checks the helper passes states through in order.
func TestBuildActiveJobState(t *testing.T) {
	got := buildActiveJobState("RUNNING", "COMPLETING")
	if len(got) != 2 {
		t.Fatalf("len = %d, want 2", len(got))
	}
	if got[0] != "RUNNING" || got[1] != "COMPLETING" {
		t.Errorf("states = %v, want [RUNNING, COMPLETING]", got)
	}
}
// TestBuildHistoryJobState checks the nested state object and its raw JSON
// shape ({current: [...], reason: ""}).
func TestBuildHistoryJobState(t *testing.T) {
	state := buildHistoryJobState("COMPLETED")
	if state == nil {
		t.Fatal("state is nil")
	}
	if len(state.Current) != 1 || state.Current[0] != "COMPLETED" {
		t.Errorf("current = %v, want [COMPLETED]", state.Current)
	}
	if state.Reason == nil || *state.Reason != "" {
		t.Errorf("reason = %v, want empty string", state.Reason)
	}
	data, err := json.Marshal(state)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var raw map[string]interface{}
	// Check the Unmarshal error instead of ignoring it.
	if err := json.Unmarshal(data, &raw); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	current, ok := raw["current"].([]interface{})
	if !ok {
		t.Fatalf("current is not an array: %v", raw["current"])
	}
	// Guard the index and type assertion so malformed output fails the test
	// instead of panicking (the original used an unchecked current[0].(string)).
	if len(current) == 0 {
		t.Fatal("current is empty")
	}
	if s, ok := current[0].(string); !ok || s != "COMPLETED" {
		t.Errorf("current[0] = %v, want COMPLETED", current[0])
	}
}
// TestNewMetaFields validates the plugin type and the 24.5.0 version triple
// produced by newMeta.
func TestNewMetaFields(t *testing.T) {
	meta := newMeta()
	// Nil-check Type before dereferencing: the original dereferenced
	// *meta.Plugin.Type whenever Plugin was non-nil, which could panic.
	if meta.Plugin == nil || meta.Plugin.Type == nil || *meta.Plugin.Type != "openapi/v0.0.40" {
		t.Error("plugin.type not set correctly")
	}
	if meta.Slurm == nil || meta.Slurm.Version == nil {
		t.Fatal("slurm.version is nil")
	}
	v := meta.Slurm.Version
	// Guard each component pointer before dereferencing.
	if v.Major == nil || v.Minor == nil || v.Micro == nil {
		t.Fatal("slurm.version components are nil")
	}
	if *v.Major != "24" || *v.Minor != "5" || *v.Micro != "0" {
		t.Errorf("slurm.version = %v, want 24.5.0", v)
	}
}

View File

@@ -0,0 +1,405 @@
// Package testenv provides a complete test environment factory that wires up
// SQLite DB + MockSlurm + MockMinIO + all Stores/Services/Handlers + httptest Server.
package testenv
import (
"bytes"
"context"
"crypto/sha256"
"database/sql"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"math/rand"
"net/http"
"net/http/httptest"
"os"
"time"
"gcy_hpc_server/internal/app"
"gcy_hpc_server/internal/config"
"gcy_hpc_server/internal/handler"
"gcy_hpc_server/internal/model"
"gcy_hpc_server/internal/server"
"gcy_hpc_server/internal/service"
"gcy_hpc_server/internal/slurm"
"gcy_hpc_server/internal/storage"
"gcy_hpc_server/internal/store"
"gcy_hpc_server/internal/testutil/mockminio"
"gcy_hpc_server/internal/testutil/mockslurm"
"go.uber.org/zap"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
// TestEnv holds a fully wired test environment with all dependencies:
// in-memory SQLite, a MockSlurm REST server, an in-memory MinIO, every
// store/service/handler, and the application's httptest server. Only the DB
// and the two mocks are exported for direct use in assertions.
type TestEnv struct {
	DB        *gorm.DB
	MockSlurm *mockslurm.MockSlurm
	MockMinIO *mockminio.InMemoryStorage
	// Internal
	mockSlurmSrv *httptest.Server // backing HTTP server for the Slurm mock
	srv          *httptest.Server // the application server under test
	poller       *app.TaskPoller
	// Stores
	appStore    *store.ApplicationStore
	taskStore   *store.TaskStore
	fileStore   *store.FileStore
	blobStore   *store.BlobStore
	uploadStore *store.UploadStore
	folderStore *store.FolderStore
	// Services
	jobSvc     *service.JobService
	clusterSvc *service.ClusterService
	folderSvc  *service.FolderService
	stagingSvc *service.FileStagingService
	taskSvc    *service.TaskService
	appSvc     *service.ApplicationService
	uploadSvc  *service.UploadService
	fileSvc    *service.FileService
	// Handlers
	jobH     *handler.JobHandler
	clusterH *handler.ClusterHandler
	appH     *handler.ApplicationHandler
	uploadH  *handler.UploadHandler
	fileH    *handler.FileHandler
	folderH  *handler.FolderHandler
	taskH    *handler.TaskHandler
	// workDir is the on-disk working directory; logger is zap.NewNop unless
	// WithDebugLogging was supplied.
	workDir string
	logger  *zap.Logger
}
// Option configures a TestEnv during construction (functional-options
// pattern).
type Option func(*testEnvConfig)

// testEnvConfig collects option values before wiring begins.
type testEnvConfig struct {
	workDir  string // overrides the auto-created temp dir when non-empty
	debugLog bool   // switches the zap logger from NewNop to NewExample
}
// WithWorkDir overrides the default temporary work directory.
func WithWorkDir(path string) Option {
	return func(cfg *testEnvConfig) {
		cfg.workDir = path
	}
}
// WithDebugLogging switches from zap.NewNop() to zap.NewExample().
func WithDebugLogging() Option {
	return func(cfg *testEnvConfig) {
		cfg.debugLog = true
	}
}
// NewTestEnv creates a fully wired test environment: an in-memory SQLite
// database, a mock Slurm REST server, an in-memory MinIO stand-in, all
// stores/services/handlers, an httptest HTTP server, and the running task
// processor and poller.
//
// When t implements Cleanup (as *testing.T does), teardown is registered
// automatically — callers do not need to clean up. A caller-supplied work
// directory (via WithWorkDir) is never deleted; only a directory created
// here is removed on cleanup.
func NewTestEnv(t interface {
	Fatalf(format string, args ...interface{})
}, opts ...Option) *TestEnv {
	cfg := &testEnvConfig{}
	for _, o := range opts {
		o(cfg)
	}
	var logger *zap.Logger
	if cfg.debugLog {
		logger = zap.NewExample()
	} else {
		logger = zap.NewNop()
	}
	// 1. SQLite in-memory DB + AutoMigrate. The random name plus
	// cache=shared isolates each TestEnv while letting every connection in
	// this process see the same database.
	dbName := fmt.Sprintf("file:testenv-%d?mode=memory&cache=shared", rand.Int63())
	db, err := gorm.Open(sqlite.Open(dbName), &gorm.Config{})
	if err != nil {
		t.Fatalf("failed to open SQLite: %v", err)
	}
	sqlDB, err := db.DB()
	if err != nil {
		t.Fatalf("failed to get underlying sql.DB: %v", err)
	}
	// A single connection avoids SQLite lock contention from the background
	// processor/poller goroutines.
	sqlDB.SetMaxOpenConns(1)
	if err := db.AutoMigrate(
		&model.Application{},
		&model.FileBlob{},
		&model.File{},
		&model.Folder{},
		&model.UploadSession{},
		&model.UploadChunk{},
		&model.Task{},
	); err != nil {
		t.Fatalf("failed to auto-migrate: %v", err)
	}
	// 2. Temp work directory. Track ownership so cleanup only removes a
	// directory this function created, never a caller-supplied one.
	workDir := cfg.workDir
	ownsWorkDir := false
	if workDir == "" {
		wd, err := os.MkdirTemp("", "testenv-workdir-*")
		if err != nil {
			t.Fatalf("failed to create temp dir: %v", err)
		}
		workDir = wd
		ownsWorkDir = true
	}
	// 3. MockSlurm httptest server.
	mockSlurmSrv, mockSlurm := mockslurm.NewMockSlurmServer()
	// 4. MockMinIO.
	mockMinIO := mockminio.NewInMemoryStorage()
	// 5. All 6 Store instances.
	appStore := store.NewApplicationStore(db)
	taskStore := store.NewTaskStore(db)
	fileStore := store.NewFileStore(db)
	blobStore := store.NewBlobStore(db)
	uploadStore := store.NewUploadStore(db)
	folderStore := store.NewFolderStore(db)
	// 6. Slurm client pointed at the mock server.
	slurmClient, err := slurm.NewClientWithOpts(mockSlurmSrv.URL, slurm.WithHTTPClient(mockSlurmSrv.Client()))
	if err != nil {
		t.Fatalf("failed to create slurm client: %v", err)
	}
	// 7. MinioConfig with test-friendly sizes.
	minioCfg := config.MinioConfig{
		ChunkSize:    16 << 20,
		MaxFileSize:  50 << 30,
		MinChunkSize: 5 << 20,
		SessionTTL:   48,
		Bucket:       "files",
	}
	// 8. All Service instances (dependency order).
	jobSvc := service.NewJobService(slurmClient, logger)
	clusterSvc := service.NewClusterService(slurmClient, logger)
	folderSvc := service.NewFolderService(folderStore, fileStore, logger)
	stagingSvc := service.NewFileStagingService(fileStore, blobStore, mockMinIO, minioCfg.Bucket, logger)
	taskSvc := service.NewTaskService(taskStore, appStore, fileStore, blobStore, stagingSvc, jobSvc, workDir, logger)
	appSvc := service.NewApplicationService(appStore, jobSvc, workDir, logger, taskSvc)
	uploadSvc := service.NewUploadService(mockMinIO, blobStore, fileStore, uploadStore, minioCfg, db, logger)
	fileSvc := service.NewFileService(mockMinIO, blobStore, fileStore, minioCfg.Bucket, db, logger)
	// 9. All 7 Handler instances.
	jobH := handler.NewJobHandler(jobSvc, logger)
	clusterH := handler.NewClusterHandler(clusterSvc, logger)
	appH := handler.NewApplicationHandler(appSvc, logger)
	uploadH := handler.NewUploadHandler(uploadSvc, logger)
	fileH := handler.NewFileHandler(fileSvc, logger)
	folderH := handler.NewFolderHandler(folderSvc, logger)
	taskH := handler.NewTaskHandler(taskSvc, logger)
	// 10. Router.
	router := server.NewRouter(jobH, clusterH, appH, uploadH, fileH, folderH, taskH, logger)
	// 11. HTTP test server.
	srv := httptest.NewServer(router)
	// 12. Start TaskProcessor.
	ctx := context.Background()
	taskSvc.StartProcessor(ctx)
	// 13. Start TaskPoller (100ms interval for tests).
	poller := app.NewTaskPoller(taskSvc, 100*time.Millisecond, logger)
	poller.Start(ctx)
	env := &TestEnv{
		DB:           db,
		MockSlurm:    mockSlurm,
		MockMinIO:    mockMinIO,
		mockSlurmSrv: mockSlurmSrv,
		srv:          srv,
		poller:       poller,
		appStore:     appStore,
		taskStore:    taskStore,
		fileStore:    fileStore,
		blobStore:    blobStore,
		uploadStore:  uploadStore,
		folderStore:  folderStore,
		jobSvc:       jobSvc,
		clusterSvc:   clusterSvc,
		folderSvc:    folderSvc,
		stagingSvc:   stagingSvc,
		taskSvc:      taskSvc,
		appSvc:       appSvc,
		uploadSvc:    uploadSvc,
		fileSvc:      fileSvc,
		jobH:         jobH,
		clusterH:     clusterH,
		appH:         appH,
		uploadH:      uploadH,
		fileH:        fileH,
		folderH:      folderH,
		taskH:        taskH,
		workDir:      workDir,
		logger:       logger,
	}
	// Cleanup registration (LIFO — last registered runs first): processor
	// and poller stop first, then both HTTP servers close, then the work
	// directory (when owned by us) is removed.
	if ct, ok := t.(interface{ Cleanup(func()) }); ok {
		if ownsWorkDir {
			ct.Cleanup(func() { os.RemoveAll(workDir) })
		}
		ct.Cleanup(mockSlurmSrv.Close) // previously leaked: the mock Slurm server was never closed
		ct.Cleanup(srv.Close)
		ct.Cleanup(poller.Stop)
		ct.Cleanup(taskSvc.StopProcessor)
	}
	return env
}
// URL returns the base URL of the embedded httptest server
// (e.g. "http://127.0.0.1:PORT", no trailing slash).
func (env *TestEnv) URL() string {
	return env.srv.URL
}
// DoRequest sends an HTTP request to the test server and returns the raw
// response. When body is non-nil, Content-Type: application/json is set.
// The caller is responsible for closing resp.Body. Failures building or
// sending the request abort via panic, which is acceptable for a test helper.
func (env *TestEnv) DoRequest(method, path string, body io.Reader) *http.Response {
	req, err := http.NewRequest(method, env.srv.URL+path, body)
	if err != nil {
		panic(fmt.Sprintf("failed to create request: %v", err))
	}
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	// A bounded client prevents a wedged handler from hanging the whole test
	// run. The nil Transport still uses http.DefaultTransport, so connection
	// reuse is unchanged from http.DefaultClient.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		panic(fmt.Sprintf("failed to send request: %v", err))
	}
	return resp
}
// DecodeResponse reads and closes the response body, then decodes the
// standard API envelope. It returns the success flag, the raw data payload,
// and any read/decode error. The body is closed even when reading fails
// (the original only closed it on the success path).
func (env *TestEnv) DecodeResponse(resp *http.Response) (bool, json.RawMessage, error) {
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return false, nil, fmt.Errorf("read body: %w", err)
	}
	var result struct {
		Success bool            `json:"success"`
		Data    json.RawMessage `json:"data"`
		Error   string          `json:"error"`
	}
	if err := json.Unmarshal(body, &result); err != nil {
		return false, nil, fmt.Errorf("unmarshal response: %w (body: %s)", err, string(body))
	}
	return result.Success, result.Data, nil
}
// CreateApp creates an application directly through the service layer,
// bypassing HTTP, and returns the new application's ID.
func (env *TestEnv) CreateApp(name, scriptTemplate string, params json.RawMessage) (int64, error) {
	req := &model.CreateApplicationRequest{
		Name:           name,
		ScriptTemplate: scriptTemplate,
		Parameters:     params,
	}
	id, err := env.appSvc.CreateApplication(context.Background(), req)
	if err != nil {
		return 0, err
	}
	return id, nil
}
// UploadTestData stores content in MockMinIO under its SHA-256 key and
// ensures matching FileBlob and File records exist in the database.
// It returns (fileID, blobID) and panics on any failure.
func (env *TestEnv) UploadTestData(name string, content []byte) (int64, int64) {
	ctx := context.Background()
	digest := sha256.Sum256(content)
	key := hex.EncodeToString(digest[:])
	size := int64(len(content))
	if _, err := env.MockMinIO.PutObject(ctx, "files", key, bytes.NewReader(content), size, storage.PutObjectOptions{}); err != nil {
		panic(fmt.Sprintf("PutObject failed: %v", err))
	}
	blob, err := env.blobStore.GetBySHA256(ctx, key)
	if err != nil {
		panic(fmt.Sprintf("get blob by sha256: %v", err))
	}
	if blob == nil {
		// First upload of this content: create the blob record.
		blob = &model.FileBlob{
			SHA256:   key,
			MinioKey: key,
			FileSize: size,
			RefCount: 1,
		}
		if err := env.blobStore.Create(ctx, blob); err != nil {
			panic(fmt.Sprintf("create blob: %v", err))
		}
	}
	rec := &model.File{
		Name:       name,
		BlobSHA256: key,
	}
	if err := env.fileStore.Create(ctx, rec); err != nil {
		panic(fmt.Sprintf("create file failed: %v", err))
	}
	return rec.ID, blob.ID
}
// CreateFile uploads test data via UploadTestData and returns only the file
// ID. The error is always nil and exists for call-site convenience.
func (env *TestEnv) CreateFile(name string, content []byte) (int64, error) {
	id, _ := env.UploadTestData(name, content)
	return id, nil
}
// WaitForTaskStatus polls the DB every 100ms until the task reaches the
// target status or timeout elapses. On timeout the last observed status is
// reported; the original issued one extra DB query here and silently ignored
// its error.
func (env *TestEnv) WaitForTaskStatus(taskID int64, status string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	ctx := context.Background()
	lastStatus := ""
	observed := false
	for time.Now().Before(deadline) {
		task, err := env.taskStore.GetByID(ctx, taskID)
		if err != nil {
			return fmt.Errorf("get task: %w", err)
		}
		if task == nil {
			return fmt.Errorf("task %d not found", taskID)
		}
		if task.Status == status {
			return nil
		}
		lastStatus = task.Status
		observed = true
		time.Sleep(100 * time.Millisecond)
	}
	if observed {
		return fmt.Errorf("timeout waiting for task %d to reach status %q, current: %q", taskID, status, lastStatus)
	}
	return fmt.Errorf("timeout waiting for task %d to reach status %q", taskID, status)
}
// MakeTaskStale rewinds the task's updated_at by 31 seconds using raw SQL so
// that GORM's AutoUpdateTime hook cannot refresh the timestamp.
func (env *TestEnv) MakeTaskStale(taskID int64) error {
	raw, err := env.DB.DB()
	if err != nil {
		return fmt.Errorf("get sql.DB: %w", err)
	}
	stale := time.Now().Add(-31 * time.Second)
	_, execErr := raw.Exec("UPDATE hpc_tasks SET updated_at = ? WHERE id = ?", stale, taskID)
	return execErr
}
// GetTaskSlurmJobID reads the slurm_job_id column for the given task via raw
// SQL. A NULL column yields an error.
func (env *TestEnv) GetTaskSlurmJobID(taskID int64) (int32, error) {
	raw, err := env.DB.DB()
	if err != nil {
		return 0, fmt.Errorf("get sql.DB: %w", err)
	}
	var id sql.NullInt32
	if err := raw.QueryRow("SELECT slurm_job_id FROM hpc_tasks WHERE id = ?", taskID).Scan(&id); err != nil {
		return 0, err
	}
	if !id.Valid {
		return 0, fmt.Errorf("task %d has no slurm_job_id", taskID)
	}
	return id.Int32, nil
}

View File

@@ -0,0 +1,245 @@
package testenv
import (
	"encoding/json"
	"io"
	"net/http"
	"strings"
	"testing"
	"time"

	"gcy_hpc_server/internal/model"
	"gcy_hpc_server/internal/storage"
)
// TestNewTestEnv verifies the environment comes up fully wired and the HTTP
// server answers requests. Teardown is handled entirely by the t.Cleanup
// hooks NewTestEnv registers; the previous manual defers duplicated those
// hooks and stopped the processor, poller, and server a second time.
func TestNewTestEnv(t *testing.T) {
	env := NewTestEnv(t)
	if env.DB == nil {
		t.Fatal("DB is nil")
	}
	if env.MockSlurm == nil {
		t.Fatal("MockSlurm is nil")
	}
	if env.MockMinIO == nil {
		t.Fatal("MockMinIO is nil")
	}
	if env.srv == nil {
		t.Fatal("srv is nil")
	}
	resp, err := http.Get(env.srv.URL + "/api/v1/applications")
	if err != nil {
		t.Fatalf("GET /api/v1/applications: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200, got %d", resp.StatusCode)
	}
}
// TestAllRoutesRegistered probes every expected API route and fails when the
// router itself answers 404 (i.e. the route was never registered).
func TestAllRoutesRegistered(t *testing.T) {
	env := NewTestEnv(t)
	routes := []struct {
		method string
		path   string
	}{
		{"POST", "/api/v1/jobs/submit"},
		{"GET", "/api/v1/jobs"},
		{"GET", "/api/v1/jobs/history"},
		{"GET", "/api/v1/jobs/1"},
		{"DELETE", "/api/v1/jobs/1"},
		{"GET", "/api/v1/nodes"},
		{"GET", "/api/v1/nodes/node01"},
		{"GET", "/api/v1/partitions"},
		{"GET", "/api/v1/partitions/normal"},
		{"GET", "/api/v1/diag"},
		{"GET", "/api/v1/applications"},
		{"POST", "/api/v1/applications"},
		{"GET", "/api/v1/applications/1"},
		{"PUT", "/api/v1/applications/1"},
		{"DELETE", "/api/v1/applications/1"},
		{"POST", "/api/v1/applications/1/submit"},
		{"POST", "/api/v1/files/uploads"},
		{"GET", "/api/v1/files/uploads/1"},
		{"PUT", "/api/v1/files/uploads/1/chunks/0"},
		{"POST", "/api/v1/files/uploads/1/complete"},
		{"DELETE", "/api/v1/files/uploads/1"},
		{"GET", "/api/v1/files"},
		{"GET", "/api/v1/files/1"},
		{"GET", "/api/v1/files/1/download"},
		{"DELETE", "/api/v1/files/1"},
		{"POST", "/api/v1/files/folders"},
		{"GET", "/api/v1/files/folders"},
		{"GET", "/api/v1/files/folders/1"},
		{"DELETE", "/api/v1/files/folders/1"},
		{"POST", "/api/v1/tasks"},
		{"GET", "/api/v1/tasks"},
	}
	// Guard against accidental edits to the table above.
	if len(routes) != 31 {
		t.Fatalf("expected 31 routes, got %d", len(routes))
	}
	for _, rt := range routes {
		req, err := http.NewRequest(rt.method, env.srv.URL+rt.path, nil)
		if err != nil {
			t.Fatalf("%s %s: create request: %v", rt.method, rt.path, err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			t.Fatalf("%s %s: %v", rt.method, rt.path, err)
		}
		payload, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		if resp.StatusCode != http.StatusNotFound {
			continue
		}
		// Gin's router-level 404 carries no JSON body, whereas handler-level
		// 404s respond with {"success":false,...}. A 404 that parses as JSON
		// therefore means the route IS registered.
		var apiResp struct {
			Success bool `json:"success"`
		}
		if json.Unmarshal(payload, &apiResp) != nil {
			t.Errorf("%s %s: got router-level 404 (route not registered)", rt.method, rt.path)
		}
	}
}
// TestMakeTaskStale checks that MakeTaskStale pushes a task's updated_at
// well into the past, bypassing GORM's automatic timestamp refresh.
func TestMakeTaskStale(t *testing.T) {
	env := NewTestEnv(t)
	ctx := t.Context()
	taskID, err := env.taskStore.Create(ctx, &model.Task{
		TaskName: "stale-test",
		AppID:    1,
		AppName:  "test-app",
		Status:   model.TaskStatusSubmitted,
	})
	if err != nil {
		t.Fatalf("create task: %v", err)
	}
	before := time.Now()
	if err := env.MakeTaskStale(taskID); err != nil {
		t.Fatalf("MakeTaskStale: %v", err)
	}
	updated, err := env.taskStore.GetByID(ctx, taskID)
	if err != nil {
		t.Fatalf("get task: %v", err)
	}
	// 31s rewind minus polling slack: expect at least 25s in the past.
	if !updated.UpdatedAt.Before(before.Add(-25 * time.Second)) {
		t.Errorf("expected updated_at to be >25s in the past, got %v (before=%v)", updated.UpdatedAt, before)
	}
}
// TestUploadTestData verifies that UploadTestData creates consistent File and
// FileBlob records and stores the exact bytes in MockMinIO.
func TestUploadTestData(t *testing.T) {
	env := NewTestEnv(t)
	payload := []byte("hello testenv")
	fileID, blobID := env.UploadTestData("test.txt", payload)
	if fileID == 0 {
		t.Fatal("fileID is 0")
	}
	if blobID == 0 {
		t.Fatal("blobID is 0")
	}
	ctx := t.Context()
	file, err := env.fileStore.GetByID(ctx, fileID)
	if err != nil {
		t.Fatalf("get file: %v", err)
	}
	if file == nil {
		t.Fatal("file not found")
	}
	if file.Name != "test.txt" {
		t.Errorf("expected name 'test.txt', got %q", file.Name)
	}
	blob, err := env.blobStore.GetBySHA256(ctx, file.BlobSHA256)
	if err != nil {
		t.Fatalf("get blob: %v", err)
	}
	if blob == nil {
		t.Fatal("blob not found")
	}
	if blob.FileSize != int64(len(payload)) {
		t.Errorf("expected size %d, got %d", len(payload), blob.FileSize)
	}
	obj, info, err := env.MockMinIO.GetObject(ctx, "files", blob.MinioKey, storage.GetOptions{})
	if err != nil {
		t.Fatalf("get object: %v", err)
	}
	defer obj.Close()
	readBack, _ := io.ReadAll(obj)
	if string(readBack) != string(payload) {
		t.Errorf("expected content %q, got %q", payload, readBack)
	}
	if info.Size != int64(len(payload)) {
		t.Errorf("expected object size %d, got %d", len(payload), info.Size)
	}
}
// TestCreateApp verifies that the CreateApp helper persists an application
// with the expected name.
func TestCreateApp(t *testing.T) {
	env := NewTestEnv(t)
	params := json.RawMessage(`[{"name":"cores","type":"integer","required":true}]`)
	appID, err := env.CreateApp("test-app", "#!/bin/bash\necho hello", params)
	if err != nil {
		t.Fatalf("CreateApp: %v", err)
	}
	if appID == 0 {
		t.Fatal("appID is 0")
	}
	stored, err := env.appStore.GetByID(t.Context(), appID)
	if err != nil {
		t.Fatalf("get app: %v", err)
	}
	if stored == nil {
		t.Fatal("app not found")
	}
	if stored.Name != "test-app" {
		t.Errorf("expected name 'test-app', got %q", stored.Name)
	}
}
// TestDoRequest_SetsJSONContentType exercises DoRequest's non-nil-body
// branch, which sets Content-Type: application/json. The previous version
// passed a nil body — so the branch under test never executed — and its only
// assertion (StatusCode != 0) could never fail for a non-nil response.
func TestDoRequest_SetsJSONContentType(t *testing.T) {
	env := NewTestEnv(t)
	resp := env.DoRequest("POST", "/api/v1/applications", strings.NewReader(`{}`))
	defer resp.Body.Close()
	if resp.StatusCode < 100 || resp.StatusCode > 599 {
		t.Fatalf("unexpected status code %d", resp.StatusCode)
	}
}
// TestDecodeResponse verifies that a successful list response decodes into a
// true success flag and non-nil data payload.
func TestDecodeResponse(t *testing.T) {
	env := NewTestEnv(t)
	resp := env.DoRequest("GET", "/api/v1/applications", nil)
	defer resp.Body.Close()
	ok, payload, err := env.DecodeResponse(resp)
	if err != nil {
		t.Fatalf("DecodeResponse: %v", err)
	}
	if !ok {
		t.Error("expected success=true for list applications")
	}
	if payload == nil {
		t.Error("expected non-nil data")
	}
}