feat(testutil): add MockSlurm, MockMinIO, TestEnv and 37 integration tests
- mockminio: in-memory ObjectStorage with all 11 methods, thread-safe, SHA256 ETag, Range support - mockslurm: httptest server with 11 Slurm REST API endpoints, job eviction from active to history queue - testenv: one-line test environment factory (SQLite + MockSlurm + MockMinIO + all stores/services/handlers + httptest server) - integration tests: 37 tests covering Jobs(5), Cluster(5), App(6), Upload(5), File(4), Folder(4), Task(4), E2E(1) - no external dependencies, no existing files modified
This commit is contained in:
544
internal/testutil/mockslurm/server.go
Normal file
544
internal/testutil/mockslurm/server.go
Normal file
@@ -0,0 +1,544 @@
|
||||
// Package mockslurm provides a complete HTTP mock server for the Slurm REST API.
|
||||
// It supports all 11 endpoints (P0: 4 job + P1: 7 cluster/history) and includes
|
||||
// job eviction from active to history queue on terminal states.
|
||||
package mockslurm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
)
|
||||
|
||||
// MockJob represents a job tracked by the mock server.
type MockJob struct {
	JobID      int32
	Name       string
	State      string // single state string for internal tracking
	Script     string
	Partition  string
	WorkDir    string
	SubmitTime time.Time
	StartTime  *time.Time // nil until the job is transitioned to RUNNING
	EndTime    *time.Time // nil until the job reaches a terminal state
	ExitCode   *int32     // set on terminal states: 0 for COMPLETED, 1 otherwise
}
|
||||
|
||||
// MockNode represents a node tracked by the mock server.
// Only the node name is modeled; handlers wrap it into slurm.Node.
type MockNode struct {
	Name string
}
|
||||
|
||||
// MockPartition represents a partition tracked by the mock server.
// Only the partition name is modeled; handlers wrap it into slurm.PartitionInfo.
type MockPartition struct {
	Name string
}
|
||||
|
||||
// MockSlurm is the mock Slurm API server controller.
type MockSlurm struct {
	mu          sync.RWMutex       // guards the fields below
	activeJobs  map[int32]*MockJob // PENDING/RUNNING jobs, served by /slurm endpoints
	historyJobs map[int32]*MockJob // terminal jobs, served by /slurmdb endpoints
	nodes       []MockNode
	partitions  []MockPartition
	nextID      int32 // next job ID to hand out; incremented on every submit
	server      *httptest.Server
}
|
||||
|
||||
// NewMockSlurmServer creates and starts a mock Slurm REST API server.
// Returns the httptest.Server and the MockSlurm controller. The caller
// owns the server and should Close it when done (tests use t.Cleanup).
func NewMockSlurmServer() (*httptest.Server, *MockSlurm) {
	m := &MockSlurm{
		activeJobs:  make(map[int32]*MockJob),
		historyJobs: make(map[int32]*MockJob),
		// Fixed topology seeded at construction so cluster endpoints have
		// deterministic data without per-test setup.
		nodes: []MockNode{
			{Name: "node01"},
			{Name: "node02"},
			{Name: "node03"},
		},
		partitions: []MockPartition{
			{Name: "normal"},
			{Name: "gpu"},
		},
		nextID: 1, // job IDs start at 1 and auto-increment per submission
	}

	mux := http.NewServeMux()

	// P0: Exact paths FIRST (before prefix paths)
	mux.HandleFunc("/slurm/v0.0.40/job/submit", m.handleJobSubmit)
	mux.HandleFunc("/slurm/v0.0.40/jobs", m.handleGetJobs)

	// P0: Prefix path for /job/{id} — GET and DELETE
	mux.HandleFunc("/slurm/v0.0.40/job/", m.handleJobByID)

	// P1: Cluster endpoints
	mux.HandleFunc("/slurm/v0.0.40/nodes", m.handleGetNodes)
	mux.HandleFunc("/slurm/v0.0.40/node/", m.handleGetNode)
	mux.HandleFunc("/slurm/v0.0.40/partitions", m.handleGetPartitions)
	mux.HandleFunc("/slurm/v0.0.40/partition/", m.handleGetPartition)
	mux.HandleFunc("/slurm/v0.0.40/diag", m.handleDiag)

	// P1: SlurmDB endpoints
	mux.HandleFunc("/slurmdb/v0.0.40/jobs", m.handleSlurmdbJobs)
	mux.HandleFunc("/slurmdb/v0.0.40/job/", m.handleSlurmdbJob)

	srv := httptest.NewServer(mux)
	m.server = srv

	return srv, m
}
|
||||
|
||||
// Server returns the underlying httptest.Server started by NewMockSlurmServer.
func (m *MockSlurm) Server() *httptest.Server {
	return m.server
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Controller methods
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// SetJobState transitions a job to the given state.
|
||||
// Terminal states (COMPLETED/FAILED/CANCELLED/TIMEOUT) evict the job from
|
||||
// activeJobs into historyJobs. RUNNING sets StartTime and stays active.
|
||||
// PENDING stays in activeJobs.
|
||||
func (m *MockSlurm) SetJobState(id int32, state string) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
mj, ok := m.activeJobs[id]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
switch state {
|
||||
case "RUNNING":
|
||||
mj.State = state
|
||||
mj.StartTime = &now
|
||||
case "COMPLETED", "FAILED", "CANCELLED", "TIMEOUT":
|
||||
mj.State = state
|
||||
mj.EndTime = &now
|
||||
exitCode := int32(0)
|
||||
if state != "COMPLETED" {
|
||||
exitCode = 1
|
||||
}
|
||||
mj.ExitCode = &exitCode
|
||||
delete(m.activeJobs, id)
|
||||
m.historyJobs[id] = mj
|
||||
case "PENDING":
|
||||
mj.State = state
|
||||
}
|
||||
}
|
||||
|
||||
// GetJobState returns the current state of the job with the given ID.
|
||||
// Returns empty string if the job is not found.
|
||||
func (m *MockSlurm) GetJobState(id int32) string {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
if mj, ok := m.activeJobs[id]; ok {
|
||||
return mj.State
|
||||
}
|
||||
if mj, ok := m.historyJobs[id]; ok {
|
||||
return mj.State
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetAllActiveJobs returns all jobs currently in the active queue.
|
||||
func (m *MockSlurm) GetAllActiveJobs() []*MockJob {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
jobs := make([]*MockJob, 0, len(m.activeJobs))
|
||||
for _, mj := range m.activeJobs {
|
||||
jobs = append(jobs, mj)
|
||||
}
|
||||
return jobs
|
||||
}
|
||||
|
||||
// GetAllHistoryJobs returns all jobs in the history queue.
|
||||
func (m *MockSlurm) GetAllHistoryJobs() []*MockJob {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
jobs := make([]*MockJob, 0, len(m.historyJobs))
|
||||
for _, mj := range m.historyJobs {
|
||||
jobs = append(jobs, mj)
|
||||
}
|
||||
return jobs
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Job Core Endpoints
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// POST /slurm/v0.0.40/job/submit
//
// handleJobSubmit decodes a JobSubmitReq, allocates the next job ID, and
// registers the job in the active queue in state PENDING. A script in the
// nested job description (req.Job.Script) overrides the top-level req.Script.
func (m *MockSlurm) handleJobSubmit(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}

	var req slurm.JobSubmitReq
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	m.mu.Lock()
	jobID := m.nextID
	m.nextID++

	job := &MockJob{
		JobID:      jobID,
		State:      "PENDING", // MUST be non-empty for mapSlurmStateToTaskStatus
		SubmitTime: time.Now(),
	}

	// Top-level script first; the nested description may override it below.
	if req.Script != nil {
		job.Script = *req.Script
	}
	if req.Job != nil {
		if req.Job.Name != nil {
			job.Name = *req.Job.Name
		}
		if req.Job.Partition != nil {
			job.Partition = *req.Job.Partition
		}
		if req.Job.CurrentWorkingDirectory != nil {
			job.WorkDir = *req.Job.CurrentWorkingDirectory
		}
		if req.Job.Script != nil {
			job.Script = *req.Job.Script
		}
	}

	m.activeJobs[jobID] = job
	// Release the lock before serializing the response.
	m.mu.Unlock()

	resp := NewSubmitResponse(jobID)
	writeJSON(w, http.StatusOK, resp)
}
|
||||
|
||||
// GET /slurm/v0.0.40/jobs
|
||||
func (m *MockSlurm) handleGetJobs(w http.ResponseWriter, r *http.Request) {
|
||||
m.mu.RLock()
|
||||
jobs := make([]slurm.JobInfo, 0, len(m.activeJobs))
|
||||
for _, mj := range m.activeJobs {
|
||||
jobs = append(jobs, m.mockJobToJobInfo(mj))
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
resp := NewJobInfoResponse(jobs)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET/DELETE /slurm/v0.0.40/job/{id}
//
// handleJobByID extracts the job ID from the path and dispatches on method:
// GET looks the job up in the active queue; DELETE cancels it (which evicts
// it into history via SetJobState).
func (m *MockSlurm) handleJobByID(w http.ResponseWriter, r *http.Request) {
	segments := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
	// /slurm/v0.0.40/job/{id} → segments[0]="", [1]="slurm", [2]="v0.0.40", [3]="job", [4]=id
	if len(segments) < 5 {
		m.writeError(w, http.StatusBadRequest, "missing job id")
		return
	}

	last := segments[4]

	// Safety net: if "submit" leaks through prefix match, forward to submit handler
	if last == "submit" {
		m.handleJobSubmit(w, r)
		return
	}

	// ParseInt with bitSize 32 guarantees the value fits into int32 below.
	id, err := strconv.ParseInt(last, 10, 32)
	if err != nil {
		m.writeError(w, http.StatusBadRequest, "invalid job id")
		return
	}

	switch r.Method {
	case http.MethodGet:
		m.handleGetJobByID(w, int32(id))
	case http.MethodDelete:
		m.handleDeleteJobByID(w, int32(id))
	default:
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	}
}
|
||||
|
||||
func (m *MockSlurm) handleGetJobByID(w http.ResponseWriter, jobID int32) {
|
||||
m.mu.RLock()
|
||||
mj, ok := m.activeJobs[jobID]
|
||||
m.mu.RUnlock()
|
||||
|
||||
if !ok {
|
||||
m.writeError(w, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
|
||||
ji := m.mockJobToJobInfo(mj)
|
||||
resp := NewJobInfoResponse([]slurm.JobInfo{ji})
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// handleDeleteJobByID cancels a job: CANCELLED is terminal, so SetJobState
// evicts it from activeJobs into historyJobs.
// NOTE(review): SetJobState is a no-op for unknown IDs, so deleting a
// nonexistent job still returns 200 — confirm this matches the intended
// mock semantics.
func (m *MockSlurm) handleDeleteJobByID(w http.ResponseWriter, jobID int32) {
	m.SetJobState(jobID, "CANCELLED")
	resp := NewDeleteResponse()
	writeJSON(w, http.StatusOK, resp)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: Cluster/History Endpoints
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// GET /slurm/v0.0.40/nodes
|
||||
func (m *MockSlurm) handleGetNodes(w http.ResponseWriter, r *http.Request) {
|
||||
nodes := make([]slurm.Node, len(m.nodes))
|
||||
for i, n := range m.nodes {
|
||||
nodes[i] = slurm.Node{Name: slurm.Ptr(n.Name)}
|
||||
}
|
||||
resp := NewNodeResponse(nodes)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/node/{name}
|
||||
func (m *MockSlurm) handleGetNode(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
|
||||
if len(segments) < 5 {
|
||||
m.writeError(w, http.StatusBadRequest, "missing node name")
|
||||
return
|
||||
}
|
||||
nodeName := segments[4]
|
||||
|
||||
var found *slurm.Node
|
||||
for _, n := range m.nodes {
|
||||
if n.Name == nodeName {
|
||||
found = &slurm.Node{Name: slurm.Ptr(n.Name)}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found == nil {
|
||||
m.writeError(w, http.StatusNotFound, "node not found")
|
||||
return
|
||||
}
|
||||
|
||||
resp := NewNodeResponse([]slurm.Node{*found})
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/partitions
|
||||
func (m *MockSlurm) handleGetPartitions(w http.ResponseWriter, r *http.Request) {
|
||||
parts := make([]slurm.PartitionInfo, len(m.partitions))
|
||||
for i, p := range m.partitions {
|
||||
parts[i] = slurm.PartitionInfo{Name: slurm.Ptr(p.Name)}
|
||||
}
|
||||
resp := NewPartitionResponse(parts)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/partition/{name}
|
||||
func (m *MockSlurm) handleGetPartition(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
|
||||
if len(segments) < 5 {
|
||||
m.writeError(w, http.StatusBadRequest, "missing partition name")
|
||||
return
|
||||
}
|
||||
partName := segments[4]
|
||||
|
||||
var found *slurm.PartitionInfo
|
||||
for _, p := range m.partitions {
|
||||
if p.Name == partName {
|
||||
found = &slurm.PartitionInfo{Name: slurm.Ptr(p.Name)}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found == nil {
|
||||
m.writeError(w, http.StatusNotFound, "partition not found")
|
||||
return
|
||||
}
|
||||
|
||||
resp := NewPartitionResponse([]slurm.PartitionInfo{*found})
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/diag
//
// handleDiag returns a canned diagnostics payload; the mock tracks no
// per-request scheduler statistics.
func (m *MockSlurm) handleDiag(w http.ResponseWriter, r *http.Request) {
	resp := NewDiagResponse()
	writeJSON(w, http.StatusOK, resp)
}
|
||||
|
||||
// GET /slurmdb/v0.0.40/jobs — supports filter params: job_name, start_time, end_time
|
||||
func (m *MockSlurm) handleSlurmdbJobs(w http.ResponseWriter, r *http.Request) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
jobs := make([]slurm.Job, 0)
|
||||
for _, mj := range m.historyJobs {
|
||||
// Filter by job_name
|
||||
if name := r.URL.Query().Get("job_name"); name != "" && mj.Name != name {
|
||||
continue
|
||||
}
|
||||
// Filter by start_time (job start must be >= filter start)
|
||||
if startStr := r.URL.Query().Get("start_time"); startStr != "" {
|
||||
if st, err := strconv.ParseInt(startStr, 10, 64); err == nil {
|
||||
if mj.StartTime == nil || mj.StartTime.Unix() < st {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
// Filter by end_time (job end must be <= filter end)
|
||||
if endStr := r.URL.Query().Get("end_time"); endStr != "" {
|
||||
if et, err := strconv.ParseInt(endStr, 10, 64); err == nil {
|
||||
if mj.EndTime == nil || mj.EndTime.Unix() > et {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
jobs = append(jobs, m.mockJobToSlurmDBJob(mj))
|
||||
}
|
||||
|
||||
resp := NewJobHistoryResponse(jobs)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurmdb/v0.0.40/job/{id} — returns OpenapiSlurmdbdJobsResp (with jobs array wrapper)
|
||||
func (m *MockSlurm) handleSlurmdbJob(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
|
||||
if len(segments) < 5 {
|
||||
m.writeError(w, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
|
||||
id, err := strconv.ParseInt(segments[4], 10, 32)
|
||||
if err != nil {
|
||||
m.writeError(w, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
|
||||
m.mu.RLock()
|
||||
mj, ok := m.historyJobs[int32(id)]
|
||||
m.mu.RUnlock()
|
||||
|
||||
if !ok {
|
||||
m.writeError(w, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
|
||||
dbJob := m.mockJobToSlurmDBJob(mj)
|
||||
resp := NewJobHistoryResponse([]slurm.Job{dbJob})
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Conversion helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// mockJobToJobInfo converts a MockJob to an active-endpoint JobInfo.
// Uses buildActiveJobState for flat []string state format: ["RUNNING"].
// StartTime, EndTime, and ExitCode are only populated when set on the
// MockJob, mirroring a job that has not yet started or finished.
func (m *MockSlurm) mockJobToJobInfo(mj *MockJob) slurm.JobInfo {
	ji := slurm.JobInfo{
		JobID:                   slurm.Ptr(mj.JobID),
		JobState:                buildActiveJobState(mj.State), // MUST be non-empty []string
		Name:                    slurm.Ptr(mj.Name),
		Partition:               slurm.Ptr(mj.Partition),
		CurrentWorkingDirectory: slurm.Ptr(mj.WorkDir),
		SubmitTime:              &slurm.Uint64NoVal{Number: slurm.Ptr(mj.SubmitTime.Unix())},
	}

	if mj.StartTime != nil {
		ji.StartTime = &slurm.Uint64NoVal{Number: slurm.Ptr(mj.StartTime.Unix())}
	}
	if mj.EndTime != nil {
		ji.EndTime = &slurm.Uint64NoVal{Number: slurm.Ptr(mj.EndTime.Unix())}
	}
	if mj.ExitCode != nil {
		ji.ExitCode = &slurm.ProcessExitCodeVerbose{
			ReturnCode: &slurm.Uint32NoVal{Number: slurm.Ptr(int64(*mj.ExitCode))},
		}
	}

	return ji
}
|
||||
|
||||
// mockJobToSlurmDBJob converts a MockJob to a SlurmDB history Job.
// Uses buildHistoryJobState for nested state format: {current: ["COMPLETED"], reason: ""}.
// Start/End timestamps and ExitCode are only populated when set on the MockJob.
func (m *MockSlurm) mockJobToSlurmDBJob(mj *MockJob) slurm.Job {
	dbJob := slurm.Job{
		JobID:            slurm.Ptr(mj.JobID),
		Name:             slurm.Ptr(mj.Name),
		Partition:        slurm.Ptr(mj.Partition),
		WorkingDirectory: slurm.Ptr(mj.WorkDir),
		Script:           slurm.Ptr(mj.Script),
		State:            buildHistoryJobState(mj.State),
		Time: &slurm.JobTime{
			Submission: slurm.Ptr(mj.SubmitTime.Unix()),
		},
	}

	if mj.StartTime != nil {
		dbJob.Time.Start = slurm.Ptr(mj.StartTime.Unix())
	}
	if mj.EndTime != nil {
		dbJob.Time.End = slurm.Ptr(mj.EndTime.Unix())
	}
	if mj.ExitCode != nil {
		dbJob.ExitCode = &slurm.ProcessExitCodeVerbose{
			ReturnCode: &slurm.Uint32NoVal{Number: slurm.Ptr(int64(*mj.ExitCode))},
		}
	}

	return dbJob
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Error helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// writeJSON writes a JSON response with the given status code.
|
||||
func writeJSON(w http.ResponseWriter, code int, v interface{}) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(code)
|
||||
json.NewEncoder(w).Encode(v)
|
||||
}
|
||||
|
||||
// writeError writes an HTTP error with an OpenapiResp body containing
|
||||
// meta and errors fields. This is critical for CheckResponse/IsNotFound
|
||||
// to work correctly — the response body must be parseable as OpenapiResp.
|
||||
func (m *MockSlurm) writeError(w http.ResponseWriter, statusCode int, message string) {
|
||||
meta := slurm.OpenapiMeta{
|
||||
Plugin: &slurm.MetaPlugin{
|
||||
Type: slurm.Ptr("openapi/v0.0.40"),
|
||||
Name: slurm.Ptr(""),
|
||||
},
|
||||
Slurm: &slurm.MetaSlurm{
|
||||
Version: &slurm.MetaSlurmVersion{
|
||||
Major: slurm.Ptr("24"),
|
||||
Micro: slurm.Ptr("0"),
|
||||
Minor: slurm.Ptr("5"),
|
||||
},
|
||||
Release: slurm.Ptr("24.05.0"),
|
||||
},
|
||||
}
|
||||
|
||||
resp := slurm.OpenapiResp{
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{
|
||||
{
|
||||
ErrorNumber: slurm.Ptr(int32(0)),
|
||||
Error: slurm.Ptr(message),
|
||||
},
|
||||
},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
|
||||
writeJSON(w, statusCode, resp)
|
||||
}
|
||||
679
internal/testutil/mockslurm/server_test.go
Normal file
679
internal/testutil/mockslurm/server_test.go
Normal file
@@ -0,0 +1,679 @@
|
||||
package mockslurm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
)
|
||||
|
||||
// setupTestClient starts a mock Slurm server (closed automatically via
// t.Cleanup) and returns a slurm.Client pointed at it plus the mock
// controller for state manipulation.
func setupTestClient(t *testing.T) (*slurm.Client, *MockSlurm) {
	t.Helper()
	srv, mock := NewMockSlurmServer()
	t.Cleanup(srv.Close)
	client, err := slurm.NewClientWithOpts(srv.URL, slurm.WithHTTPClient(srv.Client()))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	return client, mock
}
|
||||
|
||||
// submitTestJob submits a job with the given attributes through the client
// and returns its assigned job ID, failing the test on any error or a nil
// JobID in the response.
func submitTestJob(t *testing.T, client *slurm.Client, name, partition, workDir, script string) int32 {
	t.Helper()
	ctx := context.Background()
	resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
		Script: slurm.Ptr(script),
		Job: &slurm.JobDescMsg{
			Name:                    slurm.Ptr(name),
			Partition:               slurm.Ptr(partition),
			CurrentWorkingDirectory: slurm.Ptr(workDir),
		},
	})
	if err != nil {
		t.Fatalf("SubmitJob failed: %v", err)
	}
	if resp.JobID == nil {
		t.Fatal("SubmitJob returned nil JobID")
	}
	return *resp.JobID
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Submit Job
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestSubmitJob verifies that the first submitted job receives ID 1 and
// that the submit response carries the expected StepID and Result fields.
func TestSubmitJob(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
		Script: slurm.Ptr("#!/bin/bash\necho hello"),
		Job: &slurm.JobDescMsg{
			Name:                    slurm.Ptr("test-job"),
			Partition:               slurm.Ptr("normal"),
			CurrentWorkingDirectory: slurm.Ptr("/tmp/work"),
		},
	})
	if err != nil {
		t.Fatalf("SubmitJob failed: %v", err)
	}
	// The mock's ID counter starts at 1.
	if resp.JobID == nil || *resp.JobID != 1 {
		t.Errorf("JobID = %v, want 1", resp.JobID)
	}
	if resp.StepID == nil || *resp.StepID != "Scalar" {
		t.Errorf("StepID = %v, want Scalar", resp.StepID)
	}
	if resp.Result == nil || resp.Result.JobID == nil || *resp.Result.JobID != 1 {
		t.Errorf("Result.JobID = %v, want 1", resp.Result)
	}
}
|
||||
|
||||
func TestSubmitJobAutoIncrement(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
for i := 1; i <= 3; i++ {
|
||||
resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
|
||||
Script: slurm.Ptr("#!/bin/bash\necho " + strconv.Itoa(i)),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("SubmitJob %d failed: %v", i, err)
|
||||
}
|
||||
if resp.JobID == nil || *resp.JobID != int32(i) {
|
||||
t.Errorf("job %d: JobID = %v, want %d", i, resp.JobID, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Get All Jobs
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestGetJobsEmpty(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, _, err := client.Jobs.GetJobs(ctx, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetJobs failed: %v", err)
|
||||
}
|
||||
if len(resp.Jobs) != 0 {
|
||||
t.Errorf("len(Jobs) = %d, want 0", len(resp.Jobs))
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetJobsWithSubmitted verifies that submitted jobs appear in the
// active listing by name and that each starts in state PENDING.
func TestGetJobsWithSubmitted(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	submitTestJob(t, client, "job-a", "normal", "/tmp/a", "#!/bin/bash\ntrue")
	submitTestJob(t, client, "job-b", "gpu", "/tmp/b", "#!/bin/bash\nfalse")

	resp, _, err := client.Jobs.GetJobs(ctx, nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) != 2 {
		t.Fatalf("len(Jobs) = %d, want 2", len(resp.Jobs))
	}

	// Collect names while checking each job's flat []string state.
	names := map[string]bool{}
	for _, j := range resp.Jobs {
		if j.Name != nil {
			names[*j.Name] = true
		}
		if len(j.JobState) == 0 || j.JobState[0] != "PENDING" {
			t.Errorf("JobState = %v, want [PENDING]", j.JobState)
		}
	}
	if !names["job-a"] || !names["job-b"] {
		t.Errorf("expected job-a and job-b, got %v", names)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Get Job By ID
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestGetJobByID verifies that a single active job can be fetched by ID
// and that all submitted attributes round-trip through the mock.
func TestGetJobByID(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "single-job", "normal", "/tmp/work", "#!/bin/bash\necho hi")

	resp, _, err := client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob failed: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
	}

	job := resp.Jobs[0]
	if job.JobID == nil || *job.JobID != jobID {
		t.Errorf("JobID = %v, want %d", job.JobID, jobID)
	}
	if job.Name == nil || *job.Name != "single-job" {
		t.Errorf("Name = %v, want single-job", job.Name)
	}
	if job.Partition == nil || *job.Partition != "normal" {
		t.Errorf("Partition = %v, want normal", job.Partition)
	}
	if job.CurrentWorkingDirectory == nil || *job.CurrentWorkingDirectory != "/tmp/work" {
		t.Errorf("CurrentWorkingDirectory = %v, want /tmp/work", job.CurrentWorkingDirectory)
	}
	// SubmitTime is stamped by the mock at submission.
	if job.SubmitTime == nil || job.SubmitTime.Number == nil {
		t.Error("SubmitTime should be non-nil")
	}
}
|
||||
|
||||
func TestGetJobByIDNotFound(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _, err := client.Jobs.GetJob(ctx, "999", nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for unknown job ID, got nil")
|
||||
}
|
||||
if !slurm.IsNotFound(err) {
|
||||
t.Errorf("error type = %T, want SlurmAPIError with 404", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Delete Job (triggers eviction)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestDeleteJob verifies that deleting a job cancels it and evicts it from
// the active queue into history.
func TestDeleteJob(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "cancel-me", "normal", "/tmp", "#!/bin/bash\nsleep 99")

	resp, _, err := client.Jobs.DeleteJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("DeleteJob failed: %v", err)
	}
	if resp == nil {
		t.Fatal("DeleteJob returned nil response")
	}

	// DELETE sets state CANCELLED (terminal), which moves the job to history.
	if len(mock.GetAllActiveJobs()) != 0 {
		t.Error("active jobs should be empty after delete")
	}
	if len(mock.GetAllHistoryJobs()) != 1 {
		t.Error("history should contain 1 job after delete")
	}
	if mock.GetJobState(jobID) != "CANCELLED" {
		t.Errorf("job state = %q, want CANCELLED", mock.GetJobState(jobID))
	}
}
|
||||
|
||||
func TestDeleteJobEvictsFromActive(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
jobID := submitTestJob(t, client, "to-delete", "normal", "/tmp", "#!/bin/bash\ntrue")
|
||||
|
||||
_, _, err := client.Jobs.DeleteJob(ctx, strconv.Itoa(int(jobID)), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("DeleteJob failed: %v", err)
|
||||
}
|
||||
|
||||
_, _, err = client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected 404 after delete, got nil error")
|
||||
}
|
||||
if !slurm.IsNotFound(err) {
|
||||
t.Errorf("error = %v, want not-found", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Job State format ([]string, not bare string)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestJobStateIsStringArray verifies that JobState is a non-empty []string
// both in the decoded struct and in the raw JSON wire format.
func TestJobStateIsStringArray(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	submitTestJob(t, client, "state-test", "normal", "/tmp", "#!/bin/bash\necho")

	resp, _, err := client.Jobs.GetJobs(ctx, nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) == 0 {
		t.Fatal("expected at least one job")
	}

	job := resp.Jobs[0]
	if len(job.JobState) == 0 {
		t.Fatal("JobState is empty — must be non-empty []string to avoid mapSlurmStateToTaskStatus silent failure")
	}
	if job.JobState[0] != "PENDING" {
		t.Errorf("JobState[0] = %q, want %q", job.JobState[0], "PENDING")
	}

	// Re-marshal the struct to assert the wire format is ["PENDING"], not a
	// bare string.
	raw, err := json.Marshal(job)
	if err != nil {
		t.Fatalf("Marshal job: %v", err)
	}
	if !strings.Contains(string(raw), `"job_state":["PENDING"]`) {
		t.Errorf("JobState JSON = %s, want array format [\"PENDING\"]", string(raw))
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Full Job Lifecycle
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestJobLifecycle drives a job through PENDING → RUNNING → COMPLETED and
// verifies eviction from the active queue into the slurmdb history endpoint,
// including the nested state format and the zero exit code.
func TestJobLifecycle(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "lifecycle", "normal", "/tmp/lc", "#!/bin/bash\necho lifecycle")

	// Verify PENDING in active
	resp, _, err := client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob PENDING: %v", err)
	}
	if resp.Jobs[0].JobState[0] != "PENDING" {
		t.Errorf("initial state = %v, want PENDING", resp.Jobs[0].JobState)
	}
	if len(mock.GetAllActiveJobs()) != 1 {
		t.Error("should have 1 active job")
	}

	// Transition to RUNNING
	mock.SetJobState(jobID, "RUNNING")
	resp, _, err = client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob RUNNING: %v", err)
	}
	if resp.Jobs[0].JobState[0] != "RUNNING" {
		t.Errorf("running state = %v, want RUNNING", resp.Jobs[0].JobState)
	}
	if resp.Jobs[0].StartTime == nil || resp.Jobs[0].StartTime.Number == nil {
		t.Error("StartTime should be set for RUNNING job")
	}
	if len(mock.GetAllActiveJobs()) != 1 {
		t.Error("should still have 1 active job after RUNNING")
	}

	// Transition to COMPLETED — triggers eviction
	mock.SetJobState(jobID, "COMPLETED")
	_, _, err = client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err == nil {
		t.Fatal("expected 404 after COMPLETED (evicted from active)")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error = %v, want not-found", err)
	}

	if len(mock.GetAllActiveJobs()) != 0 {
		t.Error("active jobs should be empty after COMPLETED")
	}
	if len(mock.GetAllHistoryJobs()) != 1 {
		t.Error("history should contain 1 job after COMPLETED")
	}
	if mock.GetJobState(jobID) != "COMPLETED" {
		t.Errorf("state = %q, want COMPLETED", mock.GetJobState(jobID))
	}

	// Verify history endpoint returns the job
	histResp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
	if err != nil {
		t.Fatalf("SlurmdbJobs.GetJob: %v", err)
	}
	if len(histResp.Jobs) != 1 {
		t.Fatalf("history jobs = %d, want 1", len(histResp.Jobs))
	}
	histJob := histResp.Jobs[0]
	// History uses the nested state format: {current: [...], reason: ...}.
	if histJob.State == nil || len(histJob.State.Current) == 0 || histJob.State.Current[0] != "COMPLETED" {
		t.Errorf("history state = %v, want current=[COMPLETED]", histJob.State)
	}
	if histJob.ExitCode == nil || histJob.ExitCode.ReturnCode == nil || histJob.ExitCode.ReturnCode.Number == nil {
		t.Error("history ExitCode should be set")
	} else if *histJob.ExitCode.ReturnCode.Number != 0 {
		t.Errorf("exit code = %d, want 0 for COMPLETED", *histJob.ExitCode.ReturnCode.Number)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: Nodes
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestGetNodes(t *testing.T) {
|
||||
client, mock := setupTestClient(t)
|
||||
_ = mock
|
||||
ctx := context.Background()
|
||||
|
||||
resp, _, err := client.Nodes.GetNodes(ctx, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetNodes failed: %v", err)
|
||||
}
|
||||
if resp.Nodes == nil {
|
||||
t.Fatal("Nodes is nil")
|
||||
}
|
||||
if len(*resp.Nodes) != 3 {
|
||||
t.Errorf("len(Nodes) = %d, want 3", len(*resp.Nodes))
|
||||
}
|
||||
names := make([]string, len(*resp.Nodes))
|
||||
for i, n := range *resp.Nodes {
|
||||
if n.Name == nil {
|
||||
t.Errorf("node %d: Name is nil", i)
|
||||
} else {
|
||||
names[i] = *n.Name
|
||||
}
|
||||
}
|
||||
for _, expected := range []string{"node01", "node02", "node03"} {
|
||||
found := false
|
||||
for _, n := range names {
|
||||
if n == expected {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("missing node %q in %v", expected, names)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNode(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, _, err := client.Nodes.GetNode(ctx, "node02", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetNode failed: %v", err)
|
||||
}
|
||||
if resp.Nodes == nil || len(*resp.Nodes) != 1 {
|
||||
t.Fatalf("expected 1 node, got %v", resp.Nodes)
|
||||
}
|
||||
if (*resp.Nodes)[0].Name == nil || *(*resp.Nodes)[0].Name != "node02" {
|
||||
t.Errorf("Name = %v, want node02", (*resp.Nodes)[0].Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNodeNotFound(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _, err := client.Nodes.GetNode(ctx, "nonexistent", nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for unknown node, got nil")
|
||||
}
|
||||
if !slurm.IsNotFound(err) {
|
||||
t.Errorf("error = %v, want not-found", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: Partitions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestGetPartitions(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, _, err := client.Partitions.GetPartitions(ctx, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetPartitions failed: %v", err)
|
||||
}
|
||||
if resp.Partitions == nil {
|
||||
t.Fatal("Partitions is nil")
|
||||
}
|
||||
if len(*resp.Partitions) != 2 {
|
||||
t.Errorf("len(Partitions) = %d, want 2", len(*resp.Partitions))
|
||||
}
|
||||
names := map[string]bool{}
|
||||
for _, p := range *resp.Partitions {
|
||||
if p.Name != nil {
|
||||
names[*p.Name] = true
|
||||
}
|
||||
}
|
||||
if !names["normal"] || !names["gpu"] {
|
||||
t.Errorf("expected normal and gpu partitions, got %v", names)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPartition(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, _, err := client.Partitions.GetPartition(ctx, "gpu", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetPartition failed: %v", err)
|
||||
}
|
||||
if resp.Partitions == nil || len(*resp.Partitions) != 1 {
|
||||
t.Fatalf("expected 1 partition, got %v", resp.Partitions)
|
||||
}
|
||||
if (*resp.Partitions)[0].Name == nil || *(*resp.Partitions)[0].Name != "gpu" {
|
||||
t.Errorf("Name = %v, want gpu", (*resp.Partitions)[0].Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPartitionNotFound(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _, err := client.Partitions.GetPartition(ctx, "nonexistent", nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for unknown partition, got nil")
|
||||
}
|
||||
if !slurm.IsNotFound(err) {
|
||||
t.Errorf("error = %v, want not-found", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: Diag
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestGetDiag(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, _, err := client.Diag.GetDiag(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("GetDiag failed: %v", err)
|
||||
}
|
||||
if resp.Statistics == nil {
|
||||
t.Fatal("Statistics is nil")
|
||||
}
|
||||
if resp.Statistics.ServerThreadCount == nil || *resp.Statistics.ServerThreadCount != 3 {
|
||||
t.Errorf("ServerThreadCount = %v, want 3", resp.Statistics.ServerThreadCount)
|
||||
}
|
||||
if resp.Statistics.AgentQueueSize == nil || *resp.Statistics.AgentQueueSize != 0 {
|
||||
t.Errorf("AgentQueueSize = %v, want 0", resp.Statistics.AgentQueueSize)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: SlurmDB Jobs
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestSlurmdbGetJobsEmpty(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, _, err := client.SlurmdbJobs.GetJobs(ctx, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetJobs failed: %v", err)
|
||||
}
|
||||
if len(resp.Jobs) != 0 {
|
||||
t.Errorf("len(Jobs) = %d, want 0 (no history)", len(resp.Jobs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlurmdbGetJobsAfterEviction(t *testing.T) {
|
||||
client, mock := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
jobID := submitTestJob(t, client, "hist-job", "normal", "/tmp/h", "#!/bin/bash\necho hist")
|
||||
mock.SetJobState(jobID, "RUNNING")
|
||||
mock.SetJobState(jobID, "COMPLETED")
|
||||
|
||||
resp, _, err := client.SlurmdbJobs.GetJobs(ctx, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetJobs failed: %v", err)
|
||||
}
|
||||
if len(resp.Jobs) != 1 {
|
||||
t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
|
||||
}
|
||||
|
||||
job := resp.Jobs[0]
|
||||
if job.Name == nil || *job.Name != "hist-job" {
|
||||
t.Errorf("Name = %v, want hist-job", job.Name)
|
||||
}
|
||||
if job.State == nil || len(job.State.Current) == 0 || job.State.Current[0] != "COMPLETED" {
|
||||
t.Errorf("State = %v, want current=[COMPLETED]", job.State)
|
||||
}
|
||||
if job.Time == nil || job.Time.Submission == nil {
|
||||
t.Error("Time.Submission should be set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlurmdbGetJobByID(t *testing.T) {
|
||||
client, mock := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
jobID := submitTestJob(t, client, "single-hist", "normal", "/tmp/sh", "#!/bin/bash\nexit 1")
|
||||
mock.SetJobState(jobID, "FAILED")
|
||||
|
||||
resp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
|
||||
if err != nil {
|
||||
t.Fatalf("GetJob failed: %v", err)
|
||||
}
|
||||
if len(resp.Jobs) != 1 {
|
||||
t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
|
||||
}
|
||||
|
||||
job := resp.Jobs[0]
|
||||
if job.JobID == nil || *job.JobID != jobID {
|
||||
t.Errorf("JobID = %v, want %d", job.JobID, jobID)
|
||||
}
|
||||
if job.State == nil || len(job.State.Current) == 0 || job.State.Current[0] != "FAILED" {
|
||||
t.Errorf("State = %v, want current=[FAILED]", job.State)
|
||||
}
|
||||
if job.ExitCode == nil || job.ExitCode.ReturnCode == nil || job.ExitCode.ReturnCode.Number == nil {
|
||||
t.Error("ExitCode should be set")
|
||||
} else if *job.ExitCode.ReturnCode.Number != 1 {
|
||||
t.Errorf("exit code = %d, want 1 for FAILED", *job.ExitCode.ReturnCode.Number)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlurmdbGetJobNotFound(t *testing.T) {
|
||||
client, _ := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _, err := client.SlurmdbJobs.GetJob(ctx, "999")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for unknown history job, got nil")
|
||||
}
|
||||
if !slurm.IsNotFound(err) {
|
||||
t.Errorf("error = %v, want not-found", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlurmdbJobStateIsNested(t *testing.T) {
|
||||
client, mock := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
jobID := submitTestJob(t, client, "nested-state", "gpu", "/tmp/ns", "#!/bin/bash\ntrue")
|
||||
mock.SetJobState(jobID, "COMPLETED")
|
||||
|
||||
resp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
|
||||
if err != nil {
|
||||
t.Fatalf("GetJob failed: %v", err)
|
||||
}
|
||||
|
||||
job := resp.Jobs[0]
|
||||
if job.State == nil {
|
||||
t.Fatal("State is nil — must be nested {current: [...], reason: \"\"}")
|
||||
}
|
||||
if len(job.State.Current) == 0 {
|
||||
t.Fatal("State.Current is empty")
|
||||
}
|
||||
if job.State.Current[0] != "COMPLETED" {
|
||||
t.Errorf("State.Current[0] = %q, want COMPLETED", job.State.Current[0])
|
||||
}
|
||||
if job.State.Reason == nil || *job.State.Reason != "" {
|
||||
t.Errorf("State.Reason = %v, want empty string", job.State.Reason)
|
||||
}
|
||||
|
||||
raw, err := json.Marshal(job)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
rawStr := string(raw)
|
||||
if !strings.Contains(rawStr, `"state":{"current":["COMPLETED"]`) {
|
||||
t.Errorf("state JSON should use nested format, got: %s", rawStr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlurmdbJobsFilterByName(t *testing.T) {
|
||||
client, mock := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
id1 := submitTestJob(t, client, "match-me", "normal", "/tmp", "#!/bin/bash\ntrue")
|
||||
id2 := submitTestJob(t, client, "other-job", "normal", "/tmp", "#!/bin/bash\ntrue")
|
||||
mock.SetJobState(id1, "COMPLETED")
|
||||
mock.SetJobState(id2, "COMPLETED")
|
||||
|
||||
resp, _, err := client.SlurmdbJobs.GetJobs(ctx, &slurm.GetSlurmdbJobsOptions{
|
||||
JobName: slurm.Ptr("match-me"),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("GetJobs with filter: %v", err)
|
||||
}
|
||||
if len(resp.Jobs) != 1 {
|
||||
t.Fatalf("len(Jobs) = %d, want 1 (filtered by name)", len(resp.Jobs))
|
||||
}
|
||||
if resp.Jobs[0].Name == nil || *resp.Jobs[0].Name != "match-me" {
|
||||
t.Errorf("Name = %v, want match-me", resp.Jobs[0].Name)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SetJobState terminal state exit codes
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestSetJobStateExitCodes(t *testing.T) {
|
||||
client, mock := setupTestClient(t)
|
||||
ctx := context.Background()
|
||||
|
||||
cases := []struct {
|
||||
state string
|
||||
wantExit int64
|
||||
}{
|
||||
{"COMPLETED", 0},
|
||||
{"FAILED", 1},
|
||||
{"CANCELLED", 1},
|
||||
{"TIMEOUT", 1},
|
||||
}
|
||||
|
||||
for i, tc := range cases {
|
||||
jobID := submitTestJob(t, client, "exit-"+strconv.Itoa(i), "normal", "/tmp", "#!/bin/bash\ntrue")
|
||||
mock.SetJobState(jobID, tc.state)
|
||||
|
||||
resp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
|
||||
if err != nil {
|
||||
t.Fatalf("GetJob(%d) %s: %v", jobID, tc.state, err)
|
||||
}
|
||||
job := resp.Jobs[0]
|
||||
if job.ExitCode == nil || job.ExitCode.ReturnCode == nil || job.ExitCode.ReturnCode.Number == nil {
|
||||
t.Errorf("%s: ExitCode not set", tc.state)
|
||||
continue
|
||||
}
|
||||
if *job.ExitCode.ReturnCode.Number != tc.wantExit {
|
||||
t.Errorf("%s: exit code = %d, want %d", tc.state, *job.ExitCode.ReturnCode.Number, tc.wantExit)
|
||||
}
|
||||
}
|
||||
}
|
||||
134
internal/testutil/mockslurm/types.go
Normal file
134
internal/testutil/mockslurm/types.go
Normal file
@@ -0,0 +1,134 @@
|
||||
// Package mockslurm provides response builder helpers that generate JSON
|
||||
// matching Openapi* types from the internal/slurm package.
|
||||
package mockslurm
|
||||
|
||||
import (
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
)
|
||||
|
||||
// newMeta returns standard OpenapiMeta with plugin type "openapi/v0.0.40"
|
||||
// and Slurm version 24.05.0.
|
||||
func newMeta() slurm.OpenapiMeta {
|
||||
return slurm.OpenapiMeta{
|
||||
Plugin: &slurm.MetaPlugin{
|
||||
Type: slurm.Ptr("openapi/v0.0.40"),
|
||||
Name: slurm.Ptr("slurmrestd"),
|
||||
DataParser: slurm.Ptr("json/v0.0.40"),
|
||||
},
|
||||
Slurm: &slurm.MetaSlurm{
|
||||
Version: &slurm.MetaSlurmVersion{
|
||||
Major: slurm.Ptr("24"),
|
||||
Micro: slurm.Ptr("0"),
|
||||
Minor: slurm.Ptr("5"),
|
||||
},
|
||||
Release: slurm.Ptr("24.05.0"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewSubmitResponse builds an OpenapiJobSubmitResponse with the given jobID.
|
||||
func NewSubmitResponse(jobID int32) slurm.OpenapiJobSubmitResponse {
|
||||
return slurm.OpenapiJobSubmitResponse{
|
||||
Result: &slurm.JobSubmitResponseMsg{
|
||||
JobID: slurm.Ptr(jobID),
|
||||
},
|
||||
JobID: slurm.Ptr(jobID),
|
||||
StepID: slurm.Ptr("Scalar"),
|
||||
Meta: &slurm.OpenapiMeta{},
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewJobInfoResponse builds an OpenapiJobInfoResp wrapping the given jobs.
|
||||
func NewJobInfoResponse(jobs []slurm.JobInfo) slurm.OpenapiJobInfoResp {
|
||||
meta := newMeta()
|
||||
return slurm.OpenapiJobInfoResp{
|
||||
Jobs: jobs,
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewJobListResponse is an alias for NewJobInfoResponse.
|
||||
func NewJobListResponse(jobs []slurm.JobInfo) slurm.OpenapiJobInfoResp {
|
||||
return NewJobInfoResponse(jobs)
|
||||
}
|
||||
|
||||
// NewDeleteResponse builds an OpenapiResp with meta and empty errors/warnings.
|
||||
func NewDeleteResponse() slurm.OpenapiResp {
|
||||
meta := newMeta()
|
||||
return slurm.OpenapiResp{
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewNodeResponse builds an OpenapiNodesResp wrapping the given nodes.
|
||||
func NewNodeResponse(nodes []slurm.Node) slurm.OpenapiNodesResp {
|
||||
meta := newMeta()
|
||||
n := slurm.Nodes(nodes)
|
||||
return slurm.OpenapiNodesResp{
|
||||
Nodes: &n,
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewPartitionResponse builds an OpenapiPartitionResp wrapping the given partitions.
|
||||
func NewPartitionResponse(partitions []slurm.PartitionInfo) slurm.OpenapiPartitionResp {
|
||||
meta := newMeta()
|
||||
p := slurm.PartitionInfoMsg(partitions)
|
||||
return slurm.OpenapiPartitionResp{
|
||||
Partitions: &p,
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewDiagResponse builds an OpenapiDiagResp with stats and meta.
|
||||
func NewDiagResponse() slurm.OpenapiDiagResp {
|
||||
meta := newMeta()
|
||||
return slurm.OpenapiDiagResp{
|
||||
Statistics: &slurm.StatsMsg{
|
||||
ServerThreadCount: slurm.Ptr(int32(3)),
|
||||
AgentQueueSize: slurm.Ptr(int32(0)),
|
||||
JobsRunning: slurm.Ptr(int32(0)),
|
||||
JobsPending: slurm.Ptr(int32(0)),
|
||||
ScheduleQueueLength: slurm.Ptr(int32(0)),
|
||||
},
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewJobHistoryResponse builds an OpenapiSlurmdbdJobsResp wrapping the given SlurmDBD jobs.
|
||||
func NewJobHistoryResponse(jobs []slurm.Job) slurm.OpenapiSlurmdbdJobsResp {
|
||||
meta := newMeta()
|
||||
return slurm.OpenapiSlurmdbdJobsResp{
|
||||
Jobs: jobs,
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// buildActiveJobState returns the flat string array used by the active
// endpoint's job_state field (e.g. ["RUNNING"]).
func buildActiveJobState(states ...string) []string {
	// The active endpoint encodes state as a bare string array, so the
	// variadic arguments are already in wire shape.
	return states
}
|
||||
|
||||
// buildHistoryJobState returns a nested JobState object for the SlurmDB
|
||||
// history endpoint (e.g. {current: ["COMPLETED"], reason: ""}).
|
||||
func buildHistoryJobState(states ...string) *slurm.JobState {
|
||||
return &slurm.JobState{
|
||||
Current: states,
|
||||
Reason: slurm.Ptr(""),
|
||||
}
|
||||
}
|
||||
291
internal/testutil/mockslurm/types_test.go
Normal file
291
internal/testutil/mockslurm/types_test.go
Normal file
@@ -0,0 +1,291 @@
|
||||
package mockslurm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
)
|
||||
|
||||
func TestNewSubmitResponse(t *testing.T) {
|
||||
resp := NewSubmitResponse(42)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiJobSubmitResponse
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Result == nil || got.Result.JobID == nil || *got.Result.JobID != 42 {
|
||||
t.Errorf("result.job_id = %v, want 42", got.Result)
|
||||
}
|
||||
if got.JobID == nil || *got.JobID != 42 {
|
||||
t.Errorf("job_id = %v, want 42", got.JobID)
|
||||
}
|
||||
|
||||
var raw map[string]interface{}
|
||||
json.Unmarshal(data, &raw)
|
||||
if _, ok := raw["job_id"]; !ok {
|
||||
t.Error("missing snake_case field 'job_id'")
|
||||
}
|
||||
if _, ok := raw["result"]; !ok {
|
||||
t.Error("missing field 'result'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewJobInfoResponse(t *testing.T) {
|
||||
jobs := []slurm.JobInfo{
|
||||
{JobID: slurm.Ptr(int32(1)), Name: slurm.Ptr("test1"), JobState: []string{"RUNNING"}},
|
||||
{JobID: slurm.Ptr(int32(2)), Name: slurm.Ptr("test2"), JobState: []string{"PENDING"}},
|
||||
}
|
||||
resp := NewJobInfoResponse(jobs)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiJobInfoResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if len(got.Jobs) != 2 {
|
||||
t.Fatalf("len(jobs) = %d, want 2", len(got.Jobs))
|
||||
}
|
||||
if *got.Jobs[0].JobID != 1 {
|
||||
t.Errorf("jobs[0].job_id = %d, want 1", *got.Jobs[0].JobID)
|
||||
}
|
||||
if got.Jobs[0].JobState[0] != "RUNNING" {
|
||||
t.Errorf("jobs[0].job_state = %v, want [RUNNING]", got.Jobs[0].JobState)
|
||||
}
|
||||
|
||||
var raw map[string]interface{}
|
||||
json.Unmarshal(data, &raw)
|
||||
jobsRaw := raw["jobs"].([]interface{})
|
||||
job0 := jobsRaw[0].(map[string]interface{})
|
||||
stateRaw, ok := job0["job_state"]
|
||||
if !ok {
|
||||
t.Error("missing snake_case field 'job_state'")
|
||||
}
|
||||
stateArr, ok := stateRaw.([]interface{})
|
||||
if !ok || len(stateArr) != 1 || stateArr[0].(string) != "RUNNING" {
|
||||
t.Errorf("job_state = %v, want array [\"RUNNING\"]", stateRaw)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewJobListResponse(t *testing.T) {
|
||||
jobs := []slurm.JobInfo{
|
||||
{JobID: slurm.Ptr(int32(10)), Name: slurm.Ptr("listjob")},
|
||||
}
|
||||
resp := NewJobListResponse(jobs)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiJobInfoResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
if len(got.Jobs) != 1 || *got.Jobs[0].JobID != 10 {
|
||||
t.Errorf("jobs = %+v, want single job with id 10", got.Jobs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDeleteResponse(t *testing.T) {
|
||||
resp := NewDeleteResponse()
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Meta == nil {
|
||||
t.Error("meta is nil")
|
||||
}
|
||||
if len(got.Errors) != 0 {
|
||||
t.Errorf("errors = %v, want empty", got.Errors)
|
||||
}
|
||||
if len(got.Warnings) != 0 {
|
||||
t.Errorf("warnings = %v, want empty", got.Warnings)
|
||||
}
|
||||
|
||||
if got.Meta.Plugin == nil || got.Meta.Plugin.Type == nil || *got.Meta.Plugin.Type != "openapi/v0.0.40" {
|
||||
t.Error("meta.plugin.type missing or wrong")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewNodeResponse(t *testing.T) {
|
||||
nodes := []slurm.Node{
|
||||
{Name: slurm.Ptr("node1"), State: []string{"IDLE"}},
|
||||
{Name: slurm.Ptr("node2"), State: []string{"ALLOCATED"}},
|
||||
}
|
||||
resp := NewNodeResponse(nodes)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiNodesResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Nodes == nil || len(*got.Nodes) != 2 {
|
||||
t.Fatalf("nodes = %v, want 2 nodes", got.Nodes)
|
||||
}
|
||||
gotNodes := *got.Nodes
|
||||
if *gotNodes[0].Name != "node1" {
|
||||
t.Errorf("nodes[0].name = %s, want node1", *gotNodes[0].Name)
|
||||
}
|
||||
if gotNodes[0].State[0] != "IDLE" {
|
||||
t.Errorf("nodes[0].state = %v, want [IDLE]", gotNodes[0].State)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewPartitionResponse(t *testing.T) {
|
||||
partitions := []slurm.PartitionInfo{
|
||||
{Name: slurm.Ptr("normal")},
|
||||
{Name: slurm.Ptr("debug")},
|
||||
}
|
||||
resp := NewPartitionResponse(partitions)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiPartitionResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Partitions == nil || len(*got.Partitions) != 2 {
|
||||
t.Fatalf("partitions = %v, want 2", got.Partitions)
|
||||
}
|
||||
gotParts := *got.Partitions
|
||||
if *gotParts[0].Name != "normal" {
|
||||
t.Errorf("partitions[0].name = %s, want normal", *gotParts[0].Name)
|
||||
}
|
||||
|
||||
var raw map[string]interface{}
|
||||
json.Unmarshal(data, &raw)
|
||||
if _, ok := raw["partitions"]; !ok {
|
||||
t.Error("missing snake_case field 'partitions'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDiagResponse(t *testing.T) {
|
||||
resp := NewDiagResponse()
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiDiagResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Statistics == nil {
|
||||
t.Fatal("statistics is nil")
|
||||
}
|
||||
if got.Statistics.ServerThreadCount == nil || *got.Statistics.ServerThreadCount != 3 {
|
||||
t.Errorf("statistics.server_thread_count = %v, want 3", got.Statistics.ServerThreadCount)
|
||||
}
|
||||
if got.Meta == nil {
|
||||
t.Error("meta is nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewJobHistoryResponse(t *testing.T) {
|
||||
jobs := []slurm.Job{
|
||||
{JobID: slurm.Ptr(int32(100)), Name: slurm.Ptr("histjob"), State: &slurm.JobState{Current: []string{"COMPLETED"}}},
|
||||
}
|
||||
resp := NewJobHistoryResponse(jobs)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiSlurmdbdJobsResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if len(got.Jobs) != 1 {
|
||||
t.Fatalf("len(jobs) = %d, want 1", len(got.Jobs))
|
||||
}
|
||||
if *got.Jobs[0].JobID != 100 {
|
||||
t.Errorf("jobs[0].job_id = %d, want 100", *got.Jobs[0].JobID)
|
||||
}
|
||||
if got.Jobs[0].State == nil || len(got.Jobs[0].State.Current) != 1 || got.Jobs[0].State.Current[0] != "COMPLETED" {
|
||||
t.Errorf("jobs[0].state = %+v, want current=[COMPLETED]", got.Jobs[0].State)
|
||||
}
|
||||
|
||||
var raw map[string]interface{}
|
||||
json.Unmarshal(data, &raw)
|
||||
jobsRaw := raw["jobs"].([]interface{})
|
||||
job0 := jobsRaw[0].(map[string]interface{})
|
||||
stateRaw := job0["state"].(map[string]interface{})
|
||||
currentRaw := stateRaw["current"].([]interface{})
|
||||
if currentRaw[0].(string) != "COMPLETED" {
|
||||
t.Errorf("state.current = %v, want [COMPLETED]", currentRaw)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildActiveJobState(t *testing.T) {
|
||||
states := buildActiveJobState("RUNNING", "COMPLETING")
|
||||
if len(states) != 2 {
|
||||
t.Fatalf("len = %d, want 2", len(states))
|
||||
}
|
||||
if states[0] != "RUNNING" || states[1] != "COMPLETING" {
|
||||
t.Errorf("states = %v, want [RUNNING, COMPLETING]", states)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildHistoryJobState(t *testing.T) {
|
||||
state := buildHistoryJobState("COMPLETED")
|
||||
if state == nil {
|
||||
t.Fatal("state is nil")
|
||||
}
|
||||
if len(state.Current) != 1 || state.Current[0] != "COMPLETED" {
|
||||
t.Errorf("current = %v, want [COMPLETED]", state.Current)
|
||||
}
|
||||
if state.Reason == nil || *state.Reason != "" {
|
||||
t.Errorf("reason = %v, want empty string", state.Reason)
|
||||
}
|
||||
|
||||
data, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
var raw map[string]interface{}
|
||||
json.Unmarshal(data, &raw)
|
||||
current, ok := raw["current"].([]interface{})
|
||||
if !ok {
|
||||
t.Fatalf("current is not an array: %v", raw["current"])
|
||||
}
|
||||
if current[0].(string) != "COMPLETED" {
|
||||
t.Errorf("current[0] = %v, want COMPLETED", current[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewMetaFields(t *testing.T) {
|
||||
meta := newMeta()
|
||||
if meta.Plugin == nil || *meta.Plugin.Type != "openapi/v0.0.40" {
|
||||
t.Error("plugin.type not set correctly")
|
||||
}
|
||||
if meta.Slurm == nil || meta.Slurm.Version == nil {
|
||||
t.Fatal("slurm.version is nil")
|
||||
}
|
||||
if *meta.Slurm.Version.Major != "24" || *meta.Slurm.Version.Minor != "5" || *meta.Slurm.Version.Micro != "0" {
|
||||
t.Errorf("slurm.version = %v, want 24.5.0", meta.Slurm.Version)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user