feat(testutil): add MockSlurm, MockMinIO, TestEnv and 37 integration tests
- mockminio: in-memory ObjectStorage with all 11 methods, thread-safe, SHA256 ETag, Range support
- mockslurm: httptest server with 11 Slurm REST API endpoints, job eviction from active to history queue
- testenv: one-line test environment factory (SQLite + MockSlurm + MockMinIO + all stores/services/handlers + httptest server)
- integration tests: 37 tests covering Jobs(5), Cluster(5), App(6), Upload(5), File(4), Folder(4), Task(4), E2E(1)
- no external dependencies, no existing files modified
This commit is contained in:
257
cmd/server/integration_app_test.go
Normal file
257
cmd/server/integration_app_test.go
Normal file
@@ -0,0 +1,257 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/testutil/testenv"
|
||||
)
|
||||
|
||||
// appListData mirrors the list endpoint response data structure
// (GET /api/v1/applications). Applications is kept as raw JSON because
// these tests only count entries, never inspect individual fields.
type appListData struct {
	Applications []json.RawMessage `json:"applications"`
	Total        int64             `json:"total"`
	Page         int               `json:"page"`
	PageSize     int               `json:"page_size"`
}

// appCreatedData mirrors the create endpoint response data structure
// (POST /api/v1/applications): just the new application's ID.
type appCreatedData struct {
	ID int64 `json:"id"`
}

// appMessageData mirrors the update/delete endpoint response data structure:
// a single human-readable confirmation message.
type appMessageData struct {
	Message string `json:"message"`
}

// appData mirrors the application model returned by GET
// /api/v1/applications/:id. Parameters stays raw JSON since its schema
// is not asserted on in these tests.
type appData struct {
	ID             int64           `json:"id"`
	Name           string          `json:"name"`
	ScriptTemplate string          `json:"script_template"`
	Parameters     json.RawMessage `json:"parameters,omitempty"`
}
|
||||
|
||||
// appDoRequest is a small wrapper that marshals body and calls env.DoRequest.
|
||||
func appDoRequest(env *testenv.TestEnv, method, path string, body interface{}) *http.Response {
|
||||
var r io.Reader
|
||||
if body != nil {
|
||||
b, err := json.Marshal(body)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("appDoRequest marshal: %v", err))
|
||||
}
|
||||
r = bytes.NewReader(b)
|
||||
}
|
||||
return env.DoRequest(method, path, r)
|
||||
}
|
||||
|
||||
// appDecodeAll decodes the response and also reads the HTTP status.
|
||||
func appDecodeAll(env *testenv.TestEnv, resp *http.Response) (statusCode int, success bool, data json.RawMessage, err error) {
|
||||
statusCode = resp.StatusCode
|
||||
success, data, err = env.DecodeResponse(resp)
|
||||
return
|
||||
}
|
||||
|
||||
// appSeedApp creates an app via the service (bypasses HTTP) and returns its ID.
|
||||
func appSeedApp(env *testenv.TestEnv, name string) int64 {
|
||||
id, err := env.CreateApp(name, "#!/bin/bash\necho hello", json.RawMessage(`[]`))
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("appSeedApp: %v", err))
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// TestIntegration_App_List verifies GET /api/v1/applications returns an empty list initially.
|
||||
func TestIntegration_App_List(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/applications", nil)
|
||||
status, success, data, err := appDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var list appListData
|
||||
if err := json.Unmarshal(data, &list); err != nil {
|
||||
t.Fatalf("unmarshal list data: %v", err)
|
||||
}
|
||||
if list.Total != 0 {
|
||||
t.Fatalf("expected total=0, got %d", list.Total)
|
||||
}
|
||||
if len(list.Applications) != 0 {
|
||||
t.Fatalf("expected 0 applications, got %d", len(list.Applications))
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_App_Create verifies POST /api/v1/applications creates an application.
|
||||
func TestIntegration_App_Create(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
body := map[string]interface{}{
|
||||
"name": "test-app-create",
|
||||
"script_template": "#!/bin/bash\necho hello",
|
||||
"parameters": []interface{}{},
|
||||
}
|
||||
resp := appDoRequest(env, http.MethodPost, "/api/v1/applications", body)
|
||||
status, success, data, err := appDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusCreated {
|
||||
t.Fatalf("expected status 201, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var created appCreatedData
|
||||
if err := json.Unmarshal(data, &created); err != nil {
|
||||
t.Fatalf("unmarshal created data: %v", err)
|
||||
}
|
||||
if created.ID <= 0 {
|
||||
t.Fatalf("expected positive id, got %d", created.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_App_Get verifies GET /api/v1/applications/:id returns the correct application.
|
||||
func TestIntegration_App_Get(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
id := appSeedApp(env, "test-app-get")
|
||||
|
||||
path := fmt.Sprintf("/api/v1/applications/%d", id)
|
||||
resp := env.DoRequest(http.MethodGet, path, nil)
|
||||
status, success, data, err := appDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var app appData
|
||||
if err := json.Unmarshal(data, &app); err != nil {
|
||||
t.Fatalf("unmarshal app data: %v", err)
|
||||
}
|
||||
if app.ID != id {
|
||||
t.Fatalf("expected id=%d, got %d", id, app.ID)
|
||||
}
|
||||
if app.Name != "test-app-get" {
|
||||
t.Fatalf("expected name=test-app-get, got %s", app.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_App_Update verifies PUT /api/v1/applications/:id updates an application.
|
||||
func TestIntegration_App_Update(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
id := appSeedApp(env, "test-app-update-before")
|
||||
|
||||
newName := "test-app-update-after"
|
||||
body := map[string]interface{}{
|
||||
"name": newName,
|
||||
}
|
||||
path := fmt.Sprintf("/api/v1/applications/%d", id)
|
||||
resp := appDoRequest(env, http.MethodPut, path, body)
|
||||
status, success, data, err := appDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var msg appMessageData
|
||||
if err := json.Unmarshal(data, &msg); err != nil {
|
||||
t.Fatalf("unmarshal message data: %v", err)
|
||||
}
|
||||
if msg.Message != "application updated" {
|
||||
t.Fatalf("expected message 'application updated', got %q", msg.Message)
|
||||
}
|
||||
|
||||
getResp := env.DoRequest(http.MethodGet, path, nil)
|
||||
_, _, getData, gErr := appDecodeAll(env, getResp)
|
||||
if gErr != nil {
|
||||
t.Fatalf("decode get response: %v", gErr)
|
||||
}
|
||||
var updated appData
|
||||
if err := json.Unmarshal(getData, &updated); err != nil {
|
||||
t.Fatalf("unmarshal updated app: %v", err)
|
||||
}
|
||||
if updated.Name != newName {
|
||||
t.Fatalf("expected updated name=%q, got %q", newName, updated.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_App_Delete verifies DELETE /api/v1/applications/:id removes an application.
|
||||
func TestIntegration_App_Delete(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
id := appSeedApp(env, "test-app-delete")
|
||||
|
||||
path := fmt.Sprintf("/api/v1/applications/%d", id)
|
||||
resp := env.DoRequest(http.MethodDelete, path, nil)
|
||||
status, success, data, err := appDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var msg appMessageData
|
||||
if err := json.Unmarshal(data, &msg); err != nil {
|
||||
t.Fatalf("unmarshal message data: %v", err)
|
||||
}
|
||||
if msg.Message != "application deleted" {
|
||||
t.Fatalf("expected message 'application deleted', got %q", msg.Message)
|
||||
}
|
||||
|
||||
// Verify deletion returns 404.
|
||||
getResp := env.DoRequest(http.MethodGet, path, nil)
|
||||
getStatus, getSuccess, _, _ := appDecodeAll(env, getResp)
|
||||
if getStatus != http.StatusNotFound {
|
||||
t.Fatalf("expected status 404 after delete, got %d", getStatus)
|
||||
}
|
||||
if getSuccess {
|
||||
t.Fatal("expected success=false after delete")
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_App_CreateValidation verifies POST /api/v1/applications with empty name returns error.
|
||||
func TestIntegration_App_CreateValidation(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
body := map[string]interface{}{
|
||||
"name": "",
|
||||
"script_template": "#!/bin/bash\necho hello",
|
||||
}
|
||||
resp := appDoRequest(env, http.MethodPost, "/api/v1/applications", body)
|
||||
status, success, _, err := appDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusBadRequest {
|
||||
t.Fatalf("expected status 400, got %d", status)
|
||||
}
|
||||
if success {
|
||||
t.Fatal("expected success=false for validation error")
|
||||
}
|
||||
}
|
||||
186
cmd/server/integration_cluster_test.go
Normal file
186
cmd/server/integration_cluster_test.go
Normal file
@@ -0,0 +1,186 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/testutil/testenv"
|
||||
)
|
||||
|
||||
// clusterNodeData mirrors the NodeResponse DTO returned by the API
// (GET /api/v1/nodes and /api/v1/nodes/:name). Only the fields these
// tests assert on are declared.
type clusterNodeData struct {
	Name       string   `json:"name"`
	State      []string `json:"state"`
	CPUs       int32    `json:"cpus"`
	RealMemory int64    `json:"real_memory"`
}

// clusterPartitionData mirrors the PartitionResponse DTO returned by the API
// (GET /api/v1/partitions and /api/v1/partitions/:name).
type clusterPartitionData struct {
	Name       string   `json:"name"`
	State      []string `json:"state"`
	TotalNodes int32    `json:"total_nodes,omitempty"`
	TotalCPUs  int32    `json:"total_cpus,omitempty"`
}

// clusterDiagStat mirrors a single entry from the diag statistics.
// NOTE(review): not referenced by any test in this file — the diag test
// decodes into a raw map instead. Presumably kept for future assertions;
// consider removing if it stays unused.
type clusterDiagStat struct {
	Parts []struct {
		Param string `json:"param"`
	} `json:"parts,omitempty"`
}
|
||||
|
||||
// clusterDecodeAll decodes the response and returns status, success, and raw data.
|
||||
func clusterDecodeAll(env *testenv.TestEnv, resp *http.Response) (statusCode int, success bool, data json.RawMessage, err error) {
|
||||
statusCode = resp.StatusCode
|
||||
success, data, err = env.DecodeResponse(resp)
|
||||
return
|
||||
}
|
||||
|
||||
// TestIntegration_Cluster_Nodes verifies GET /api/v1/nodes returns the 3 pre-loaded mock nodes.
|
||||
func TestIntegration_Cluster_Nodes(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/nodes", nil)
|
||||
status, success, data, err := clusterDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var nodes []clusterNodeData
|
||||
if err := json.Unmarshal(data, &nodes); err != nil {
|
||||
t.Fatalf("unmarshal nodes: %v", err)
|
||||
}
|
||||
if len(nodes) != 3 {
|
||||
t.Fatalf("expected 3 nodes, got %d", len(nodes))
|
||||
}
|
||||
|
||||
names := make(map[string]bool, len(nodes))
|
||||
for _, n := range nodes {
|
||||
names[n.Name] = true
|
||||
}
|
||||
for _, expected := range []string{"node01", "node02", "node03"} {
|
||||
if !names[expected] {
|
||||
t.Errorf("missing expected node %q", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Cluster_NodeByName verifies GET /api/v1/nodes/:name returns a single node.
|
||||
func TestIntegration_Cluster_NodeByName(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/nodes/node01", nil)
|
||||
status, success, data, err := clusterDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var node clusterNodeData
|
||||
if err := json.Unmarshal(data, &node); err != nil {
|
||||
t.Fatalf("unmarshal node: %v", err)
|
||||
}
|
||||
if node.Name != "node01" {
|
||||
t.Fatalf("expected name=node01, got %q", node.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Cluster_Partitions verifies GET /api/v1/partitions returns the 2 pre-loaded partitions.
|
||||
func TestIntegration_Cluster_Partitions(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/partitions", nil)
|
||||
status, success, data, err := clusterDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var partitions []clusterPartitionData
|
||||
if err := json.Unmarshal(data, &partitions); err != nil {
|
||||
t.Fatalf("unmarshal partitions: %v", err)
|
||||
}
|
||||
if len(partitions) != 2 {
|
||||
t.Fatalf("expected 2 partitions, got %d", len(partitions))
|
||||
}
|
||||
|
||||
names := make(map[string]bool, len(partitions))
|
||||
for _, p := range partitions {
|
||||
names[p.Name] = true
|
||||
}
|
||||
if !names["normal"] {
|
||||
t.Error("missing expected partition \"normal\"")
|
||||
}
|
||||
if !names["gpu"] {
|
||||
t.Error("missing expected partition \"gpu\"")
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Cluster_PartitionByName verifies GET /api/v1/partitions/:name returns a single partition.
|
||||
func TestIntegration_Cluster_PartitionByName(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/partitions/normal", nil)
|
||||
status, success, data, err := clusterDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var part clusterPartitionData
|
||||
if err := json.Unmarshal(data, &part); err != nil {
|
||||
t.Fatalf("unmarshal partition: %v", err)
|
||||
}
|
||||
if part.Name != "normal" {
|
||||
t.Fatalf("expected name=normal, got %q", part.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Cluster_Diag verifies GET /api/v1/diag returns diagnostics data.
|
||||
func TestIntegration_Cluster_Diag(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/diag", nil)
|
||||
status, success, data, err := clusterDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
// Verify the response contains a "statistics" field (non-empty JSON object).
|
||||
var raw map[string]json.RawMessage
|
||||
if err := json.Unmarshal(data, &raw); err != nil {
|
||||
t.Fatalf("unmarshal diag top-level: %v", err)
|
||||
}
|
||||
if _, ok := raw["statistics"]; !ok {
|
||||
t.Fatal("diag response missing \"statistics\" field")
|
||||
}
|
||||
}
|
||||
202
cmd/server/integration_e2e_test.go
Normal file
202
cmd/server/integration_e2e_test.go
Normal file
@@ -0,0 +1,202 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"gcy_hpc_server/internal/testutil/testenv"
|
||||
)
|
||||
|
||||
// e2eResponse mirrors the unified API response structure: a success flag,
// an optional raw data payload, and an optional error string.
type e2eResponse struct {
	Success bool            `json:"success"`
	Data    json.RawMessage `json:"data,omitempty"`
	Error   string          `json:"error,omitempty"`
}

// e2eTaskCreatedData mirrors the POST /api/v1/tasks response data:
// the new task's database ID.
type e2eTaskCreatedData struct {
	ID int64 `json:"id"`
}

// e2eTaskItem mirrors a single task in the list response. Only the fields
// the E2E workflow asserts on are declared.
type e2eTaskItem struct {
	ID           int64  `json:"id"`
	TaskName     string `json:"task_name"`
	Status       string `json:"status"`
	WorkDir      string `json:"work_dir"`
	ErrorMessage string `json:"error_message"`
}

// e2eTaskListData mirrors the GET /api/v1/tasks list endpoint response data.
type e2eTaskListData struct {
	Items []e2eTaskItem `json:"items"`
	Total int64         `json:"total"`
}
|
||||
|
||||
// e2eSendRequest sends an HTTP request via the test env and returns the response.
|
||||
func e2eSendRequest(env *testenv.TestEnv, method, path string, body string) *http.Response {
|
||||
var r io.Reader
|
||||
if body != "" {
|
||||
r = strings.NewReader(body)
|
||||
}
|
||||
return env.DoRequest(method, path, r)
|
||||
}
|
||||
|
||||
// e2eParseResponse decodes an HTTP response into e2eResponse.
|
||||
func e2eParseResponse(resp *http.Response) (int, e2eResponse) {
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("e2eParseResponse read: %v", err))
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
var result e2eResponse
|
||||
if err := json.Unmarshal(b, &result); err != nil {
|
||||
panic(fmt.Sprintf("e2eParseResponse unmarshal: %v (body: %s)", err, string(b)))
|
||||
}
|
||||
return resp.StatusCode, result
|
||||
}
|
||||
|
||||
// TestIntegration_E2E_CompleteWorkflow verifies the full lifecycle:
// create app → upload file → submit task → queued → running → completed.
//
// The steps are strictly order-dependent: each state transition is driven by
// mutating MockSlurm and then forcing the poller to refresh via MakeTaskStale.
// WaitForTaskStatus polls with a 5s deadline, so the test tolerates the async
// TaskProcessor without fixed sleeps.
func TestIntegration_E2E_CompleteWorkflow(t *testing.T) {
	t.Log("========== E2E 全链路测试开始 ==========")
	t.Log("")
	env := testenv.NewTestEnv(t)
	t.Log("✓ 测试环境创建完成 (SQLite + MockSlurm + MockMinIO + Router + Poller)")
	t.Log("")

	// Step 1: Create Application with script template and parameters.
	t.Log("【步骤 1】创建应用")
	appID, err := env.CreateApp("e2e-app", "#!/bin/bash\necho {{.np}}",
		json.RawMessage(`[{"name":"np","type":"string","default":"1"}]`))
	if err != nil {
		t.Fatalf("step 1 create app: %v", err)
	}
	t.Logf(" → 应用创建成功, appID=%d, 脚本模板='#!/bin/bash echo {{.np}}', 参数=[np]", appID)
	t.Log("")

	// Step 2: Upload input file.
	// NOTE(review): the UploadTestData error is discarded here; a failed
	// upload would surface later as a confusing step-3/4 failure — consider
	// checking it.
	t.Log("【步骤 2】上传输入文件")
	fileID, _ := env.UploadTestData("input.txt", []byte("test input data"))
	t.Logf(" → 文件上传成功, fileID=%d, 内容='test input data' (存入 MockMinIO + SQLite)", fileID)
	t.Log("")

	// Step 3: Submit Task via API.
	t.Log("【步骤 3】通过 HTTP API 提交任务")
	body := fmt.Sprintf(
		`{"app_id": %d, "task_name": "e2e-task", "values": {"np": "4"}, "file_ids": [%d]}`,
		appID, fileID,
	)
	t.Logf(" → POST /api/v1/tasks body=%s", body)
	resp := e2eSendRequest(env, http.MethodPost, "/api/v1/tasks", body)
	status, result := e2eParseResponse(resp)
	if status != http.StatusCreated {
		t.Fatalf("step 3 submit task: status=%d, success=%v, error=%q", status, result.Success, result.Error)
	}

	var created e2eTaskCreatedData
	if err := json.Unmarshal(result.Data, &created); err != nil {
		t.Fatalf("step 3 parse task id: %v", err)
	}
	taskID := created.ID
	if taskID <= 0 {
		t.Fatalf("step 3: expected positive task id, got %d", taskID)
	}
	t.Logf(" → HTTP 201 Created, taskID=%d", taskID)
	t.Log("")

	// Step 4: Wait for queued status.
	t.Log("【步骤 4】等待 TaskProcessor 异步提交到 MockSlurm")
	t.Log(" → 后台流程: submitted → preparing → downloading → ready → queued")
	if err := env.WaitForTaskStatus(taskID, "queued", 5*time.Second); err != nil {
		// On timeout, fetch the current status once more so the failure
		// message shows where the pipeline stalled.
		taskStatus, _ := e2eFetchTaskStatus(env, taskID)
		t.Fatalf("step 4 wait for queued: %v (current status via API: %q)", err, taskStatus)
	}
	t.Logf(" → 任务状态变为 'queued' (TaskProcessor 已提交到 Slurm)")
	t.Log("")

	// Step 5: Get slurmJobID.
	t.Log("【步骤 5】查询数据库获取 Slurm Job ID")
	slurmJobID, err := env.GetTaskSlurmJobID(taskID)
	if err != nil {
		t.Fatalf("step 5 get slurm job id: %v", err)
	}
	t.Logf(" → slurmJobID=%d (MockSlurm 中的作业号)", slurmJobID)
	t.Log("")

	// Step 6: Transition to RUNNING.
	t.Log("【步骤 6】模拟 Slurm: 作业开始运行")
	t.Logf(" → MockSlurm.SetJobState(%d, 'RUNNING')", slurmJobID)
	env.MockSlurm.SetJobState(slurmJobID, "RUNNING")
	t.Logf(" → MakeTaskStale(%d) — 绕过 30s 等待,让 poller 立即刷新", taskID)
	if err := env.MakeTaskStale(taskID); err != nil {
		t.Fatalf("step 6 make task stale: %v", err)
	}
	if err := env.WaitForTaskStatus(taskID, "running", 5*time.Second); err != nil {
		taskStatus, _ := e2eFetchTaskStatus(env, taskID)
		t.Fatalf("step 6 wait for running: %v (current status via API: %q)", err, taskStatus)
	}
	t.Logf(" → 任务状态变为 'running'")
	t.Log("")

	// Step 7: Transition to COMPLETED — job evicted from activeJobs to historyJobs.
	t.Log("【步骤 7】模拟 Slurm: 作业运行完成")
	t.Logf(" → MockSlurm.SetJobState(%d, 'COMPLETED') — 作业从 activeJobs 淘汰到 historyJobs", slurmJobID)
	env.MockSlurm.SetJobState(slurmJobID, "COMPLETED")
	t.Log(" → MakeTaskStale + WaitForTaskStatus...")
	if err := env.MakeTaskStale(taskID); err != nil {
		t.Fatalf("step 7 make task stale: %v", err)
	}
	if err := env.WaitForTaskStatus(taskID, "completed", 5*time.Second); err != nil {
		taskStatus, _ := e2eFetchTaskStatus(env, taskID)
		t.Fatalf("step 7 wait for completed: %v (current status via API: %q)", err, taskStatus)
	}
	t.Logf(" → 任务状态变为 'completed' (通过 SlurmDB 历史回退路径获取)")
	t.Log("")

	// Step 8: Verify final state via GET /api/v1/tasks.
	t.Log("【步骤 8】通过 HTTP API 验证最终状态")
	finalStatus, finalItem := e2eFetchTaskStatus(env, taskID)
	if finalStatus != "completed" {
		t.Fatalf("step 8: expected status completed, got %q (error: %q)", finalStatus, finalItem.ErrorMessage)
	}
	t.Logf(" → GET /api/v1/tasks 返回 status='completed'")
	t.Logf(" → task_name='%s', work_dir='%s'", finalItem.TaskName, finalItem.WorkDir)
	t.Logf(" → MockSlurm activeJobs=%d, historyJobs=%d",
		len(env.MockSlurm.GetAllActiveJobs()), len(env.MockSlurm.GetAllHistoryJobs()))
	t.Log("")

	// Step 9: Verify WorkDir exists and contains the input file.
	// NOTE(review): despite the comment, only non-emptiness of work_dir is
	// asserted here — the directory contents are not inspected.
	t.Log("【步骤 9】验证工作目录")
	if finalItem.WorkDir == "" {
		t.Fatal("step 9: expected non-empty work_dir")
	}
	t.Logf(" → work_dir='%s' (非空,TaskProcessor 已创建)", finalItem.WorkDir)
	t.Log("")
	t.Log("========== E2E 全链路测试通过 ✓ ==========")
}
|
||||
|
||||
// e2eFetchTaskStatus fetches a single task's status from the list API.
|
||||
func e2eFetchTaskStatus(env *testenv.TestEnv, taskID int64) (string, e2eTaskItem) {
|
||||
resp := e2eSendRequest(env, http.MethodGet, "/api/v1/tasks", "")
|
||||
_, result := e2eParseResponse(resp)
|
||||
|
||||
var list e2eTaskListData
|
||||
if err := json.Unmarshal(result.Data, &list); err != nil {
|
||||
return "", e2eTaskItem{}
|
||||
}
|
||||
|
||||
for _, item := range list.Items {
|
||||
if item.ID == taskID {
|
||||
return item.Status, item
|
||||
}
|
||||
}
|
||||
return "", e2eTaskItem{}
|
||||
}
|
||||
170
cmd/server/integration_file_test.go
Normal file
170
cmd/server/integration_file_test.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/model"
|
||||
"gcy_hpc_server/internal/testutil/testenv"
|
||||
)
|
||||
|
||||
// fileAPIResp mirrors server.APIResponse for file integration tests:
// a success flag, an optional raw data payload, and an optional error string.
type fileAPIResp struct {
	Success bool            `json:"success"`
	Data    json.RawMessage `json:"data,omitempty"`
	Error   string          `json:"error,omitempty"`
}
|
||||
|
||||
// fileDecode parses an HTTP response body into fileAPIResp.
|
||||
func fileDecode(t *testing.T, body io.Reader) fileAPIResp {
|
||||
t.Helper()
|
||||
data, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
t.Fatalf("fileDecode: read body: %v", err)
|
||||
}
|
||||
var r fileAPIResp
|
||||
if err := json.Unmarshal(data, &r); err != nil {
|
||||
t.Fatalf("fileDecode: unmarshal: %v (body: %s)", err, string(data))
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestIntegration_File_List(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
// Upload a file so the list is non-empty.
|
||||
env.UploadTestData("list_test.txt", []byte("hello list"))
|
||||
|
||||
resp := env.DoRequest("GET", "/api/v1/files", nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
r := fileDecode(t, resp.Body)
|
||||
if !r.Success {
|
||||
t.Fatalf("response not success: %s", r.Error)
|
||||
}
|
||||
|
||||
var listResp model.ListFilesResponse
|
||||
if err := json.Unmarshal(r.Data, &listResp); err != nil {
|
||||
t.Fatalf("unmarshal list response: %v", err)
|
||||
}
|
||||
|
||||
if len(listResp.Files) == 0 {
|
||||
t.Fatal("expected at least 1 file in list, got 0")
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, f := range listResp.Files {
|
||||
if f.Name == "list_test.txt" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatal("expected to find list_test.txt in file list")
|
||||
}
|
||||
|
||||
if listResp.Total < 1 {
|
||||
t.Fatalf("expected total >= 1, got %d", listResp.Total)
|
||||
}
|
||||
if listResp.Page < 1 {
|
||||
t.Fatalf("expected page >= 1, got %d", listResp.Page)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_File_Get(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
fileID, _ := env.UploadTestData("get_test.txt", []byte("hello get"))
|
||||
|
||||
resp := env.DoRequest("GET", fmt.Sprintf("/api/v1/files/%d", fileID), nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
r := fileDecode(t, resp.Body)
|
||||
if !r.Success {
|
||||
t.Fatalf("response not success: %s", r.Error)
|
||||
}
|
||||
|
||||
var fileResp model.FileResponse
|
||||
if err := json.Unmarshal(r.Data, &fileResp); err != nil {
|
||||
t.Fatalf("unmarshal file response: %v", err)
|
||||
}
|
||||
|
||||
if fileResp.ID != fileID {
|
||||
t.Fatalf("expected file ID %d, got %d", fileID, fileResp.ID)
|
||||
}
|
||||
if fileResp.Name != "get_test.txt" {
|
||||
t.Fatalf("expected name get_test.txt, got %s", fileResp.Name)
|
||||
}
|
||||
if fileResp.Size != int64(len("hello get")) {
|
||||
t.Fatalf("expected size %d, got %d", len("hello get"), fileResp.Size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_File_Download(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
content := []byte("hello world")
|
||||
fileID, _ := env.UploadTestData("download_test.txt", content)
|
||||
|
||||
resp := env.DoRequest("GET", fmt.Sprintf("/api/v1/files/%d/download", fileID), nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("read download body: %v", err)
|
||||
}
|
||||
|
||||
if string(body) != string(content) {
|
||||
t.Fatalf("downloaded content mismatch: got %q, want %q", string(body), string(content))
|
||||
}
|
||||
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
if contentType == "" {
|
||||
t.Fatal("expected Content-Type header to be set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_File_Delete(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
fileID, _ := env.UploadTestData("delete_test.txt", []byte("hello delete"))
|
||||
|
||||
// Delete the file.
|
||||
resp := env.DoRequest("DELETE", fmt.Sprintf("/api/v1/files/%d", fileID), nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
r := fileDecode(t, resp.Body)
|
||||
if !r.Success {
|
||||
t.Fatalf("delete response not success: %s", r.Error)
|
||||
}
|
||||
|
||||
// Verify the file is gone — GET should return 500 (internal error) or 404.
|
||||
getResp := env.DoRequest("GET", fmt.Sprintf("/api/v1/files/%d", fileID), nil)
|
||||
defer getResp.Body.Close()
|
||||
|
||||
if getResp.StatusCode == http.StatusOK {
|
||||
gr := fileDecode(t, getResp.Body)
|
||||
if gr.Success {
|
||||
t.Fatal("expected file to be deleted, but GET still returns success")
|
||||
}
|
||||
}
|
||||
}
|
||||
193
cmd/server/integration_folder_test.go
Normal file
193
cmd/server/integration_folder_test.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/testutil/testenv"
|
||||
)
|
||||
|
||||
// folderData mirrors the FolderResponse DTO returned by the API. ParentID is
// a pointer so a root folder (JSON null / absent parent) decodes as nil.
type folderData struct {
	ID             int64  `json:"id"`
	Name           string `json:"name"`
	ParentID       *int64 `json:"parent_id,omitempty"`
	Path           string `json:"path"`
	FileCount      int64  `json:"file_count"`
	SubFolderCount int64  `json:"subfolder_count"`
}

// folderMessageData mirrors the delete endpoint response data structure:
// a single human-readable confirmation message.
type folderMessageData struct {
	Message string `json:"message"`
}
|
||||
|
||||
// folderDoRequest marshals body and calls env.DoRequest.
|
||||
func folderDoRequest(env *testenv.TestEnv, method, path string, body interface{}) *http.Response {
|
||||
var r io.Reader
|
||||
if body != nil {
|
||||
b, err := json.Marshal(body)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("folderDoRequest marshal: %v", err))
|
||||
}
|
||||
r = bytes.NewReader(b)
|
||||
}
|
||||
return env.DoRequest(method, path, r)
|
||||
}
|
||||
|
||||
// folderDecodeAll decodes the response and returns status, success, and raw data.
|
||||
func folderDecodeAll(env *testenv.TestEnv, resp *http.Response) (statusCode int, success bool, data json.RawMessage, err error) {
|
||||
statusCode = resp.StatusCode
|
||||
success, data, err = env.DecodeResponse(resp)
|
||||
return
|
||||
}
|
||||
|
||||
// folderSeed creates a folder via HTTP and returns its ID.
|
||||
func folderSeed(env *testenv.TestEnv, name string) int64 {
|
||||
body := map[string]interface{}{"name": name}
|
||||
resp := folderDoRequest(env, http.MethodPost, "/api/v1/files/folders", body)
|
||||
status, success, data, err := folderDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("folderSeed decode: %v", err))
|
||||
}
|
||||
if status != http.StatusCreated {
|
||||
panic(fmt.Sprintf("folderSeed: expected 201, got %d", status))
|
||||
}
|
||||
if !success {
|
||||
panic("folderSeed: expected success=true")
|
||||
}
|
||||
var f folderData
|
||||
if err := json.Unmarshal(data, &f); err != nil {
|
||||
panic(fmt.Sprintf("folderSeed unmarshal: %v", err))
|
||||
}
|
||||
return f.ID
|
||||
}
|
||||
|
||||
// TestIntegration_Folder_Create verifies POST /api/v1/files/folders creates a folder.
|
||||
func TestIntegration_Folder_Create(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
body := map[string]interface{}{"name": "test-folder-create"}
|
||||
resp := folderDoRequest(env, http.MethodPost, "/api/v1/files/folders", body)
|
||||
status, success, data, err := folderDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusCreated {
|
||||
t.Fatalf("expected status 201, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var created folderData
|
||||
if err := json.Unmarshal(data, &created); err != nil {
|
||||
t.Fatalf("unmarshal created data: %v", err)
|
||||
}
|
||||
if created.ID <= 0 {
|
||||
t.Fatalf("expected positive id, got %d", created.ID)
|
||||
}
|
||||
if created.Name != "test-folder-create" {
|
||||
t.Fatalf("expected name=test-folder-create, got %s", created.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Folder_List verifies GET /api/v1/files/folders returns a list.
|
||||
func TestIntegration_Folder_List(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
// Seed two folders.
|
||||
folderSeed(env, "list-folder-1")
|
||||
folderSeed(env, "list-folder-2")
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/files/folders", nil)
|
||||
status, success, data, err := folderDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var folders []folderData
|
||||
if err := json.Unmarshal(data, &folders); err != nil {
|
||||
t.Fatalf("unmarshal list data: %v", err)
|
||||
}
|
||||
if len(folders) < 2 {
|
||||
t.Fatalf("expected at least 2 folders, got %d", len(folders))
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Folder_Get verifies GET /api/v1/files/folders/:id returns folder details.
|
||||
func TestIntegration_Folder_Get(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
id := folderSeed(env, "test-folder-get")
|
||||
|
||||
path := fmt.Sprintf("/api/v1/files/folders/%d", id)
|
||||
resp := env.DoRequest(http.MethodGet, path, nil)
|
||||
status, success, data, err := folderDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var f folderData
|
||||
if err := json.Unmarshal(data, &f); err != nil {
|
||||
t.Fatalf("unmarshal folder data: %v", err)
|
||||
}
|
||||
if f.ID != id {
|
||||
t.Fatalf("expected id=%d, got %d", id, f.ID)
|
||||
}
|
||||
if f.Name != "test-folder-get" {
|
||||
t.Fatalf("expected name=test-folder-get, got %s", f.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Folder_Delete verifies DELETE /api/v1/files/folders/:id removes a folder.
|
||||
func TestIntegration_Folder_Delete(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
id := folderSeed(env, "test-folder-delete")
|
||||
|
||||
path := fmt.Sprintf("/api/v1/files/folders/%d", id)
|
||||
resp := env.DoRequest(http.MethodDelete, path, nil)
|
||||
status, success, data, err := folderDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var msg folderMessageData
|
||||
if err := json.Unmarshal(data, &msg); err != nil {
|
||||
t.Fatalf("unmarshal message data: %v", err)
|
||||
}
|
||||
if msg.Message != "folder deleted" {
|
||||
t.Fatalf("expected message 'folder deleted', got %q", msg.Message)
|
||||
}
|
||||
|
||||
// Verify it's gone via GET → 404.
|
||||
getResp := env.DoRequest(http.MethodGet, path, nil)
|
||||
getStatus, getSuccess, _, _ := folderDecodeAll(env, getResp)
|
||||
if getStatus != http.StatusNotFound {
|
||||
t.Fatalf("expected status 404 after delete, got %d", getStatus)
|
||||
}
|
||||
if getSuccess {
|
||||
t.Fatal("expected success=false after delete")
|
||||
}
|
||||
}
|
||||
222
cmd/server/integration_job_test.go
Normal file
222
cmd/server/integration_job_test.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/testutil/testenv"
|
||||
)
|
||||
|
||||
// jobItemData mirrors the JobResponse DTO for a single job.
// Only the fields asserted by the job integration tests are declared.
type jobItemData struct {
	JobID int32  `json:"job_id"` // Slurm-assigned job identifier
	Name  string `json:"name"`
	// State is a list, matching the Slurm REST API's job_state array.
	State     []string `json:"job_state"`
	Partition string   `json:"partition"`
}
|
||||
|
||||
// jobListData mirrors the paginated JobListResponse DTO.
type jobListData struct {
	Jobs []jobItemData `json:"jobs"`
	// Total is the full result count, not just the size of this page.
	Total    int `json:"total"`
	Page     int `json:"page"`
	PageSize int `json:"page_size"`
}
|
||||
|
||||
// jobCancelData mirrors the cancel response message.
type jobCancelData struct {
	Message string `json:"message"`
}
|
||||
|
||||
// jobDecodeAll decodes the response and returns status, success, and raw data.
|
||||
func jobDecodeAll(env *testenv.TestEnv, resp *http.Response) (statusCode int, success bool, data json.RawMessage, err error) {
|
||||
statusCode = resp.StatusCode
|
||||
success, data, err = env.DecodeResponse(resp)
|
||||
return
|
||||
}
|
||||
|
||||
// jobSubmitBody builds a JSON body for job submit requests.
// The marshal error is ignored deliberately: a map[string]string
// cannot fail to marshal.
func jobSubmitBody(script string) *bytes.Reader {
	payload := map[string]string{"script": script}
	encoded, _ := json.Marshal(payload)
	return bytes.NewReader(encoded)
}
|
||||
|
||||
// jobSubmitViaAPI submits a job and returns the job ID. Fatals on failure.
|
||||
func jobSubmitViaAPI(t *testing.T, env *testenv.TestEnv, script string) int32 {
|
||||
t.Helper()
|
||||
resp := env.DoRequest(http.MethodPost, "/api/v1/jobs/submit", jobSubmitBody(script))
|
||||
status, success, data, err := jobDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("submit job decode: %v", err)
|
||||
}
|
||||
if status != http.StatusCreated {
|
||||
t.Fatalf("expected status 201, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true on submit")
|
||||
}
|
||||
|
||||
var job jobItemData
|
||||
if err := json.Unmarshal(data, &job); err != nil {
|
||||
t.Fatalf("unmarshal submitted job: %v", err)
|
||||
}
|
||||
return job.JobID
|
||||
}
|
||||
|
||||
// TestIntegration_Jobs_Submit verifies POST /api/v1/jobs/submit creates a new job.
|
||||
func TestIntegration_Jobs_Submit(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
script := "#!/bin/bash\necho hello"
|
||||
resp := env.DoRequest(http.MethodPost, "/api/v1/jobs/submit", jobSubmitBody(script))
|
||||
status, success, data, err := jobDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusCreated {
|
||||
t.Fatalf("expected status 201, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var job jobItemData
|
||||
if err := json.Unmarshal(data, &job); err != nil {
|
||||
t.Fatalf("unmarshal job: %v", err)
|
||||
}
|
||||
if job.JobID <= 0 {
|
||||
t.Fatalf("expected positive job_id, got %d", job.JobID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Jobs_List verifies GET /api/v1/jobs returns a paginated job list.
|
||||
func TestIntegration_Jobs_List(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
// Submit a job so the list is not empty.
|
||||
jobSubmitViaAPI(t, env, "#!/bin/bash\necho list-test")
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/jobs", nil)
|
||||
status, success, data, err := jobDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var list jobListData
|
||||
if err := json.Unmarshal(data, &list); err != nil {
|
||||
t.Fatalf("unmarshal job list: %v", err)
|
||||
}
|
||||
if list.Total < 1 {
|
||||
t.Fatalf("expected at least 1 job, got total=%d", list.Total)
|
||||
}
|
||||
if list.Page != 1 {
|
||||
t.Fatalf("expected page=1, got %d", list.Page)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Jobs_Get verifies GET /api/v1/jobs/:id returns a single job.
|
||||
func TestIntegration_Jobs_Get(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
jobID := jobSubmitViaAPI(t, env, "#!/bin/bash\necho get-test")
|
||||
|
||||
path := fmt.Sprintf("/api/v1/jobs/%d", jobID)
|
||||
resp := env.DoRequest(http.MethodGet, path, nil)
|
||||
status, success, data, err := jobDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var job jobItemData
|
||||
if err := json.Unmarshal(data, &job); err != nil {
|
||||
t.Fatalf("unmarshal job: %v", err)
|
||||
}
|
||||
if job.JobID != jobID {
|
||||
t.Fatalf("expected job_id=%d, got %d", jobID, job.JobID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Jobs_Cancel verifies DELETE /api/v1/jobs/:id cancels a job.
|
||||
func TestIntegration_Jobs_Cancel(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
jobID := jobSubmitViaAPI(t, env, "#!/bin/bash\necho cancel-test")
|
||||
|
||||
path := fmt.Sprintf("/api/v1/jobs/%d", jobID)
|
||||
resp := env.DoRequest(http.MethodDelete, path, nil)
|
||||
status, success, data, err := jobDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var msg jobCancelData
|
||||
if err := json.Unmarshal(data, &msg); err != nil {
|
||||
t.Fatalf("unmarshal cancel response: %v", err)
|
||||
}
|
||||
if msg.Message == "" {
|
||||
t.Fatal("expected non-empty cancel message")
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Jobs_History verifies GET /api/v1/jobs/history returns historical jobs.
|
||||
func TestIntegration_Jobs_History(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
// Submit and cancel a job so it moves from active to history queue.
|
||||
jobID := jobSubmitViaAPI(t, env, "#!/bin/bash\necho history-test")
|
||||
path := fmt.Sprintf("/api/v1/jobs/%d", jobID)
|
||||
env.DoRequest(http.MethodDelete, path, nil)
|
||||
|
||||
resp := env.DoRequest(http.MethodGet, "/api/v1/jobs/history", nil)
|
||||
status, success, data, err := jobDecodeAll(env, resp)
|
||||
if err != nil {
|
||||
t.Fatalf("decode response: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
if !success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
|
||||
var list jobListData
|
||||
if err := json.Unmarshal(data, &list); err != nil {
|
||||
t.Fatalf("unmarshal history: %v", err)
|
||||
}
|
||||
if list.Total < 1 {
|
||||
t.Fatalf("expected at least 1 history job, got total=%d", list.Total)
|
||||
}
|
||||
|
||||
// Verify the cancelled job appears in history.
|
||||
found := false
|
||||
for _, j := range list.Jobs {
|
||||
if j.JobID == jobID {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("cancelled job %d not found in history", jobID)
|
||||
}
|
||||
}
|
||||
261
cmd/server/integration_task_test.go
Normal file
261
cmd/server/integration_task_test.go
Normal file
@@ -0,0 +1,261 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"gcy_hpc_server/internal/testutil/testenv"
|
||||
)
|
||||
|
||||
// taskAPIResponse decodes the unified API response envelope.
// Exactly one of Data or Error is populated, keyed on Success.
type taskAPIResponse struct {
	Success bool            `json:"success"`
	Data    json.RawMessage `json:"data,omitempty"` // payload, decoded lazily by callers
	Error   string          `json:"error,omitempty"`
}
|
||||
|
||||
// taskCreateData is the data payload from a successful task creation.
type taskCreateData struct {
	ID int64 `json:"id"`
}
|
||||
|
||||
// taskListData is the data payload from listing tasks.
type taskListData struct {
	Items []taskListItem `json:"items"`
	Total int64          `json:"total"` // full result count across pages
}
|
||||
|
||||
// taskListItem mirrors a single task entry in the list response.
type taskListItem struct {
	ID       int64  `json:"id"`
	TaskName string `json:"task_name"`
	AppID    int64  `json:"app_id"`
	Status   string `json:"status"`
	// SlurmJobID is nil until the task has been submitted to Slurm.
	SlurmJobID *int32 `json:"slurm_job_id"`
}
|
||||
|
||||
// taskSendReq sends an HTTP request via the test env and returns the response.
|
||||
func taskSendReq(t *testing.T, env *testenv.TestEnv, method, path string, body string) *http.Response {
|
||||
t.Helper()
|
||||
var r io.Reader
|
||||
if body != "" {
|
||||
r = strings.NewReader(body)
|
||||
}
|
||||
resp := env.DoRequest(method, path, r)
|
||||
return resp
|
||||
}
|
||||
|
||||
// taskParseResp decodes the response body into a taskAPIResponse.
|
||||
func taskParseResp(t *testing.T, resp *http.Response) taskAPIResponse {
|
||||
t.Helper()
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("read response body: %v", err)
|
||||
}
|
||||
var result taskAPIResponse
|
||||
if err := json.Unmarshal(b, &result); err != nil {
|
||||
t.Fatalf("unmarshal response: %v (body: %s)", err, string(b))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// taskCreateViaAPI creates a task via the HTTP API and returns the task ID.
|
||||
func taskCreateViaAPI(t *testing.T, env *testenv.TestEnv, appID int64, taskName string) int64 {
|
||||
t.Helper()
|
||||
body := fmt.Sprintf(`{"app_id":%d,"task_name":"%s","values":{},"file_ids":[]}`, appID, taskName)
|
||||
resp := taskSendReq(t, env, http.MethodPost, "/api/v1/tasks", body)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
b, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("expected 201, got %d: %s", resp.StatusCode, string(b))
|
||||
}
|
||||
|
||||
parsed := taskParseResp(t, resp)
|
||||
if !parsed.Success {
|
||||
t.Fatalf("expected success=true, got error: %s", parsed.Error)
|
||||
}
|
||||
|
||||
var data taskCreateData
|
||||
if err := json.Unmarshal(parsed.Data, &data); err != nil {
|
||||
t.Fatalf("unmarshal create data: %v", err)
|
||||
}
|
||||
if data.ID == 0 {
|
||||
t.Fatal("expected non-zero task ID")
|
||||
}
|
||||
return data.ID
|
||||
}
|
||||
|
||||
// ---------- Tests ----------
|
||||
|
||||
func TestIntegration_Task_Create(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
// Create application
|
||||
appID, err := env.CreateApp("task-create-app", "#!/bin/bash\necho hello", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("create app: %v", err)
|
||||
}
|
||||
|
||||
// Create task via API
|
||||
taskID := taskCreateViaAPI(t, env, appID, "test-task-create")
|
||||
|
||||
// Verify the task ID is positive
|
||||
if taskID <= 0 {
|
||||
t.Fatalf("expected positive task ID, got %d", taskID)
|
||||
}
|
||||
|
||||
// Wait briefly for async processing, then verify task exists in DB via list
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
resp := taskSendReq(t, env, http.MethodGet, "/api/v1/tasks", "")
|
||||
defer resp.Body.Close()
|
||||
parsed := taskParseResp(t, resp)
|
||||
|
||||
var listData taskListData
|
||||
if err := json.Unmarshal(parsed.Data, &listData); err != nil {
|
||||
t.Fatalf("unmarshal list data: %v", err)
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, item := range listData.Items {
|
||||
if item.ID == taskID {
|
||||
found = true
|
||||
if item.TaskName != "test-task-create" {
|
||||
t.Errorf("expected task_name=test-task-create, got %s", item.TaskName)
|
||||
}
|
||||
if item.AppID != appID {
|
||||
t.Errorf("expected app_id=%d, got %d", appID, item.AppID)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("task %d not found in list", taskID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_Task_List(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
// Create application
|
||||
appID, err := env.CreateApp("task-list-app", "#!/bin/bash\necho hello", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("create app: %v", err)
|
||||
}
|
||||
|
||||
// Create 3 tasks
|
||||
taskCreateViaAPI(t, env, appID, "list-task-1")
|
||||
taskCreateViaAPI(t, env, appID, "list-task-2")
|
||||
taskCreateViaAPI(t, env, appID, "list-task-3")
|
||||
|
||||
// Allow async processing
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
// List tasks
|
||||
resp := taskSendReq(t, env, http.MethodGet, "/api/v1/tasks", "")
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("expected 200, got %d: %s", resp.StatusCode, string(b))
|
||||
}
|
||||
|
||||
parsed := taskParseResp(t, resp)
|
||||
if !parsed.Success {
|
||||
t.Fatalf("expected success, got error: %s", parsed.Error)
|
||||
}
|
||||
|
||||
var listData taskListData
|
||||
if err := json.Unmarshal(parsed.Data, &listData); err != nil {
|
||||
t.Fatalf("unmarshal list data: %v", err)
|
||||
}
|
||||
|
||||
if listData.Total < 3 {
|
||||
t.Fatalf("expected at least 3 tasks, got %d", listData.Total)
|
||||
}
|
||||
|
||||
// Verify each created task has required fields
|
||||
for _, item := range listData.Items {
|
||||
if item.ID == 0 {
|
||||
t.Error("expected non-zero ID")
|
||||
}
|
||||
if item.Status == "" {
|
||||
t.Error("expected non-empty status")
|
||||
}
|
||||
if item.AppID == 0 {
|
||||
t.Error("expected non-zero app_id")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration_Task_PollerLifecycle drives a task through its full state
// machine: created → queued → running → completed. The poller only re-reads
// Slurm state for "stale" tasks, so each transition requires setting the
// mock Slurm job state FIRST and then marking the task stale — reversing
// that order would let the poller observe the old state and settle early.
func TestIntegration_Task_PollerLifecycle(t *testing.T) {
	env := testenv.NewTestEnv(t)

	// 1. Create application
	appID, err := env.CreateApp("poller-lifecycle-app", "#!/bin/bash\necho hello", nil)
	if err != nil {
		t.Fatalf("create app: %v", err)
	}

	// 2. Submit task via API
	taskID := taskCreateViaAPI(t, env, appID, "poller-lifecycle-task")

	// 3. Wait for queued — TaskProcessor submits to MockSlurm asynchronously.
	// Intermediate states (submitted→preparing→downloading→ready→queued) are
	// non-deterministic; only assert the final "queued" state.
	if err := env.WaitForTaskStatus(taskID, "queued", 5*time.Second); err != nil {
		t.Fatalf("wait for queued: %v", err)
	}

	// 4. Get slurm job ID from DB (not returned by API)
	slurmJobID, err := env.GetTaskSlurmJobID(taskID)
	if err != nil {
		t.Fatalf("get slurm job id: %v", err)
	}

	// 5. Transition: queued → running
	// ORDER IS CRITICAL: SetJobState BEFORE MakeTaskStale
	env.MockSlurm.SetJobState(slurmJobID, "RUNNING")
	if err := env.MakeTaskStale(taskID); err != nil {
		t.Fatalf("make task stale (running): %v", err)
	}
	if err := env.WaitForTaskStatus(taskID, "running", 5*time.Second); err != nil {
		t.Fatalf("wait for running: %v", err)
	}

	// 6. Transition: running → completed
	// ORDER IS CRITICAL: SetJobState BEFORE MakeTaskStale
	env.MockSlurm.SetJobState(slurmJobID, "COMPLETED")
	if err := env.MakeTaskStale(taskID); err != nil {
		t.Fatalf("make task stale (completed): %v", err)
	}
	if err := env.WaitForTaskStatus(taskID, "completed", 5*time.Second); err != nil {
		t.Fatalf("wait for completed: %v", err)
	}
}
|
||||
|
||||
func TestIntegration_Task_Validation(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
// Missing required app_id
|
||||
resp := taskSendReq(t, env, http.MethodPost, "/api/v1/tasks", `{"task_name":"no-app-id"}`)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusBadRequest {
|
||||
t.Fatalf("expected 400 for missing app_id, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
parsed := taskParseResp(t, resp)
|
||||
if parsed.Success {
|
||||
t.Fatal("expected success=false for validation error")
|
||||
}
|
||||
if parsed.Error == "" {
|
||||
t.Error("expected non-empty error message")
|
||||
}
|
||||
}
|
||||
279
cmd/server/integration_upload_test.go
Normal file
279
cmd/server/integration_upload_test.go
Normal file
@@ -0,0 +1,279 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/model"
|
||||
"gcy_hpc_server/internal/testutil/testenv"
|
||||
)
|
||||
|
||||
// uploadAPIResp mirrors server.APIResponse for upload integration tests.
// Exactly one of Data or Error is populated, keyed on Success.
type uploadAPIResp struct {
	Success bool            `json:"success"`
	Data    json.RawMessage `json:"data,omitempty"` // payload, decoded lazily by callers
	Error   string          `json:"error,omitempty"`
}
|
||||
|
||||
// uploadDecode parses an HTTP response body into uploadAPIResp.
|
||||
func uploadDecode(t *testing.T, body io.Reader) uploadAPIResp {
|
||||
t.Helper()
|
||||
data, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
t.Fatalf("uploadDecode: read body: %v", err)
|
||||
}
|
||||
var r uploadAPIResp
|
||||
if err := json.Unmarshal(data, &r); err != nil {
|
||||
t.Fatalf("uploadDecode: unmarshal: %v (body: %s)", err, string(data))
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// uploadInitSession calls InitUpload and returns the created session.
|
||||
// Uses the real HTTP server from testenv.
|
||||
func uploadInitSession(t *testing.T, env *testenv.TestEnv, fileName string, fileSize int64, sha256Hash string) model.UploadSessionResponse {
|
||||
t.Helper()
|
||||
reqBody := model.InitUploadRequest{
|
||||
FileName: fileName,
|
||||
FileSize: fileSize,
|
||||
SHA256: sha256Hash,
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
resp := env.DoRequest("POST", "/api/v1/files/uploads", bytes.NewReader(body))
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
t.Fatalf("uploadInitSession: expected 201, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
r := uploadDecode(t, resp.Body)
|
||||
if !r.Success {
|
||||
t.Fatalf("uploadInitSession: response not success: %s", r.Error)
|
||||
}
|
||||
|
||||
var session model.UploadSessionResponse
|
||||
if err := json.Unmarshal(r.Data, &session); err != nil {
|
||||
t.Fatalf("uploadInitSession: unmarshal session: %v", err)
|
||||
}
|
||||
return session
|
||||
}
|
||||
|
||||
// uploadSendChunk sends a single chunk via multipart form data.
|
||||
// Uses raw HTTP client to set the correct multipart content type.
|
||||
func uploadSendChunk(t *testing.T, env *testenv.TestEnv, sessionID int64, chunkIndex int, chunkData []byte) {
|
||||
t.Helper()
|
||||
url := fmt.Sprintf("%s/api/v1/files/uploads/%d/chunks/%d", env.URL(), sessionID, chunkIndex)
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
part, err := writer.CreateFormFile("chunk", "chunk.bin")
|
||||
if err != nil {
|
||||
t.Fatalf("uploadSendChunk: create form file: %v", err)
|
||||
}
|
||||
part.Write(chunkData)
|
||||
writer.Close()
|
||||
|
||||
req, err := http.NewRequest("PUT", url, &buf)
|
||||
if err != nil {
|
||||
t.Fatalf("uploadSendChunk: new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("uploadSendChunk: do request: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("uploadSendChunk: expected 200, got %d: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_Upload_Init(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
fileData := []byte("integration test upload init content")
|
||||
h := sha256.Sum256(fileData)
|
||||
sha256Hash := hex.EncodeToString(h[:])
|
||||
|
||||
session := uploadInitSession(t, env, "init_test.txt", int64(len(fileData)), sha256Hash)
|
||||
|
||||
if session.ID <= 0 {
|
||||
t.Fatalf("expected positive session ID, got %d", session.ID)
|
||||
}
|
||||
if session.FileName != "init_test.txt" {
|
||||
t.Fatalf("expected file_name init_test.txt, got %s", session.FileName)
|
||||
}
|
||||
if session.Status != "pending" {
|
||||
t.Fatalf("expected status pending, got %s", session.Status)
|
||||
}
|
||||
if session.TotalChunks != 1 {
|
||||
t.Fatalf("expected 1 chunk for small file, got %d", session.TotalChunks)
|
||||
}
|
||||
if session.FileSize != int64(len(fileData)) {
|
||||
t.Fatalf("expected file_size %d, got %d", len(fileData), session.FileSize)
|
||||
}
|
||||
if session.SHA256 != sha256Hash {
|
||||
t.Fatalf("expected sha256 %s, got %s", sha256Hash, session.SHA256)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_Upload_Status(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
fileData := []byte("integration test status content")
|
||||
h := sha256.Sum256(fileData)
|
||||
sha256Hash := hex.EncodeToString(h[:])
|
||||
|
||||
session := uploadInitSession(t, env, "status_test.txt", int64(len(fileData)), sha256Hash)
|
||||
|
||||
resp := env.DoRequest("GET", fmt.Sprintf("/api/v1/files/uploads/%d", session.ID), nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
r := uploadDecode(t, resp.Body)
|
||||
if !r.Success {
|
||||
t.Fatalf("response not success: %s", r.Error)
|
||||
}
|
||||
|
||||
var status model.UploadSessionResponse
|
||||
if err := json.Unmarshal(r.Data, &status); err != nil {
|
||||
t.Fatalf("unmarshal status: %v", err)
|
||||
}
|
||||
|
||||
if status.ID != session.ID {
|
||||
t.Fatalf("expected session ID %d, got %d", session.ID, status.ID)
|
||||
}
|
||||
if status.Status != "pending" {
|
||||
t.Fatalf("expected status pending, got %s", status.Status)
|
||||
}
|
||||
if status.FileName != "status_test.txt" {
|
||||
t.Fatalf("expected file_name status_test.txt, got %s", status.FileName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_Upload_Chunk(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
fileData := []byte("integration test chunk upload data")
|
||||
h := sha256.Sum256(fileData)
|
||||
sha256Hash := hex.EncodeToString(h[:])
|
||||
|
||||
session := uploadInitSession(t, env, "chunk_test.txt", int64(len(fileData)), sha256Hash)
|
||||
|
||||
uploadSendChunk(t, env, session.ID, 0, fileData)
|
||||
|
||||
// Verify chunk appears in uploaded_chunks via status endpoint
|
||||
resp := env.DoRequest("GET", fmt.Sprintf("/api/v1/files/uploads/%d", session.ID), nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
r := uploadDecode(t, resp.Body)
|
||||
var status model.UploadSessionResponse
|
||||
if err := json.Unmarshal(r.Data, &status); err != nil {
|
||||
t.Fatalf("unmarshal status after chunk: %v", err)
|
||||
}
|
||||
|
||||
if len(status.UploadedChunks) != 1 {
|
||||
t.Fatalf("expected 1 uploaded chunk, got %d", len(status.UploadedChunks))
|
||||
}
|
||||
if status.UploadedChunks[0] != 0 {
|
||||
t.Fatalf("expected uploaded chunk index 0, got %d", status.UploadedChunks[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_Upload_Complete(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
fileData := []byte("integration test complete upload data")
|
||||
h := sha256.Sum256(fileData)
|
||||
sha256Hash := hex.EncodeToString(h[:])
|
||||
|
||||
session := uploadInitSession(t, env, "complete_test.txt", int64(len(fileData)), sha256Hash)
|
||||
|
||||
// Upload all chunks
|
||||
for i := 0; i < session.TotalChunks; i++ {
|
||||
uploadSendChunk(t, env, session.ID, i, fileData)
|
||||
}
|
||||
|
||||
// Complete upload
|
||||
resp := env.DoRequest("POST", fmt.Sprintf("/api/v1/files/uploads/%d/complete", session.ID), nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("expected 201, got %d: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
|
||||
r := uploadDecode(t, resp.Body)
|
||||
if !r.Success {
|
||||
t.Fatalf("complete response not success: %s", r.Error)
|
||||
}
|
||||
|
||||
var fileResp model.FileResponse
|
||||
if err := json.Unmarshal(r.Data, &fileResp); err != nil {
|
||||
t.Fatalf("unmarshal file response: %v", err)
|
||||
}
|
||||
|
||||
if fileResp.ID <= 0 {
|
||||
t.Fatalf("expected positive file ID, got %d", fileResp.ID)
|
||||
}
|
||||
if fileResp.Name != "complete_test.txt" {
|
||||
t.Fatalf("expected name complete_test.txt, got %s", fileResp.Name)
|
||||
}
|
||||
if fileResp.Size != int64(len(fileData)) {
|
||||
t.Fatalf("expected size %d, got %d", len(fileData), fileResp.Size)
|
||||
}
|
||||
if fileResp.SHA256 != sha256Hash {
|
||||
t.Fatalf("expected sha256 %s, got %s", sha256Hash, fileResp.SHA256)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_Upload_Cancel(t *testing.T) {
|
||||
env := testenv.NewTestEnv(t)
|
||||
|
||||
fileData := []byte("integration test cancel upload data")
|
||||
h := sha256.Sum256(fileData)
|
||||
sha256Hash := hex.EncodeToString(h[:])
|
||||
|
||||
session := uploadInitSession(t, env, "cancel_test.txt", int64(len(fileData)), sha256Hash)
|
||||
|
||||
// Cancel the upload
|
||||
resp := env.DoRequest("DELETE", fmt.Sprintf("/api/v1/files/uploads/%d", session.ID), nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("expected 200, got %d: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
|
||||
r := uploadDecode(t, resp.Body)
|
||||
if !r.Success {
|
||||
t.Fatalf("cancel response not success: %s", r.Error)
|
||||
}
|
||||
|
||||
// Verify session is no longer in pending state by checking status
|
||||
statusResp := env.DoRequest("GET", fmt.Sprintf("/api/v1/files/uploads/%d", session.ID), nil)
|
||||
defer statusResp.Body.Close()
|
||||
|
||||
sr := uploadDecode(t, statusResp.Body)
|
||||
if sr.Success {
|
||||
var status model.UploadSessionResponse
|
||||
if err := json.Unmarshal(sr.Data, &status); err == nil {
|
||||
if status.Status == "pending" {
|
||||
t.Fatal("expected status to not be pending after cancel")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
239
internal/testutil/mockminio/storage.go
Normal file
239
internal/testutil/mockminio/storage.go
Normal file
@@ -0,0 +1,239 @@
|
||||
// Package mockminio provides an in-memory implementation of storage.ObjectStorage
|
||||
// for use in tests. It is thread-safe and supports Range reads.
|
||||
package mockminio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gcy_hpc_server/internal/storage"
|
||||
)
|
||||
|
||||
// Compile-time check: *InMemoryStorage must satisfy storage.ObjectStorage.
var _ storage.ObjectStorage = (*InMemoryStorage)(nil)

// objectMeta holds per-object metadata, kept separately from the raw bytes
// so Stat/List can answer without copying object data.
type objectMeta struct {
	size         int64     // object length in bytes
	etag         string    // hex-encoded SHA-256 of the object body
	lastModified time.Time // time of the most recent Put/Compose for this key
	contentType  string    // Content-Type supplied at upload time (may be empty)
}

// InMemoryStorage is a thread-safe, in-memory implementation of
// storage.ObjectStorage. All data is kept in memory; no network or disk I/O
// is performed. Construct via NewInMemoryStorage (the zero value has nil maps).
type InMemoryStorage struct {
	mu      sync.RWMutex          // guards objects, meta and buckets
	objects map[string][]byte     // object key → stored bytes
	meta    map[string]objectMeta // object key → metadata, kept in sync with objects
	buckets map[string]bool       // bucket name → created
}
|
||||
|
||||
// NewInMemoryStorage returns a ready-to-use InMemoryStorage.
|
||||
func NewInMemoryStorage() *InMemoryStorage {
|
||||
return &InMemoryStorage{
|
||||
objects: make(map[string][]byte),
|
||||
meta: make(map[string]objectMeta),
|
||||
buckets: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// PutObject reads all bytes from reader and stores them under key.
|
||||
// The ETag is the SHA-256 hash of the data, formatted as hex.
|
||||
func (s *InMemoryStorage) PutObject(_ context.Context, _, key string, reader io.Reader, _ int64, opts storage.PutObjectOptions) (storage.UploadInfo, error) {
|
||||
data, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return storage.UploadInfo{}, fmt.Errorf("read all: %w", err)
|
||||
}
|
||||
|
||||
h := sha256.Sum256(data)
|
||||
etag := hex.EncodeToString(h[:])
|
||||
|
||||
s.mu.Lock()
|
||||
s.objects[key] = data
|
||||
s.meta[key] = objectMeta{
|
||||
size: int64(len(data)),
|
||||
etag: etag,
|
||||
lastModified: time.Now(),
|
||||
contentType: opts.ContentType,
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
return storage.UploadInfo{ETag: etag, Size: int64(len(data))}, nil
|
||||
}
|
||||
|
||||
// GetObject retrieves an object. opts.Start and opts.End control byte-range
|
||||
// reads. Four cases are supported:
|
||||
// 1. No range (both nil) → return entire object
|
||||
// 2. Start only (End nil) → from start to end of object
|
||||
// 3. End only (Start nil) → from byte 0 to end
|
||||
// 4. Start + End → standard byte range
|
||||
func (s *InMemoryStorage) GetObject(_ context.Context, _, key string, opts storage.GetOptions) (io.ReadCloser, storage.ObjectInfo, error) {
|
||||
s.mu.RLock()
|
||||
data, ok := s.objects[key]
|
||||
meta := s.meta[key]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !ok {
|
||||
return nil, storage.ObjectInfo{}, fmt.Errorf("object %s not found", key)
|
||||
}
|
||||
|
||||
size := int64(len(data))
|
||||
|
||||
// Full object info (Size is always the total object size).
|
||||
info := storage.ObjectInfo{
|
||||
Key: key,
|
||||
Size: size,
|
||||
ETag: meta.etag,
|
||||
LastModified: meta.lastModified,
|
||||
ContentType: meta.contentType,
|
||||
}
|
||||
|
||||
// No range requested → return everything.
|
||||
if opts.Start == nil && opts.End == nil {
|
||||
return io.NopCloser(bytes.NewReader(data)), info, nil
|
||||
}
|
||||
|
||||
// Build range. Check each pointer individually to avoid nil dereference.
|
||||
start := int64(0)
|
||||
if opts.Start != nil {
|
||||
start = *opts.Start
|
||||
}
|
||||
|
||||
end := size - 1
|
||||
if opts.End != nil {
|
||||
end = *opts.End
|
||||
}
|
||||
|
||||
// Clamp end to last byte.
|
||||
if end >= size {
|
||||
end = size - 1
|
||||
}
|
||||
|
||||
if start > end || start < 0 {
|
||||
return nil, storage.ObjectInfo{}, fmt.Errorf("invalid range: start=%d, end=%d, size=%d", start, end, size)
|
||||
}
|
||||
|
||||
section := io.NewSectionReader(bytes.NewReader(data), start, end-start+1)
|
||||
return io.NopCloser(section), info, nil
|
||||
}
|
||||
|
||||
// ComposeObject concatenates source objects (in order) into dst.
|
||||
func (s *InMemoryStorage) ComposeObject(_ context.Context, _, dst string, sources []string) (storage.UploadInfo, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
var buf bytes.Buffer
|
||||
for _, src := range sources {
|
||||
data, ok := s.objects[src]
|
||||
if !ok {
|
||||
return storage.UploadInfo{}, fmt.Errorf("source object %s not found", src)
|
||||
}
|
||||
buf.Write(data)
|
||||
}
|
||||
|
||||
combined := buf.Bytes()
|
||||
h := sha256.Sum256(combined)
|
||||
etag := hex.EncodeToString(h[:])
|
||||
|
||||
s.objects[dst] = combined
|
||||
s.meta[dst] = objectMeta{
|
||||
size: int64(len(combined)),
|
||||
etag: etag,
|
||||
lastModified: time.Now(),
|
||||
}
|
||||
|
||||
return storage.UploadInfo{ETag: etag, Size: int64(len(combined))}, nil
|
||||
}
|
||||
|
||||
// RemoveObject deletes a single object.
|
||||
func (s *InMemoryStorage) RemoveObject(_ context.Context, _, key string, _ storage.RemoveObjectOptions) error {
|
||||
s.mu.Lock()
|
||||
delete(s.objects, key)
|
||||
delete(s.meta, key)
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveObjects deletes multiple objects by key.
|
||||
func (s *InMemoryStorage) RemoveObjects(_ context.Context, _ string, keys []string, _ storage.RemoveObjectsOptions) error {
|
||||
s.mu.Lock()
|
||||
for _, k := range keys {
|
||||
delete(s.objects, k)
|
||||
delete(s.meta, k)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListObjects returns object info for all objects matching prefix, sorted by key.
|
||||
func (s *InMemoryStorage) ListObjects(_ context.Context, _, prefix string, _ bool) ([]storage.ObjectInfo, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
var result []storage.ObjectInfo
|
||||
for k, m := range s.meta {
|
||||
if strings.HasPrefix(k, prefix) {
|
||||
result = append(result, storage.ObjectInfo{
|
||||
Key: k,
|
||||
Size: m.size,
|
||||
ETag: m.etag,
|
||||
LastModified: m.lastModified,
|
||||
ContentType: m.contentType,
|
||||
})
|
||||
}
|
||||
}
|
||||
sort.Slice(result, func(i, j int) bool { return result[i].Key < result[j].Key })
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// BucketExists reports whether the named bucket exists.
|
||||
func (s *InMemoryStorage) BucketExists(_ context.Context, bucket string) (bool, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.buckets[bucket], nil
|
||||
}
|
||||
|
||||
// MakeBucket creates a bucket.
|
||||
func (s *InMemoryStorage) MakeBucket(_ context.Context, bucket string, _ storage.MakeBucketOptions) error {
|
||||
s.mu.Lock()
|
||||
s.buckets[bucket] = true
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// StatObject returns metadata about an object without downloading it.
|
||||
func (s *InMemoryStorage) StatObject(_ context.Context, _, key string, _ storage.StatObjectOptions) (storage.ObjectInfo, error) {
|
||||
s.mu.RLock()
|
||||
m, ok := s.meta[key]
|
||||
s.mu.RUnlock()
|
||||
if !ok {
|
||||
return storage.ObjectInfo{}, fmt.Errorf("object %s not found", key)
|
||||
}
|
||||
return storage.ObjectInfo{
|
||||
Key: key,
|
||||
Size: m.size,
|
||||
ETag: m.etag,
|
||||
LastModified: m.lastModified,
|
||||
ContentType: m.contentType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AbortMultipartUpload is a no-op for the in-memory implementation.
|
||||
func (s *InMemoryStorage) AbortMultipartUpload(_ context.Context, _, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveIncompleteUpload is a no-op for the in-memory implementation.
|
||||
func (s *InMemoryStorage) RemoveIncompleteUpload(_ context.Context, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
378
internal/testutil/mockminio/storage_test.go
Normal file
378
internal/testutil/mockminio/storage_test.go
Normal file
@@ -0,0 +1,378 @@
|
||||
package mockminio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/storage"
|
||||
)
|
||||
|
||||
func sha256Hex(data []byte) string {
|
||||
h := sha256.Sum256(data)
|
||||
return hex.EncodeToString(h[:])
|
||||
}
|
||||
|
||||
func TestNewInMemoryStorage_ReturnsInitialized(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
if s == nil {
|
||||
t.Fatal("expected non-nil storage")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutObject_StoresData(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
data := []byte("hello world")
|
||||
|
||||
info, err := s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{ContentType: "text/plain"})
|
||||
if err != nil {
|
||||
t.Fatalf("PutObject: %v", err)
|
||||
}
|
||||
|
||||
wantETag := sha256Hex(data)
|
||||
if info.ETag != wantETag {
|
||||
t.Errorf("ETag = %q, want %q", info.ETag, wantETag)
|
||||
}
|
||||
if info.Size != int64(len(data)) {
|
||||
t.Errorf("Size = %d, want %d", info.Size, len(data))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObject_FullObject(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
data := []byte("hello world")
|
||||
|
||||
s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
|
||||
|
||||
rc, info, err := s.GetObject(ctx, "bucket", "key1", storage.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetObject: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
got, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadAll: %v", err)
|
||||
}
|
||||
if !bytes.Equal(got, data) {
|
||||
t.Errorf("got %q, want %q", got, data)
|
||||
}
|
||||
if info.Size != int64(len(data)) {
|
||||
t.Errorf("info.Size = %d, want %d", info.Size, len(data))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObject_RangeStartOnly(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
data := []byte("0123456789")
|
||||
|
||||
s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
|
||||
|
||||
start := int64(5)
|
||||
rc, _, err := s.GetObject(ctx, "bucket", "key1", storage.GetOptions{Start: &start})
|
||||
if err != nil {
|
||||
t.Fatalf("GetObject: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
got, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadAll: %v", err)
|
||||
}
|
||||
want := data[5:]
|
||||
if !bytes.Equal(got, want) {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObject_RangeEndOnly(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
data := []byte("0123456789")
|
||||
|
||||
s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
|
||||
|
||||
end := int64(4)
|
||||
rc, _, err := s.GetObject(ctx, "bucket", "key1", storage.GetOptions{End: &end})
|
||||
if err != nil {
|
||||
t.Fatalf("GetObject: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
got, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadAll: %v", err)
|
||||
}
|
||||
want := data[:5]
|
||||
if !bytes.Equal(got, want) {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObject_RangeStartAndEnd(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
data := []byte("0123456789")
|
||||
|
||||
s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
|
||||
|
||||
start := int64(2)
|
||||
end := int64(5)
|
||||
rc, _, err := s.GetObject(ctx, "bucket", "key1", storage.GetOptions{Start: &start, End: &end})
|
||||
if err != nil {
|
||||
t.Fatalf("GetObject: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
got, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadAll: %v", err)
|
||||
}
|
||||
want := data[2:6]
|
||||
if !bytes.Equal(got, want) {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObject_NotFound(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
_, _, err := s.GetObject(ctx, "bucket", "nonexistent", storage.GetOptions{})
|
||||
if err == nil {
|
||||
t.Fatal("expected error for missing object")
|
||||
}
|
||||
}
|
||||
|
||||
func TestComposeObject_ConcatenatesSources(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
s.PutObject(ctx, "bucket", "part1", bytes.NewReader([]byte("hello ")), 6, storage.PutObjectOptions{})
|
||||
s.PutObject(ctx, "bucket", "part2", bytes.NewReader([]byte("world")), 5, storage.PutObjectOptions{})
|
||||
|
||||
info, err := s.ComposeObject(ctx, "bucket", "combined", []string{"part1", "part2"})
|
||||
if err != nil {
|
||||
t.Fatalf("ComposeObject: %v", err)
|
||||
}
|
||||
|
||||
want := []byte("hello world")
|
||||
if info.Size != int64(len(want)) {
|
||||
t.Errorf("Size = %d, want %d", info.Size, len(want))
|
||||
}
|
||||
|
||||
wantETag := sha256Hex(want)
|
||||
if info.ETag != wantETag {
|
||||
t.Errorf("ETag = %q, want %q", info.ETag, wantETag)
|
||||
}
|
||||
|
||||
rc, _, err := s.GetObject(ctx, "bucket", "combined", storage.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetObject combined: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
got, _ := io.ReadAll(rc)
|
||||
if !bytes.Equal(got, want) {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestComposeObject_MissingSource(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := s.ComposeObject(ctx, "bucket", "dst", []string{"missing"})
|
||||
if err == nil {
|
||||
t.Fatal("expected error for missing source")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveObject(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
s.PutObject(ctx, "bucket", "key1", bytes.NewReader([]byte("data")), 4, storage.PutObjectOptions{})
|
||||
|
||||
err := s.RemoveObject(ctx, "bucket", "key1", storage.RemoveObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("RemoveObject: %v", err)
|
||||
}
|
||||
|
||||
_, _, err = s.GetObject(ctx, "bucket", "key1", storage.GetOptions{})
|
||||
if err == nil {
|
||||
t.Fatal("expected error after removal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveObjects(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
key := fmt.Sprintf("key%d", i)
|
||||
s.PutObject(ctx, "bucket", key, bytes.NewReader([]byte(key)), int64(len(key)), storage.PutObjectOptions{})
|
||||
}
|
||||
|
||||
err := s.RemoveObjects(ctx, "bucket", []string{"key1", "key3"}, storage.RemoveObjectsOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("RemoveObjects: %v", err)
|
||||
}
|
||||
|
||||
objects, _ := s.ListObjects(ctx, "bucket", "", true)
|
||||
if len(objects) != 3 {
|
||||
t.Errorf("got %d objects, want 3", len(objects))
|
||||
}
|
||||
}
|
||||
|
||||
func TestListObjects(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
s.PutObject(ctx, "bucket", "dir/a", bytes.NewReader([]byte("a")), 1, storage.PutObjectOptions{})
|
||||
s.PutObject(ctx, "bucket", "dir/b", bytes.NewReader([]byte("bb")), 2, storage.PutObjectOptions{})
|
||||
s.PutObject(ctx, "bucket", "other/c", bytes.NewReader([]byte("ccc")), 3, storage.PutObjectOptions{})
|
||||
|
||||
objects, err := s.ListObjects(ctx, "bucket", "dir/", true)
|
||||
if err != nil {
|
||||
t.Fatalf("ListObjects: %v", err)
|
||||
}
|
||||
if len(objects) != 2 {
|
||||
t.Fatalf("got %d objects, want 2", len(objects))
|
||||
}
|
||||
if objects[0].Key != "dir/a" || objects[1].Key != "dir/b" {
|
||||
t.Errorf("keys = %v, want [dir/a dir/b]", []string{objects[0].Key, objects[1].Key})
|
||||
}
|
||||
if objects[0].Size != 1 || objects[1].Size != 2 {
|
||||
t.Errorf("sizes = %v, want [1 2]", []int64{objects[0].Size, objects[1].Size})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBucketExists(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
ok, _ := s.BucketExists(ctx, "mybucket")
|
||||
if ok {
|
||||
t.Error("bucket should not exist yet")
|
||||
}
|
||||
|
||||
s.MakeBucket(ctx, "mybucket", storage.MakeBucketOptions{})
|
||||
|
||||
ok, _ = s.BucketExists(ctx, "mybucket")
|
||||
if !ok {
|
||||
t.Error("bucket should exist after MakeBucket")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMakeBucket(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
err := s.MakeBucket(ctx, "test-bucket", storage.MakeBucketOptions{Region: "us-east-1"})
|
||||
if err != nil {
|
||||
t.Fatalf("MakeBucket: %v", err)
|
||||
}
|
||||
|
||||
ok, _ := s.BucketExists(ctx, "test-bucket")
|
||||
if !ok {
|
||||
t.Error("bucket should exist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatObject(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
data := []byte("test data")
|
||||
|
||||
s.PutObject(ctx, "bucket", "key1", bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{ContentType: "text/plain"})
|
||||
|
||||
info, err := s.StatObject(ctx, "bucket", "key1", storage.StatObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("StatObject: %v", err)
|
||||
}
|
||||
|
||||
wantETag := sha256Hex(data)
|
||||
if info.Key != "key1" {
|
||||
t.Errorf("Key = %q, want %q", info.Key, "key1")
|
||||
}
|
||||
if info.Size != int64(len(data)) {
|
||||
t.Errorf("Size = %d, want %d", info.Size, len(data))
|
||||
}
|
||||
if info.ETag != wantETag {
|
||||
t.Errorf("ETag = %q, want %q", info.ETag, wantETag)
|
||||
}
|
||||
if info.ContentType != "text/plain" {
|
||||
t.Errorf("ContentType = %q, want %q", info.ContentType, "text/plain")
|
||||
}
|
||||
if info.LastModified.IsZero() {
|
||||
t.Error("LastModified should not be zero")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatObject_NotFound(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := s.StatObject(ctx, "bucket", "nonexistent", storage.StatObjectOptions{})
|
||||
if err == nil {
|
||||
t.Fatal("expected error for missing object")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAbortMultipartUpload(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
err := s.AbortMultipartUpload(ctx, "bucket", "key", "upload-id")
|
||||
if err != nil {
|
||||
t.Fatalf("AbortMultipartUpload: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveIncompleteUpload(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
|
||||
err := s.RemoveIncompleteUpload(ctx, "bucket", "key")
|
||||
if err != nil {
|
||||
t.Fatalf("RemoveIncompleteUpload: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConcurrentAccess(t *testing.T) {
|
||||
s := NewInMemoryStorage()
|
||||
ctx := context.Background()
|
||||
const goroutines = 50
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(goroutines * 2)
|
||||
|
||||
for i := 0; i < goroutines; i++ {
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
key := fmt.Sprintf("key%d", i%10)
|
||||
data := []byte(fmt.Sprintf("data-%d", i))
|
||||
s.PutObject(ctx, "bucket", key, bytes.NewReader(data), int64(len(data)), storage.PutObjectOptions{})
|
||||
}(i)
|
||||
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
key := fmt.Sprintf("key%d", i%10)
|
||||
rc, _, _ := s.GetObject(ctx, "bucket", key, storage.GetOptions{})
|
||||
if rc != nil {
|
||||
rc.Close()
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
544
internal/testutil/mockslurm/server.go
Normal file
544
internal/testutil/mockslurm/server.go
Normal file
@@ -0,0 +1,544 @@
|
||||
// Package mockslurm provides a complete HTTP mock server for the Slurm REST API.
|
||||
// It supports all 11 endpoints (P0: 4 job + P1: 7 cluster/history) and includes
|
||||
// job eviction from active to history queue on terminal states.
|
||||
package mockslurm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
)
|
||||
|
||||
// MockJob represents a job tracked by the mock server.
type MockJob struct {
	JobID      int32
	Name       string
	State      string // single state string for internal tracking
	Script     string
	Partition  string
	WorkDir    string
	SubmitTime time.Time
	StartTime  *time.Time // set when the job transitions to RUNNING
	EndTime    *time.Time // set when the job reaches a terminal state
	ExitCode   *int32     // 0 for COMPLETED, 1 for other terminal states
}

// MockNode represents a node tracked by the mock server.
type MockNode struct {
	Name string
}

// MockPartition represents a partition tracked by the mock server.
type MockPartition struct {
	Name string
}

// MockSlurm is the mock Slurm API server controller. New jobs enter
// activeJobs; SetJobState moves them to historyJobs on terminal states.
type MockSlurm struct {
	mu          sync.RWMutex // guards the job maps and nextID
	activeJobs  map[int32]*MockJob
	historyJobs map[int32]*MockJob
	nodes       []MockNode      // populated at construction
	partitions  []MockPartition // populated at construction
	nextID      int32           // next job ID to hand out on submit
	server      *httptest.Server
}
|
||||
|
||||
// NewMockSlurmServer creates and starts a mock Slurm REST API server.
|
||||
// Returns the httptest.Server and the MockSlurm controller.
|
||||
func NewMockSlurmServer() (*httptest.Server, *MockSlurm) {
|
||||
m := &MockSlurm{
|
||||
activeJobs: make(map[int32]*MockJob),
|
||||
historyJobs: make(map[int32]*MockJob),
|
||||
nodes: []MockNode{
|
||||
{Name: "node01"},
|
||||
{Name: "node02"},
|
||||
{Name: "node03"},
|
||||
},
|
||||
partitions: []MockPartition{
|
||||
{Name: "normal"},
|
||||
{Name: "gpu"},
|
||||
},
|
||||
nextID: 1,
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// P0: Exact paths FIRST (before prefix paths)
|
||||
mux.HandleFunc("/slurm/v0.0.40/job/submit", m.handleJobSubmit)
|
||||
mux.HandleFunc("/slurm/v0.0.40/jobs", m.handleGetJobs)
|
||||
|
||||
// P0: Prefix path for /job/{id} — GET and DELETE
|
||||
mux.HandleFunc("/slurm/v0.0.40/job/", m.handleJobByID)
|
||||
|
||||
// P1: Cluster endpoints
|
||||
mux.HandleFunc("/slurm/v0.0.40/nodes", m.handleGetNodes)
|
||||
mux.HandleFunc("/slurm/v0.0.40/node/", m.handleGetNode)
|
||||
mux.HandleFunc("/slurm/v0.0.40/partitions", m.handleGetPartitions)
|
||||
mux.HandleFunc("/slurm/v0.0.40/partition/", m.handleGetPartition)
|
||||
mux.HandleFunc("/slurm/v0.0.40/diag", m.handleDiag)
|
||||
|
||||
// P1: SlurmDB endpoints
|
||||
mux.HandleFunc("/slurmdb/v0.0.40/jobs", m.handleSlurmdbJobs)
|
||||
mux.HandleFunc("/slurmdb/v0.0.40/job/", m.handleSlurmdbJob)
|
||||
|
||||
srv := httptest.NewServer(mux)
|
||||
m.server = srv
|
||||
|
||||
return srv, m
|
||||
}
|
||||
|
||||
// Server returns the underlying httptest.Server.
|
||||
func (m *MockSlurm) Server() *httptest.Server {
|
||||
return m.server
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Controller methods
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// SetJobState transitions a job to the given state.
|
||||
// Terminal states (COMPLETED/FAILED/CANCELLED/TIMEOUT) evict the job from
|
||||
// activeJobs into historyJobs. RUNNING sets StartTime and stays active.
|
||||
// PENDING stays in activeJobs.
|
||||
func (m *MockSlurm) SetJobState(id int32, state string) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
mj, ok := m.activeJobs[id]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
switch state {
|
||||
case "RUNNING":
|
||||
mj.State = state
|
||||
mj.StartTime = &now
|
||||
case "COMPLETED", "FAILED", "CANCELLED", "TIMEOUT":
|
||||
mj.State = state
|
||||
mj.EndTime = &now
|
||||
exitCode := int32(0)
|
||||
if state != "COMPLETED" {
|
||||
exitCode = 1
|
||||
}
|
||||
mj.ExitCode = &exitCode
|
||||
delete(m.activeJobs, id)
|
||||
m.historyJobs[id] = mj
|
||||
case "PENDING":
|
||||
mj.State = state
|
||||
}
|
||||
}
|
||||
|
||||
// GetJobState returns the current state of the job with the given ID.
|
||||
// Returns empty string if the job is not found.
|
||||
func (m *MockSlurm) GetJobState(id int32) string {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
if mj, ok := m.activeJobs[id]; ok {
|
||||
return mj.State
|
||||
}
|
||||
if mj, ok := m.historyJobs[id]; ok {
|
||||
return mj.State
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetAllActiveJobs returns all jobs currently in the active queue.
|
||||
func (m *MockSlurm) GetAllActiveJobs() []*MockJob {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
jobs := make([]*MockJob, 0, len(m.activeJobs))
|
||||
for _, mj := range m.activeJobs {
|
||||
jobs = append(jobs, mj)
|
||||
}
|
||||
return jobs
|
||||
}
|
||||
|
||||
// GetAllHistoryJobs returns all jobs in the history queue.
|
||||
func (m *MockSlurm) GetAllHistoryJobs() []*MockJob {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
jobs := make([]*MockJob, 0, len(m.historyJobs))
|
||||
for _, mj := range m.historyJobs {
|
||||
jobs = append(jobs, mj)
|
||||
}
|
||||
return jobs
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Job Core Endpoints
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// POST /slurm/v0.0.40/job/submit
|
||||
func (m *MockSlurm) handleJobSubmit(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
var req slurm.JobSubmitReq
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
jobID := m.nextID
|
||||
m.nextID++
|
||||
|
||||
job := &MockJob{
|
||||
JobID: jobID,
|
||||
State: "PENDING", // MUST be non-empty for mapSlurmStateToTaskStatus
|
||||
SubmitTime: time.Now(),
|
||||
}
|
||||
|
||||
if req.Script != nil {
|
||||
job.Script = *req.Script
|
||||
}
|
||||
if req.Job != nil {
|
||||
if req.Job.Name != nil {
|
||||
job.Name = *req.Job.Name
|
||||
}
|
||||
if req.Job.Partition != nil {
|
||||
job.Partition = *req.Job.Partition
|
||||
}
|
||||
if req.Job.CurrentWorkingDirectory != nil {
|
||||
job.WorkDir = *req.Job.CurrentWorkingDirectory
|
||||
}
|
||||
if req.Job.Script != nil {
|
||||
job.Script = *req.Job.Script
|
||||
}
|
||||
}
|
||||
|
||||
m.activeJobs[jobID] = job
|
||||
m.mu.Unlock()
|
||||
|
||||
resp := NewSubmitResponse(jobID)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/jobs
|
||||
func (m *MockSlurm) handleGetJobs(w http.ResponseWriter, r *http.Request) {
|
||||
m.mu.RLock()
|
||||
jobs := make([]slurm.JobInfo, 0, len(m.activeJobs))
|
||||
for _, mj := range m.activeJobs {
|
||||
jobs = append(jobs, m.mockJobToJobInfo(mj))
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
resp := NewJobInfoResponse(jobs)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET/DELETE /slurm/v0.0.40/job/{id}
|
||||
func (m *MockSlurm) handleJobByID(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
|
||||
// /slurm/v0.0.40/job/{id} → segments[0]="", [1]="slurm", [2]="v0.0.40", [3]="job", [4]=id
|
||||
if len(segments) < 5 {
|
||||
m.writeError(w, http.StatusBadRequest, "missing job id")
|
||||
return
|
||||
}
|
||||
|
||||
last := segments[4]
|
||||
|
||||
// Safety net: if "submit" leaks through prefix match, forward to submit handler
|
||||
if last == "submit" {
|
||||
m.handleJobSubmit(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
id, err := strconv.ParseInt(last, 10, 32)
|
||||
if err != nil {
|
||||
m.writeError(w, http.StatusBadRequest, "invalid job id")
|
||||
return
|
||||
}
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
m.handleGetJobByID(w, int32(id))
|
||||
case http.MethodDelete:
|
||||
m.handleDeleteJobByID(w, int32(id))
|
||||
default:
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockSlurm) handleGetJobByID(w http.ResponseWriter, jobID int32) {
|
||||
m.mu.RLock()
|
||||
mj, ok := m.activeJobs[jobID]
|
||||
m.mu.RUnlock()
|
||||
|
||||
if !ok {
|
||||
m.writeError(w, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
|
||||
ji := m.mockJobToJobInfo(mj)
|
||||
resp := NewJobInfoResponse([]slurm.JobInfo{ji})
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
func (m *MockSlurm) handleDeleteJobByID(w http.ResponseWriter, jobID int32) {
|
||||
m.SetJobState(jobID, "CANCELLED")
|
||||
resp := NewDeleteResponse()
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: Cluster/History Endpoints
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// GET /slurm/v0.0.40/nodes
|
||||
func (m *MockSlurm) handleGetNodes(w http.ResponseWriter, r *http.Request) {
|
||||
nodes := make([]slurm.Node, len(m.nodes))
|
||||
for i, n := range m.nodes {
|
||||
nodes[i] = slurm.Node{Name: slurm.Ptr(n.Name)}
|
||||
}
|
||||
resp := NewNodeResponse(nodes)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/node/{name}
|
||||
func (m *MockSlurm) handleGetNode(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
|
||||
if len(segments) < 5 {
|
||||
m.writeError(w, http.StatusBadRequest, "missing node name")
|
||||
return
|
||||
}
|
||||
nodeName := segments[4]
|
||||
|
||||
var found *slurm.Node
|
||||
for _, n := range m.nodes {
|
||||
if n.Name == nodeName {
|
||||
found = &slurm.Node{Name: slurm.Ptr(n.Name)}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found == nil {
|
||||
m.writeError(w, http.StatusNotFound, "node not found")
|
||||
return
|
||||
}
|
||||
|
||||
resp := NewNodeResponse([]slurm.Node{*found})
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/partitions
|
||||
func (m *MockSlurm) handleGetPartitions(w http.ResponseWriter, r *http.Request) {
|
||||
parts := make([]slurm.PartitionInfo, len(m.partitions))
|
||||
for i, p := range m.partitions {
|
||||
parts[i] = slurm.PartitionInfo{Name: slurm.Ptr(p.Name)}
|
||||
}
|
||||
resp := NewPartitionResponse(parts)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/partition/{name}
|
||||
func (m *MockSlurm) handleGetPartition(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
|
||||
if len(segments) < 5 {
|
||||
m.writeError(w, http.StatusBadRequest, "missing partition name")
|
||||
return
|
||||
}
|
||||
partName := segments[4]
|
||||
|
||||
var found *slurm.PartitionInfo
|
||||
for _, p := range m.partitions {
|
||||
if p.Name == partName {
|
||||
found = &slurm.PartitionInfo{Name: slurm.Ptr(p.Name)}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found == nil {
|
||||
m.writeError(w, http.StatusNotFound, "partition not found")
|
||||
return
|
||||
}
|
||||
|
||||
resp := NewPartitionResponse([]slurm.PartitionInfo{*found})
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurm/v0.0.40/diag
|
||||
func (m *MockSlurm) handleDiag(w http.ResponseWriter, r *http.Request) {
|
||||
resp := NewDiagResponse()
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurmdb/v0.0.40/jobs — supports filter params: job_name, start_time, end_time
|
||||
func (m *MockSlurm) handleSlurmdbJobs(w http.ResponseWriter, r *http.Request) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
jobs := make([]slurm.Job, 0)
|
||||
for _, mj := range m.historyJobs {
|
||||
// Filter by job_name
|
||||
if name := r.URL.Query().Get("job_name"); name != "" && mj.Name != name {
|
||||
continue
|
||||
}
|
||||
// Filter by start_time (job start must be >= filter start)
|
||||
if startStr := r.URL.Query().Get("start_time"); startStr != "" {
|
||||
if st, err := strconv.ParseInt(startStr, 10, 64); err == nil {
|
||||
if mj.StartTime == nil || mj.StartTime.Unix() < st {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
// Filter by end_time (job end must be <= filter end)
|
||||
if endStr := r.URL.Query().Get("end_time"); endStr != "" {
|
||||
if et, err := strconv.ParseInt(endStr, 10, 64); err == nil {
|
||||
if mj.EndTime == nil || mj.EndTime.Unix() > et {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
jobs = append(jobs, m.mockJobToSlurmDBJob(mj))
|
||||
}
|
||||
|
||||
resp := NewJobHistoryResponse(jobs)
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// GET /slurmdb/v0.0.40/job/{id} — returns OpenapiSlurmdbdJobsResp (with jobs array wrapper)
|
||||
func (m *MockSlurm) handleSlurmdbJob(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/")
|
||||
if len(segments) < 5 {
|
||||
m.writeError(w, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
|
||||
id, err := strconv.ParseInt(segments[4], 10, 32)
|
||||
if err != nil {
|
||||
m.writeError(w, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
|
||||
m.mu.RLock()
|
||||
mj, ok := m.historyJobs[int32(id)]
|
||||
m.mu.RUnlock()
|
||||
|
||||
if !ok {
|
||||
m.writeError(w, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
|
||||
dbJob := m.mockJobToSlurmDBJob(mj)
|
||||
resp := NewJobHistoryResponse([]slurm.Job{dbJob})
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Conversion helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// mockJobToJobInfo converts a MockJob to an active-endpoint JobInfo.
|
||||
// Uses buildActiveJobState for flat []string state format: ["RUNNING"].
|
||||
func (m *MockSlurm) mockJobToJobInfo(mj *MockJob) slurm.JobInfo {
|
||||
ji := slurm.JobInfo{
|
||||
JobID: slurm.Ptr(mj.JobID),
|
||||
JobState: buildActiveJobState(mj.State), // MUST be non-empty []string
|
||||
Name: slurm.Ptr(mj.Name),
|
||||
Partition: slurm.Ptr(mj.Partition),
|
||||
CurrentWorkingDirectory: slurm.Ptr(mj.WorkDir),
|
||||
SubmitTime: &slurm.Uint64NoVal{Number: slurm.Ptr(mj.SubmitTime.Unix())},
|
||||
}
|
||||
|
||||
if mj.StartTime != nil {
|
||||
ji.StartTime = &slurm.Uint64NoVal{Number: slurm.Ptr(mj.StartTime.Unix())}
|
||||
}
|
||||
if mj.EndTime != nil {
|
||||
ji.EndTime = &slurm.Uint64NoVal{Number: slurm.Ptr(mj.EndTime.Unix())}
|
||||
}
|
||||
if mj.ExitCode != nil {
|
||||
ji.ExitCode = &slurm.ProcessExitCodeVerbose{
|
||||
ReturnCode: &slurm.Uint32NoVal{Number: slurm.Ptr(int64(*mj.ExitCode))},
|
||||
}
|
||||
}
|
||||
|
||||
return ji
|
||||
}
|
||||
|
||||
// mockJobToSlurmDBJob converts a MockJob to a SlurmDB history Job.
|
||||
// Uses buildHistoryJobState for nested state format: {current: ["COMPLETED"], reason: ""}.
|
||||
func (m *MockSlurm) mockJobToSlurmDBJob(mj *MockJob) slurm.Job {
|
||||
dbJob := slurm.Job{
|
||||
JobID: slurm.Ptr(mj.JobID),
|
||||
Name: slurm.Ptr(mj.Name),
|
||||
Partition: slurm.Ptr(mj.Partition),
|
||||
WorkingDirectory: slurm.Ptr(mj.WorkDir),
|
||||
Script: slurm.Ptr(mj.Script),
|
||||
State: buildHistoryJobState(mj.State),
|
||||
Time: &slurm.JobTime{
|
||||
Submission: slurm.Ptr(mj.SubmitTime.Unix()),
|
||||
},
|
||||
}
|
||||
|
||||
if mj.StartTime != nil {
|
||||
dbJob.Time.Start = slurm.Ptr(mj.StartTime.Unix())
|
||||
}
|
||||
if mj.EndTime != nil {
|
||||
dbJob.Time.End = slurm.Ptr(mj.EndTime.Unix())
|
||||
}
|
||||
if mj.ExitCode != nil {
|
||||
dbJob.ExitCode = &slurm.ProcessExitCodeVerbose{
|
||||
ReturnCode: &slurm.Uint32NoVal{Number: slurm.Ptr(int64(*mj.ExitCode))},
|
||||
}
|
||||
}
|
||||
|
||||
return dbJob
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Error helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// writeJSON writes a JSON response with the given status code.
|
||||
func writeJSON(w http.ResponseWriter, code int, v interface{}) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(code)
|
||||
json.NewEncoder(w).Encode(v)
|
||||
}
|
||||
|
||||
// writeError writes an HTTP error with an OpenapiResp body containing
|
||||
// meta and errors fields. This is critical for CheckResponse/IsNotFound
|
||||
// to work correctly — the response body must be parseable as OpenapiResp.
|
||||
func (m *MockSlurm) writeError(w http.ResponseWriter, statusCode int, message string) {
|
||||
meta := slurm.OpenapiMeta{
|
||||
Plugin: &slurm.MetaPlugin{
|
||||
Type: slurm.Ptr("openapi/v0.0.40"),
|
||||
Name: slurm.Ptr(""),
|
||||
},
|
||||
Slurm: &slurm.MetaSlurm{
|
||||
Version: &slurm.MetaSlurmVersion{
|
||||
Major: slurm.Ptr("24"),
|
||||
Micro: slurm.Ptr("0"),
|
||||
Minor: slurm.Ptr("5"),
|
||||
},
|
||||
Release: slurm.Ptr("24.05.0"),
|
||||
},
|
||||
}
|
||||
|
||||
resp := slurm.OpenapiResp{
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{
|
||||
{
|
||||
ErrorNumber: slurm.Ptr(int32(0)),
|
||||
Error: slurm.Ptr(message),
|
||||
},
|
||||
},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
|
||||
writeJSON(w, statusCode, resp)
|
||||
}
|
||||
679
internal/testutil/mockslurm/server_test.go
Normal file
679
internal/testutil/mockslurm/server_test.go
Normal file
@@ -0,0 +1,679 @@
|
||||
package mockslurm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
)
|
||||
|
||||
// setupTestClient starts a fresh MockSlurm httptest server (closed via
// t.Cleanup) and returns a slurm.Client pointed at it plus the mock itself
// for direct state manipulation in tests.
func setupTestClient(t *testing.T) (*slurm.Client, *MockSlurm) {
	t.Helper()
	srv, mock := NewMockSlurmServer()
	t.Cleanup(srv.Close)
	client, err := slurm.NewClientWithOpts(srv.URL, slurm.WithHTTPClient(srv.Client()))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	return client, mock
}
|
||||
|
||||
// submitTestJob submits a job with the given attributes through the client
// and returns the assigned job ID, failing the test on any error or a nil
// JobID in the response.
func submitTestJob(t *testing.T, client *slurm.Client, name, partition, workDir, script string) int32 {
	t.Helper()
	ctx := context.Background()
	resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
		Script: slurm.Ptr(script),
		Job: &slurm.JobDescMsg{
			Name:                    slurm.Ptr(name),
			Partition:               slurm.Ptr(partition),
			CurrentWorkingDirectory: slurm.Ptr(workDir),
		},
	})
	if err != nil {
		t.Fatalf("SubmitJob failed: %v", err)
	}
	if resp.JobID == nil {
		t.Fatal("SubmitJob returned nil JobID")
	}
	return *resp.JobID
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Submit Job
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestSubmitJob verifies a basic submission returns JobID 1, StepID "Scalar",
// and a Result echoing the same JobID.
func TestSubmitJob(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
		Script: slurm.Ptr("#!/bin/bash\necho hello"),
		Job: &slurm.JobDescMsg{
			Name:                    slurm.Ptr("test-job"),
			Partition:               slurm.Ptr("normal"),
			CurrentWorkingDirectory: slurm.Ptr("/tmp/work"),
		},
	})
	if err != nil {
		t.Fatalf("SubmitJob failed: %v", err)
	}
	if resp.JobID == nil || *resp.JobID != 1 {
		t.Errorf("JobID = %v, want 1", resp.JobID)
	}
	if resp.StepID == nil || *resp.StepID != "Scalar" {
		t.Errorf("StepID = %v, want Scalar", resp.StepID)
	}
	if resp.Result == nil || resp.Result.JobID == nil || *resp.Result.JobID != 1 {
		t.Errorf("Result.JobID = %v, want 1", resp.Result)
	}
}
|
||||
|
||||
// TestSubmitJobAutoIncrement verifies that successive submissions receive
// sequential job IDs starting at 1.
func TestSubmitJobAutoIncrement(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	for i := 1; i <= 3; i++ {
		resp, _, err := client.Jobs.SubmitJob(ctx, &slurm.JobSubmitReq{
			Script: slurm.Ptr("#!/bin/bash\necho " + strconv.Itoa(i)),
		})
		if err != nil {
			t.Fatalf("SubmitJob %d failed: %v", i, err)
		}
		if resp.JobID == nil || *resp.JobID != int32(i) {
			t.Errorf("job %d: JobID = %v, want %d", i, resp.JobID, i)
		}
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Get All Jobs
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestGetJobsEmpty verifies the active-jobs endpoint returns an empty list
// when nothing has been submitted.
func TestGetJobsEmpty(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	resp, _, err := client.Jobs.GetJobs(ctx, nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) != 0 {
		t.Errorf("len(Jobs) = %d, want 0", len(resp.Jobs))
	}
}
|
||||
|
||||
// TestGetJobsWithSubmitted verifies that two submitted jobs both appear in
// the active list with state PENDING, regardless of ordering.
func TestGetJobsWithSubmitted(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	submitTestJob(t, client, "job-a", "normal", "/tmp/a", "#!/bin/bash\ntrue")
	submitTestJob(t, client, "job-b", "gpu", "/tmp/b", "#!/bin/bash\nfalse")

	resp, _, err := client.Jobs.GetJobs(ctx, nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) != 2 {
		t.Fatalf("len(Jobs) = %d, want 2", len(resp.Jobs))
	}

	// Collect names into a set since the mock does not guarantee order.
	names := map[string]bool{}
	for _, j := range resp.Jobs {
		if j.Name != nil {
			names[*j.Name] = true
		}
		if len(j.JobState) == 0 || j.JobState[0] != "PENDING" {
			t.Errorf("JobState = %v, want [PENDING]", j.JobState)
		}
	}
	if !names["job-a"] || !names["job-b"] {
		t.Errorf("expected job-a and job-b, got %v", names)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Get Job By ID
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestGetJobByID verifies that fetching a submitted job by ID round-trips
// its name, partition, working directory, and submit time.
func TestGetJobByID(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "single-job", "normal", "/tmp/work", "#!/bin/bash\necho hi")

	resp, _, err := client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob failed: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
	}

	job := resp.Jobs[0]
	if job.JobID == nil || *job.JobID != jobID {
		t.Errorf("JobID = %v, want %d", job.JobID, jobID)
	}
	if job.Name == nil || *job.Name != "single-job" {
		t.Errorf("Name = %v, want single-job", job.Name)
	}
	if job.Partition == nil || *job.Partition != "normal" {
		t.Errorf("Partition = %v, want normal", job.Partition)
	}
	if job.CurrentWorkingDirectory == nil || *job.CurrentWorkingDirectory != "/tmp/work" {
		t.Errorf("CurrentWorkingDirectory = %v, want /tmp/work", job.CurrentWorkingDirectory)
	}
	if job.SubmitTime == nil || job.SubmitTime.Number == nil {
		t.Error("SubmitTime should be non-nil")
	}
}
|
||||
|
||||
// TestGetJobByIDNotFound verifies that an unknown job ID yields an error
// recognized by slurm.IsNotFound (HTTP 404 with parseable OpenapiResp body).
func TestGetJobByIDNotFound(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	_, _, err := client.Jobs.GetJob(ctx, "999", nil)
	if err == nil {
		t.Fatal("expected error for unknown job ID, got nil")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error type = %T, want SlurmAPIError with 404", err)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Delete Job (triggers eviction)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestDeleteJob verifies that deleting a job moves it from the active queue
// to history with state CANCELLED.
func TestDeleteJob(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "cancel-me", "normal", "/tmp", "#!/bin/bash\nsleep 99")

	resp, _, err := client.Jobs.DeleteJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("DeleteJob failed: %v", err)
	}
	if resp == nil {
		t.Fatal("DeleteJob returned nil response")
	}

	if len(mock.GetAllActiveJobs()) != 0 {
		t.Error("active jobs should be empty after delete")
	}
	if len(mock.GetAllHistoryJobs()) != 1 {
		t.Error("history should contain 1 job after delete")
	}
	if mock.GetJobState(jobID) != "CANCELLED" {
		t.Errorf("job state = %q, want CANCELLED", mock.GetJobState(jobID))
	}
}
|
||||
|
||||
// TestDeleteJobEvictsFromActive verifies a deleted job is no longer reachable
// through the active GetJob endpoint (404 not-found).
func TestDeleteJobEvictsFromActive(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "to-delete", "normal", "/tmp", "#!/bin/bash\ntrue")

	_, _, err := client.Jobs.DeleteJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("DeleteJob failed: %v", err)
	}

	_, _, err = client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err == nil {
		t.Fatal("expected 404 after delete, got nil error")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error = %v, want not-found", err)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Job State format ([]string, not bare string)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestJobStateIsStringArray verifies active-job state is serialized as a
// non-empty JSON string array (["PENDING"]), not a bare string — the format
// downstream state mapping depends on.
func TestJobStateIsStringArray(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	submitTestJob(t, client, "state-test", "normal", "/tmp", "#!/bin/bash\necho")

	resp, _, err := client.Jobs.GetJobs(ctx, nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) == 0 {
		t.Fatal("expected at least one job")
	}

	job := resp.Jobs[0]
	if len(job.JobState) == 0 {
		t.Fatal("JobState is empty — must be non-empty []string to avoid mapSlurmStateToTaskStatus silent failure")
	}
	if job.JobState[0] != "PENDING" {
		t.Errorf("JobState[0] = %q, want %q", job.JobState[0], "PENDING")
	}

	// Check the wire format, not just the decoded struct.
	raw, err := json.Marshal(job)
	if err != nil {
		t.Fatalf("Marshal job: %v", err)
	}
	if !strings.Contains(string(raw), `"job_state":["PENDING"]`) {
		t.Errorf("JobState JSON = %s, want array format [\"PENDING\"]", string(raw))
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P0: Full Job Lifecycle
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestJobLifecycle walks a job through PENDING -> RUNNING -> COMPLETED and
// verifies: active visibility while non-terminal, StartTime once RUNNING,
// eviction from the active queue on COMPLETED, and the history endpoint
// reporting nested state plus exit code 0.
func TestJobLifecycle(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "lifecycle", "normal", "/tmp/lc", "#!/bin/bash\necho lifecycle")

	// Verify PENDING in active
	resp, _, err := client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob PENDING: %v", err)
	}
	if resp.Jobs[0].JobState[0] != "PENDING" {
		t.Errorf("initial state = %v, want PENDING", resp.Jobs[0].JobState)
	}
	if len(mock.GetAllActiveJobs()) != 1 {
		t.Error("should have 1 active job")
	}

	// Transition to RUNNING
	mock.SetJobState(jobID, "RUNNING")
	resp, _, err = client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err != nil {
		t.Fatalf("GetJob RUNNING: %v", err)
	}
	if resp.Jobs[0].JobState[0] != "RUNNING" {
		t.Errorf("running state = %v, want RUNNING", resp.Jobs[0].JobState)
	}
	if resp.Jobs[0].StartTime == nil || resp.Jobs[0].StartTime.Number == nil {
		t.Error("StartTime should be set for RUNNING job")
	}
	if len(mock.GetAllActiveJobs()) != 1 {
		t.Error("should still have 1 active job after RUNNING")
	}

	// Transition to COMPLETED — triggers eviction
	mock.SetJobState(jobID, "COMPLETED")
	_, _, err = client.Jobs.GetJob(ctx, strconv.Itoa(int(jobID)), nil)
	if err == nil {
		t.Fatal("expected 404 after COMPLETED (evicted from active)")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error = %v, want not-found", err)
	}

	if len(mock.GetAllActiveJobs()) != 0 {
		t.Error("active jobs should be empty after COMPLETED")
	}
	if len(mock.GetAllHistoryJobs()) != 1 {
		t.Error("history should contain 1 job after COMPLETED")
	}
	if mock.GetJobState(jobID) != "COMPLETED" {
		t.Errorf("state = %q, want COMPLETED", mock.GetJobState(jobID))
	}

	// Verify history endpoint returns the job
	histResp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
	if err != nil {
		t.Fatalf("SlurmdbJobs.GetJob: %v", err)
	}
	if len(histResp.Jobs) != 1 {
		t.Fatalf("history jobs = %d, want 1", len(histResp.Jobs))
	}
	histJob := histResp.Jobs[0]
	if histJob.State == nil || len(histJob.State.Current) == 0 || histJob.State.Current[0] != "COMPLETED" {
		t.Errorf("history state = %v, want current=[COMPLETED]", histJob.State)
	}
	if histJob.ExitCode == nil || histJob.ExitCode.ReturnCode == nil || histJob.ExitCode.ReturnCode.Number == nil {
		t.Error("history ExitCode should be set")
	} else if *histJob.ExitCode.ReturnCode.Number != 0 {
		t.Errorf("exit code = %d, want 0 for COMPLETED", *histJob.ExitCode.ReturnCode.Number)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: Nodes
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestGetNodes(t *testing.T) {
|
||||
client, mock := setupTestClient(t)
|
||||
_ = mock
|
||||
ctx := context.Background()
|
||||
|
||||
resp, _, err := client.Nodes.GetNodes(ctx, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetNodes failed: %v", err)
|
||||
}
|
||||
if resp.Nodes == nil {
|
||||
t.Fatal("Nodes is nil")
|
||||
}
|
||||
if len(*resp.Nodes) != 3 {
|
||||
t.Errorf("len(Nodes) = %d, want 3", len(*resp.Nodes))
|
||||
}
|
||||
names := make([]string, len(*resp.Nodes))
|
||||
for i, n := range *resp.Nodes {
|
||||
if n.Name == nil {
|
||||
t.Errorf("node %d: Name is nil", i)
|
||||
} else {
|
||||
names[i] = *n.Name
|
||||
}
|
||||
}
|
||||
for _, expected := range []string{"node01", "node02", "node03"} {
|
||||
found := false
|
||||
for _, n := range names {
|
||||
if n == expected {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("missing node %q in %v", expected, names)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetNode verifies fetching a single node by name returns exactly that
// node.
func TestGetNode(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	resp, _, err := client.Nodes.GetNode(ctx, "node02", nil)
	if err != nil {
		t.Fatalf("GetNode failed: %v", err)
	}
	if resp.Nodes == nil || len(*resp.Nodes) != 1 {
		t.Fatalf("expected 1 node, got %v", resp.Nodes)
	}
	if (*resp.Nodes)[0].Name == nil || *(*resp.Nodes)[0].Name != "node02" {
		t.Errorf("Name = %v, want node02", (*resp.Nodes)[0].Name)
	}
}
|
||||
|
||||
// TestGetNodeNotFound verifies an unknown node name yields a not-found error.
func TestGetNodeNotFound(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	_, _, err := client.Nodes.GetNode(ctx, "nonexistent", nil)
	if err == nil {
		t.Fatal("expected error for unknown node, got nil")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error = %v, want not-found", err)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: Partitions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestGetPartitions verifies the partitions endpoint returns the two default
// mock partitions ("normal" and "gpu").
func TestGetPartitions(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	resp, _, err := client.Partitions.GetPartitions(ctx, nil)
	if err != nil {
		t.Fatalf("GetPartitions failed: %v", err)
	}
	if resp.Partitions == nil {
		t.Fatal("Partitions is nil")
	}
	if len(*resp.Partitions) != 2 {
		t.Errorf("len(Partitions) = %d, want 2", len(*resp.Partitions))
	}
	names := map[string]bool{}
	for _, p := range *resp.Partitions {
		if p.Name != nil {
			names[*p.Name] = true
		}
	}
	if !names["normal"] || !names["gpu"] {
		t.Errorf("expected normal and gpu partitions, got %v", names)
	}
}
|
||||
|
||||
// TestGetPartition verifies fetching a single partition by name returns
// exactly that partition.
func TestGetPartition(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	resp, _, err := client.Partitions.GetPartition(ctx, "gpu", nil)
	if err != nil {
		t.Fatalf("GetPartition failed: %v", err)
	}
	if resp.Partitions == nil || len(*resp.Partitions) != 1 {
		t.Fatalf("expected 1 partition, got %v", resp.Partitions)
	}
	if (*resp.Partitions)[0].Name == nil || *(*resp.Partitions)[0].Name != "gpu" {
		t.Errorf("Name = %v, want gpu", (*resp.Partitions)[0].Name)
	}
}
|
||||
|
||||
// TestGetPartitionNotFound verifies an unknown partition name yields a
// not-found error.
func TestGetPartitionNotFound(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	_, _, err := client.Partitions.GetPartition(ctx, "nonexistent", nil)
	if err == nil {
		t.Fatal("expected error for unknown partition, got nil")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error = %v, want not-found", err)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: Diag
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestGetDiag verifies the diag endpoint returns the canned statistics
// (ServerThreadCount 3, AgentQueueSize 0) from NewDiagResponse.
func TestGetDiag(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	resp, _, err := client.Diag.GetDiag(ctx)
	if err != nil {
		t.Fatalf("GetDiag failed: %v", err)
	}
	if resp.Statistics == nil {
		t.Fatal("Statistics is nil")
	}
	if resp.Statistics.ServerThreadCount == nil || *resp.Statistics.ServerThreadCount != 3 {
		t.Errorf("ServerThreadCount = %v, want 3", resp.Statistics.ServerThreadCount)
	}
	if resp.Statistics.AgentQueueSize == nil || *resp.Statistics.AgentQueueSize != 0 {
		t.Errorf("AgentQueueSize = %v, want 0", resp.Statistics.AgentQueueSize)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// P1: SlurmDB Jobs
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestSlurmdbGetJobsEmpty verifies the history endpoint returns an empty
// list before any job has been evicted to history.
func TestSlurmdbGetJobsEmpty(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	resp, _, err := client.SlurmdbJobs.GetJobs(ctx, nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) != 0 {
		t.Errorf("len(Jobs) = %d, want 0 (no history)", len(resp.Jobs))
	}
}
|
||||
|
||||
// TestSlurmdbGetJobsAfterEviction verifies a job driven to COMPLETED shows
// up in the history list with nested state and a submission timestamp.
func TestSlurmdbGetJobsAfterEviction(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "hist-job", "normal", "/tmp/h", "#!/bin/bash\necho hist")
	mock.SetJobState(jobID, "RUNNING")
	mock.SetJobState(jobID, "COMPLETED")

	resp, _, err := client.SlurmdbJobs.GetJobs(ctx, nil)
	if err != nil {
		t.Fatalf("GetJobs failed: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
	}

	job := resp.Jobs[0]
	if job.Name == nil || *job.Name != "hist-job" {
		t.Errorf("Name = %v, want hist-job", job.Name)
	}
	if job.State == nil || len(job.State.Current) == 0 || job.State.Current[0] != "COMPLETED" {
		t.Errorf("State = %v, want current=[COMPLETED]", job.State)
	}
	if job.Time == nil || job.Time.Submission == nil {
		t.Error("Time.Submission should be set")
	}
}
|
||||
|
||||
// TestSlurmdbGetJobByID verifies a FAILED job is retrievable from history by
// ID with nested state [FAILED] and exit code 1.
func TestSlurmdbGetJobByID(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "single-hist", "normal", "/tmp/sh", "#!/bin/bash\nexit 1")
	mock.SetJobState(jobID, "FAILED")

	resp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
	if err != nil {
		t.Fatalf("GetJob failed: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1", len(resp.Jobs))
	}

	job := resp.Jobs[0]
	if job.JobID == nil || *job.JobID != jobID {
		t.Errorf("JobID = %v, want %d", job.JobID, jobID)
	}
	if job.State == nil || len(job.State.Current) == 0 || job.State.Current[0] != "FAILED" {
		t.Errorf("State = %v, want current=[FAILED]", job.State)
	}
	if job.ExitCode == nil || job.ExitCode.ReturnCode == nil || job.ExitCode.ReturnCode.Number == nil {
		t.Error("ExitCode should be set")
	} else if *job.ExitCode.ReturnCode.Number != 1 {
		t.Errorf("exit code = %d, want 1 for FAILED", *job.ExitCode.ReturnCode.Number)
	}
}
|
||||
|
||||
// TestSlurmdbGetJobNotFound verifies an unknown history job ID yields a
// not-found error.
func TestSlurmdbGetJobNotFound(t *testing.T) {
	client, _ := setupTestClient(t)
	ctx := context.Background()

	_, _, err := client.SlurmdbJobs.GetJob(ctx, "999")
	if err == nil {
		t.Fatal("expected error for unknown history job, got nil")
	}
	if !slurm.IsNotFound(err) {
		t.Errorf("error = %v, want not-found", err)
	}
}
|
||||
|
||||
// TestSlurmdbJobStateIsNested verifies history-job state serializes in the
// nested SlurmDB format {current: ["COMPLETED"], reason: ""}, both in the
// decoded struct and on the wire.
func TestSlurmdbJobStateIsNested(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	jobID := submitTestJob(t, client, "nested-state", "gpu", "/tmp/ns", "#!/bin/bash\ntrue")
	mock.SetJobState(jobID, "COMPLETED")

	resp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
	if err != nil {
		t.Fatalf("GetJob failed: %v", err)
	}

	job := resp.Jobs[0]
	if job.State == nil {
		t.Fatal("State is nil — must be nested {current: [...], reason: \"\"}")
	}
	if len(job.State.Current) == 0 {
		t.Fatal("State.Current is empty")
	}
	if job.State.Current[0] != "COMPLETED" {
		t.Errorf("State.Current[0] = %q, want COMPLETED", job.State.Current[0])
	}
	if job.State.Reason == nil || *job.State.Reason != "" {
		t.Errorf("State.Reason = %v, want empty string", job.State.Reason)
	}

	// Check the wire format, not just the decoded struct.
	raw, err := json.Marshal(job)
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}
	rawStr := string(raw)
	if !strings.Contains(rawStr, `"state":{"current":["COMPLETED"]`) {
		t.Errorf("state JSON should use nested format, got: %s", rawStr)
	}
}
|
||||
|
||||
// TestSlurmdbJobsFilterByName verifies the job_name query filter returns only
// the matching history job.
func TestSlurmdbJobsFilterByName(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	id1 := submitTestJob(t, client, "match-me", "normal", "/tmp", "#!/bin/bash\ntrue")
	id2 := submitTestJob(t, client, "other-job", "normal", "/tmp", "#!/bin/bash\ntrue")
	mock.SetJobState(id1, "COMPLETED")
	mock.SetJobState(id2, "COMPLETED")

	resp, _, err := client.SlurmdbJobs.GetJobs(ctx, &slurm.GetSlurmdbJobsOptions{
		JobName: slurm.Ptr("match-me"),
	})
	if err != nil {
		t.Fatalf("GetJobs with filter: %v", err)
	}
	if len(resp.Jobs) != 1 {
		t.Fatalf("len(Jobs) = %d, want 1 (filtered by name)", len(resp.Jobs))
	}
	if resp.Jobs[0].Name == nil || *resp.Jobs[0].Name != "match-me" {
		t.Errorf("Name = %v, want match-me", resp.Jobs[0].Name)
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SetJobState terminal state exit codes
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestSetJobStateExitCodes verifies the exit code assigned on each terminal
// state transition: 0 for COMPLETED, 1 for FAILED/CANCELLED/TIMEOUT.
func TestSetJobStateExitCodes(t *testing.T) {
	client, mock := setupTestClient(t)
	ctx := context.Background()

	cases := []struct {
		state    string
		wantExit int64
	}{
		{"COMPLETED", 0},
		{"FAILED", 1},
		{"CANCELLED", 1},
		{"TIMEOUT", 1},
	}

	for i, tc := range cases {
		jobID := submitTestJob(t, client, "exit-"+strconv.Itoa(i), "normal", "/tmp", "#!/bin/bash\ntrue")
		mock.SetJobState(jobID, tc.state)

		resp, _, err := client.SlurmdbJobs.GetJob(ctx, strconv.Itoa(int(jobID)))
		if err != nil {
			t.Fatalf("GetJob(%d) %s: %v", jobID, tc.state, err)
		}
		job := resp.Jobs[0]
		if job.ExitCode == nil || job.ExitCode.ReturnCode == nil || job.ExitCode.ReturnCode.Number == nil {
			t.Errorf("%s: ExitCode not set", tc.state)
			continue
		}
		if *job.ExitCode.ReturnCode.Number != tc.wantExit {
			t.Errorf("%s: exit code = %d, want %d", tc.state, *job.ExitCode.ReturnCode.Number, tc.wantExit)
		}
	}
}
|
||||
134
internal/testutil/mockslurm/types.go
Normal file
134
internal/testutil/mockslurm/types.go
Normal file
@@ -0,0 +1,134 @@
|
||||
// Package mockslurm provides response builder helpers that generate JSON
|
||||
// matching Openapi* types from the internal/slurm package.
|
||||
package mockslurm
|
||||
|
||||
import (
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
)
|
||||
|
||||
// newMeta returns standard OpenapiMeta with plugin type "openapi/v0.0.40"
|
||||
// and Slurm version 24.05.0.
|
||||
// newMeta returns standard OpenapiMeta with plugin type "openapi/v0.0.40"
// and Slurm version 24.05.0, so mock payloads resemble output from a
// slurmrestd of that version.
func newMeta() slurm.OpenapiMeta {
	return slurm.OpenapiMeta{
		Plugin: &slurm.MetaPlugin{
			Type:       slurm.Ptr("openapi/v0.0.40"),
			Name:       slurm.Ptr("slurmrestd"),
			DataParser: slurm.Ptr("json/v0.0.40"),
		},
		Slurm: &slurm.MetaSlurm{
			Version: &slurm.MetaSlurmVersion{
				Major: slurm.Ptr("24"),
				Micro: slurm.Ptr("0"),
				Minor: slurm.Ptr("5"),
			},
			Release: slurm.Ptr("24.05.0"),
		},
	}
}
|
||||
|
||||
// NewSubmitResponse builds an OpenapiJobSubmitResponse with the given jobID.
|
||||
func NewSubmitResponse(jobID int32) slurm.OpenapiJobSubmitResponse {
|
||||
return slurm.OpenapiJobSubmitResponse{
|
||||
Result: &slurm.JobSubmitResponseMsg{
|
||||
JobID: slurm.Ptr(jobID),
|
||||
},
|
||||
JobID: slurm.Ptr(jobID),
|
||||
StepID: slurm.Ptr("Scalar"),
|
||||
Meta: &slurm.OpenapiMeta{},
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewJobInfoResponse builds an OpenapiJobInfoResp wrapping the given jobs.
|
||||
func NewJobInfoResponse(jobs []slurm.JobInfo) slurm.OpenapiJobInfoResp {
|
||||
meta := newMeta()
|
||||
return slurm.OpenapiJobInfoResp{
|
||||
Jobs: jobs,
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewJobListResponse is an alias for NewJobInfoResponse.
|
||||
// NewJobListResponse is an alias for NewJobInfoResponse; it produces an
// identical response and exists only as an alternate name.
func NewJobListResponse(jobs []slurm.JobInfo) slurm.OpenapiJobInfoResp {
	return NewJobInfoResponse(jobs)
}
|
||||
|
||||
// NewDeleteResponse builds an OpenapiResp with meta and empty errors/warnings.
|
||||
func NewDeleteResponse() slurm.OpenapiResp {
|
||||
meta := newMeta()
|
||||
return slurm.OpenapiResp{
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewNodeResponse builds an OpenapiNodesResp wrapping the given nodes.
|
||||
func NewNodeResponse(nodes []slurm.Node) slurm.OpenapiNodesResp {
|
||||
meta := newMeta()
|
||||
n := slurm.Nodes(nodes)
|
||||
return slurm.OpenapiNodesResp{
|
||||
Nodes: &n,
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewPartitionResponse builds an OpenapiPartitionResp wrapping the given partitions.
|
||||
func NewPartitionResponse(partitions []slurm.PartitionInfo) slurm.OpenapiPartitionResp {
|
||||
meta := newMeta()
|
||||
p := slurm.PartitionInfoMsg(partitions)
|
||||
return slurm.OpenapiPartitionResp{
|
||||
Partitions: &p,
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewDiagResponse builds an OpenapiDiagResp with stats and meta.
|
||||
func NewDiagResponse() slurm.OpenapiDiagResp {
|
||||
meta := newMeta()
|
||||
return slurm.OpenapiDiagResp{
|
||||
Statistics: &slurm.StatsMsg{
|
||||
ServerThreadCount: slurm.Ptr(int32(3)),
|
||||
AgentQueueSize: slurm.Ptr(int32(0)),
|
||||
JobsRunning: slurm.Ptr(int32(0)),
|
||||
JobsPending: slurm.Ptr(int32(0)),
|
||||
ScheduleQueueLength: slurm.Ptr(int32(0)),
|
||||
},
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewJobHistoryResponse builds an OpenapiSlurmdbdJobsResp wrapping the given SlurmDBD jobs.
|
||||
func NewJobHistoryResponse(jobs []slurm.Job) slurm.OpenapiSlurmdbdJobsResp {
|
||||
meta := newMeta()
|
||||
return slurm.OpenapiSlurmdbdJobsResp{
|
||||
Jobs: jobs,
|
||||
Meta: &meta,
|
||||
Errors: slurm.OpenapiErrors{},
|
||||
Warnings: slurm.OpenapiWarnings{},
|
||||
}
|
||||
}
|
||||
|
||||
// buildActiveJobState returns the flat string array the active-jobs
// endpoint uses for the job_state field (e.g. ["RUNNING"]); it simply
// forwards its variadic arguments.
func buildActiveJobState(states ...string) []string {
	return states
}
|
||||
|
||||
// buildHistoryJobState returns a nested JobState object for the SlurmDB
|
||||
// history endpoint (e.g. {current: ["COMPLETED"], reason: ""}).
|
||||
func buildHistoryJobState(states ...string) *slurm.JobState {
|
||||
return &slurm.JobState{
|
||||
Current: states,
|
||||
Reason: slurm.Ptr(""),
|
||||
}
|
||||
}
|
||||
291
internal/testutil/mockslurm/types_test.go
Normal file
291
internal/testutil/mockslurm/types_test.go
Normal file
@@ -0,0 +1,291 @@
|
||||
package mockslurm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
)
|
||||
|
||||
// TestNewSubmitResponse checks that the submit builder echoes the job ID in
// both the top-level job_id and the nested result.job_id, and that the wire
// format uses the expected snake_case keys.
func TestNewSubmitResponse(t *testing.T) {
	resp := NewSubmitResponse(42)
	data, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}

	// Round-trip through the typed struct.
	var got slurm.OpenapiJobSubmitResponse
	if err := json.Unmarshal(data, &got); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}

	if got.Result == nil || got.Result.JobID == nil || *got.Result.JobID != 42 {
		t.Errorf("result.job_id = %v, want 42", got.Result)
	}
	if got.JobID == nil || *got.JobID != 42 {
		t.Errorf("job_id = %v, want 42", got.JobID)
	}

	// Raw-JSON check: key names must be snake_case on the wire.
	var raw map[string]interface{}
	json.Unmarshal(data, &raw) // error ignored: the same bytes unmarshaled successfully above
	if _, ok := raw["job_id"]; !ok {
		t.Error("missing snake_case field 'job_id'")
	}
	if _, ok := raw["result"]; !ok {
		t.Error("missing field 'result'")
	}
}
|
||||
|
||||
// TestNewJobInfoResponse verifies both the typed round-trip and the raw
// snake_case wire format, including that job_state stays a flat string
// array (the active-endpoint shape, not the nested history object).
func TestNewJobInfoResponse(t *testing.T) {
	jobs := []slurm.JobInfo{
		{JobID: slurm.Ptr(int32(1)), Name: slurm.Ptr("test1"), JobState: []string{"RUNNING"}},
		{JobID: slurm.Ptr(int32(2)), Name: slurm.Ptr("test2"), JobState: []string{"PENDING"}},
	}
	resp := NewJobInfoResponse(jobs)
	data, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}

	var got slurm.OpenapiJobInfoResp
	if err := json.Unmarshal(data, &got); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}

	if len(got.Jobs) != 2 {
		t.Fatalf("len(jobs) = %d, want 2", len(got.Jobs))
	}
	if *got.Jobs[0].JobID != 1 {
		t.Errorf("jobs[0].job_id = %d, want 1", *got.Jobs[0].JobID)
	}
	if got.Jobs[0].JobState[0] != "RUNNING" {
		t.Errorf("jobs[0].job_state = %v, want [RUNNING]", got.Jobs[0].JobState)
	}

	// Raw-JSON check: job_state must serialize as a flat array of strings.
	var raw map[string]interface{}
	json.Unmarshal(data, &raw) // error ignored: the same bytes unmarshaled successfully above
	jobsRaw := raw["jobs"].([]interface{})
	job0 := jobsRaw[0].(map[string]interface{})
	stateRaw, ok := job0["job_state"]
	if !ok {
		t.Error("missing snake_case field 'job_state'")
	}
	stateArr, ok := stateRaw.([]interface{})
	if !ok || len(stateArr) != 1 || stateArr[0].(string) != "RUNNING" {
		t.Errorf("job_state = %v, want array [\"RUNNING\"]", stateRaw)
	}
}
|
||||
|
||||
func TestNewJobListResponse(t *testing.T) {
|
||||
jobs := []slurm.JobInfo{
|
||||
{JobID: slurm.Ptr(int32(10)), Name: slurm.Ptr("listjob")},
|
||||
}
|
||||
resp := NewJobListResponse(jobs)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiJobInfoResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
if len(got.Jobs) != 1 || *got.Jobs[0].JobID != 10 {
|
||||
t.Errorf("jobs = %+v, want single job with id 10", got.Jobs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDeleteResponse(t *testing.T) {
|
||||
resp := NewDeleteResponse()
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Meta == nil {
|
||||
t.Error("meta is nil")
|
||||
}
|
||||
if len(got.Errors) != 0 {
|
||||
t.Errorf("errors = %v, want empty", got.Errors)
|
||||
}
|
||||
if len(got.Warnings) != 0 {
|
||||
t.Errorf("warnings = %v, want empty", got.Warnings)
|
||||
}
|
||||
|
||||
if got.Meta.Plugin == nil || got.Meta.Plugin.Type == nil || *got.Meta.Plugin.Type != "openapi/v0.0.40" {
|
||||
t.Error("meta.plugin.type missing or wrong")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewNodeResponse(t *testing.T) {
|
||||
nodes := []slurm.Node{
|
||||
{Name: slurm.Ptr("node1"), State: []string{"IDLE"}},
|
||||
{Name: slurm.Ptr("node2"), State: []string{"ALLOCATED"}},
|
||||
}
|
||||
resp := NewNodeResponse(nodes)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiNodesResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Nodes == nil || len(*got.Nodes) != 2 {
|
||||
t.Fatalf("nodes = %v, want 2 nodes", got.Nodes)
|
||||
}
|
||||
gotNodes := *got.Nodes
|
||||
if *gotNodes[0].Name != "node1" {
|
||||
t.Errorf("nodes[0].name = %s, want node1", *gotNodes[0].Name)
|
||||
}
|
||||
if gotNodes[0].State[0] != "IDLE" {
|
||||
t.Errorf("nodes[0].state = %v, want [IDLE]", gotNodes[0].State)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewPartitionResponse(t *testing.T) {
|
||||
partitions := []slurm.PartitionInfo{
|
||||
{Name: slurm.Ptr("normal")},
|
||||
{Name: slurm.Ptr("debug")},
|
||||
}
|
||||
resp := NewPartitionResponse(partitions)
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiPartitionResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Partitions == nil || len(*got.Partitions) != 2 {
|
||||
t.Fatalf("partitions = %v, want 2", got.Partitions)
|
||||
}
|
||||
gotParts := *got.Partitions
|
||||
if *gotParts[0].Name != "normal" {
|
||||
t.Errorf("partitions[0].name = %s, want normal", *gotParts[0].Name)
|
||||
}
|
||||
|
||||
var raw map[string]interface{}
|
||||
json.Unmarshal(data, &raw)
|
||||
if _, ok := raw["partitions"]; !ok {
|
||||
t.Error("missing snake_case field 'partitions'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDiagResponse(t *testing.T) {
|
||||
resp := NewDiagResponse()
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
|
||||
var got slurm.OpenapiDiagResp
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if got.Statistics == nil {
|
||||
t.Fatal("statistics is nil")
|
||||
}
|
||||
if got.Statistics.ServerThreadCount == nil || *got.Statistics.ServerThreadCount != 3 {
|
||||
t.Errorf("statistics.server_thread_count = %v, want 3", got.Statistics.ServerThreadCount)
|
||||
}
|
||||
if got.Meta == nil {
|
||||
t.Error("meta is nil")
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewJobHistoryResponse verifies the SlurmDBD history shape: jobs carry
// a nested state object ({current: [...], reason: ...}) rather than the
// flat job_state array used by the active-jobs endpoint.
func TestNewJobHistoryResponse(t *testing.T) {
	jobs := []slurm.Job{
		{JobID: slurm.Ptr(int32(100)), Name: slurm.Ptr("histjob"), State: &slurm.JobState{Current: []string{"COMPLETED"}}},
	}
	resp := NewJobHistoryResponse(jobs)
	data, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}

	var got slurm.OpenapiSlurmdbdJobsResp
	if err := json.Unmarshal(data, &got); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}

	if len(got.Jobs) != 1 {
		t.Fatalf("len(jobs) = %d, want 1", len(got.Jobs))
	}
	if *got.Jobs[0].JobID != 100 {
		t.Errorf("jobs[0].job_id = %d, want 100", *got.Jobs[0].JobID)
	}
	if got.Jobs[0].State == nil || len(got.Jobs[0].State.Current) != 1 || got.Jobs[0].State.Current[0] != "COMPLETED" {
		t.Errorf("jobs[0].state = %+v, want current=[COMPLETED]", got.Jobs[0].State)
	}

	// Raw-JSON check: state.current must be a nested array on the wire.
	var raw map[string]interface{}
	json.Unmarshal(data, &raw) // error ignored: the same bytes unmarshaled successfully above
	jobsRaw := raw["jobs"].([]interface{})
	job0 := jobsRaw[0].(map[string]interface{})
	stateRaw := job0["state"].(map[string]interface{})
	currentRaw := stateRaw["current"].([]interface{})
	if currentRaw[0].(string) != "COMPLETED" {
		t.Errorf("state.current = %v, want [COMPLETED]", currentRaw)
	}
}
|
||||
|
||||
func TestBuildActiveJobState(t *testing.T) {
|
||||
states := buildActiveJobState("RUNNING", "COMPLETING")
|
||||
if len(states) != 2 {
|
||||
t.Fatalf("len = %d, want 2", len(states))
|
||||
}
|
||||
if states[0] != "RUNNING" || states[1] != "COMPLETING" {
|
||||
t.Errorf("states = %v, want [RUNNING, COMPLETING]", states)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildHistoryJobState(t *testing.T) {
|
||||
state := buildHistoryJobState("COMPLETED")
|
||||
if state == nil {
|
||||
t.Fatal("state is nil")
|
||||
}
|
||||
if len(state.Current) != 1 || state.Current[0] != "COMPLETED" {
|
||||
t.Errorf("current = %v, want [COMPLETED]", state.Current)
|
||||
}
|
||||
if state.Reason == nil || *state.Reason != "" {
|
||||
t.Errorf("reason = %v, want empty string", state.Reason)
|
||||
}
|
||||
|
||||
data, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
var raw map[string]interface{}
|
||||
json.Unmarshal(data, &raw)
|
||||
current, ok := raw["current"].([]interface{})
|
||||
if !ok {
|
||||
t.Fatalf("current is not an array: %v", raw["current"])
|
||||
}
|
||||
if current[0].(string) != "COMPLETED" {
|
||||
t.Errorf("current[0] = %v, want COMPLETED", current[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewMetaFields(t *testing.T) {
|
||||
meta := newMeta()
|
||||
if meta.Plugin == nil || *meta.Plugin.Type != "openapi/v0.0.40" {
|
||||
t.Error("plugin.type not set correctly")
|
||||
}
|
||||
if meta.Slurm == nil || meta.Slurm.Version == nil {
|
||||
t.Fatal("slurm.version is nil")
|
||||
}
|
||||
if *meta.Slurm.Version.Major != "24" || *meta.Slurm.Version.Minor != "5" || *meta.Slurm.Version.Micro != "0" {
|
||||
t.Errorf("slurm.version = %v, want 24.5.0", meta.Slurm.Version)
|
||||
}
|
||||
}
|
||||
405
internal/testutil/testenv/env.go
Normal file
405
internal/testutil/testenv/env.go
Normal file
@@ -0,0 +1,405 @@
|
||||
// Package testenv provides a complete test environment factory that wires up
|
||||
// SQLite DB + MockSlurm + MockMinIO + all Stores/Services/Handlers + httptest Server.
|
||||
package testenv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"gcy_hpc_server/internal/app"
|
||||
"gcy_hpc_server/internal/config"
|
||||
"gcy_hpc_server/internal/handler"
|
||||
"gcy_hpc_server/internal/model"
|
||||
"gcy_hpc_server/internal/server"
|
||||
"gcy_hpc_server/internal/service"
|
||||
"gcy_hpc_server/internal/slurm"
|
||||
"gcy_hpc_server/internal/storage"
|
||||
"gcy_hpc_server/internal/store"
|
||||
"gcy_hpc_server/internal/testutil/mockminio"
|
||||
"gcy_hpc_server/internal/testutil/mockslurm"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// TestEnv holds a fully wired test environment with all dependencies:
// an in-memory SQLite DB, a MockSlurm REST server, an in-memory MinIO
// replacement, every store/service/handler, and an httptest API server.
type TestEnv struct {
	DB        *gorm.DB
	MockSlurm *mockslurm.MockSlurm
	MockMinIO *mockminio.InMemoryStorage

	// Internal: resources owned by the environment.
	mockSlurmSrv *httptest.Server
	srv          *httptest.Server
	poller       *app.TaskPoller

	// Stores (one per persisted model).
	appStore    *store.ApplicationStore
	taskStore   *store.TaskStore
	fileStore   *store.FileStore
	blobStore   *store.BlobStore
	uploadStore *store.UploadStore
	folderStore *store.FolderStore

	// Services, wired against the stores and mocks above.
	jobSvc     *service.JobService
	clusterSvc *service.ClusterService
	folderSvc  *service.FolderService
	stagingSvc *service.FileStagingService
	taskSvc    *service.TaskService
	appSvc     *service.ApplicationService
	uploadSvc  *service.UploadService
	fileSvc    *service.FileService

	// HTTP handlers mounted on the router.
	jobH     *handler.JobHandler
	clusterH *handler.ClusterHandler
	appH     *handler.ApplicationHandler
	uploadH  *handler.UploadHandler
	fileH    *handler.FileHandler
	folderH  *handler.FolderHandler
	taskH    *handler.TaskHandler

	// workDir is the scratch directory for job scripts; removed on cleanup.
	workDir string
	logger  *zap.Logger
}
|
||||
|
||||
// Option configures a TestEnv during construction.
type Option func(*testEnvConfig)

// testEnvConfig collects settings applied by Options before wiring begins.
type testEnvConfig struct {
	workDir  string // custom work directory; empty means create a temp dir
	debugLog bool   // true switches logging from no-op to example output
}

// WithWorkDir overrides the default temporary work directory.
func WithWorkDir(path string) Option {
	return func(c *testEnvConfig) { c.workDir = path }
}

// WithDebugLogging switches from zap.NewNop() to zap.NewExample().
func WithDebugLogging() Option {
	return func(c *testEnvConfig) { c.debugLog = true }
}
|
||||
|
||||
// NewTestEnv creates a fully wired test environment.
|
||||
// t.Cleanup is registered for all resources — callers do not need to clean up.
|
||||
func NewTestEnv(t interface {
|
||||
Fatalf(format string, args ...interface{})
|
||||
}, opts ...Option) *TestEnv {
|
||||
cfg := &testEnvConfig{}
|
||||
for _, o := range opts {
|
||||
o(cfg)
|
||||
}
|
||||
|
||||
var logger *zap.Logger
|
||||
if cfg.debugLog {
|
||||
logger = zap.NewExample()
|
||||
} else {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
// 1. SQLite in-memory DB + AutoMigrate
|
||||
dbName := fmt.Sprintf("file:testenv-%d?mode=memory&cache=shared", rand.Int63())
|
||||
db, err := gorm.Open(sqlite.Open(dbName), &gorm.Config{})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open SQLite: %v", err)
|
||||
}
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get underlying sql.DB: %v", err)
|
||||
}
|
||||
sqlDB.SetMaxOpenConns(1)
|
||||
if err := db.AutoMigrate(
|
||||
&model.Application{},
|
||||
&model.FileBlob{},
|
||||
&model.File{},
|
||||
&model.Folder{},
|
||||
&model.UploadSession{},
|
||||
&model.UploadChunk{},
|
||||
&model.Task{},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to auto-migrate: %v", err)
|
||||
}
|
||||
|
||||
// 2. Temp work directory
|
||||
workDir := cfg.workDir
|
||||
if workDir == "" {
|
||||
wd, err := os.MkdirTemp("", "testenv-workdir-*")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp dir: %v", err)
|
||||
}
|
||||
workDir = wd
|
||||
}
|
||||
|
||||
// 3. MockSlurm httptest server
|
||||
mockSlurmSrv, mockSlurm := mockslurm.NewMockSlurmServer()
|
||||
|
||||
// 4. MockMinIO
|
||||
mockMinIO := mockminio.NewInMemoryStorage()
|
||||
|
||||
// 5. All 6 Store instances
|
||||
appStore := store.NewApplicationStore(db)
|
||||
taskStore := store.NewTaskStore(db)
|
||||
fileStore := store.NewFileStore(db)
|
||||
blobStore := store.NewBlobStore(db)
|
||||
uploadStore := store.NewUploadStore(db)
|
||||
folderStore := store.NewFolderStore(db)
|
||||
|
||||
// 6. Slurm client
|
||||
slurmClient, err := slurm.NewClientWithOpts(mockSlurmSrv.URL, slurm.WithHTTPClient(mockSlurmSrv.Client()))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create slurm client: %v", err)
|
||||
}
|
||||
|
||||
// 7. MinioConfig
|
||||
minioCfg := config.MinioConfig{
|
||||
ChunkSize: 16 << 20,
|
||||
MaxFileSize: 50 << 30,
|
||||
MinChunkSize: 5 << 20,
|
||||
SessionTTL: 48,
|
||||
Bucket: "files",
|
||||
}
|
||||
|
||||
// 8. All Service instances (dependency order)
|
||||
jobSvc := service.NewJobService(slurmClient, logger)
|
||||
clusterSvc := service.NewClusterService(slurmClient, logger)
|
||||
folderSvc := service.NewFolderService(folderStore, fileStore, logger)
|
||||
stagingSvc := service.NewFileStagingService(fileStore, blobStore, mockMinIO, minioCfg.Bucket, logger)
|
||||
taskSvc := service.NewTaskService(taskStore, appStore, fileStore, blobStore, stagingSvc, jobSvc, workDir, logger)
|
||||
appSvc := service.NewApplicationService(appStore, jobSvc, workDir, logger, taskSvc)
|
||||
uploadSvc := service.NewUploadService(mockMinIO, blobStore, fileStore, uploadStore, minioCfg, db, logger)
|
||||
fileSvc := service.NewFileService(mockMinIO, blobStore, fileStore, minioCfg.Bucket, db, logger)
|
||||
|
||||
// 9. All 7 Handler instances
|
||||
jobH := handler.NewJobHandler(jobSvc, logger)
|
||||
clusterH := handler.NewClusterHandler(clusterSvc, logger)
|
||||
appH := handler.NewApplicationHandler(appSvc, logger)
|
||||
uploadH := handler.NewUploadHandler(uploadSvc, logger)
|
||||
fileH := handler.NewFileHandler(fileSvc, logger)
|
||||
folderH := handler.NewFolderHandler(folderSvc, logger)
|
||||
taskH := handler.NewTaskHandler(taskSvc, logger)
|
||||
|
||||
// 10. Router
|
||||
router := server.NewRouter(jobH, clusterH, appH, uploadH, fileH, folderH, taskH, logger)
|
||||
|
||||
// 11. HTTP test server
|
||||
srv := httptest.NewServer(router)
|
||||
|
||||
// 12. Start TaskProcessor
|
||||
ctx := context.Background()
|
||||
taskSvc.StartProcessor(ctx)
|
||||
|
||||
// 13. Start TaskPoller (100ms interval for tests)
|
||||
poller := app.NewTaskPoller(taskSvc, 100*time.Millisecond, logger)
|
||||
poller.Start(ctx)
|
||||
|
||||
env := &TestEnv{
|
||||
DB: db,
|
||||
MockSlurm: mockSlurm,
|
||||
MockMinIO: mockMinIO,
|
||||
mockSlurmSrv: mockSlurmSrv,
|
||||
srv: srv,
|
||||
poller: poller,
|
||||
appStore: appStore,
|
||||
taskStore: taskStore,
|
||||
fileStore: fileStore,
|
||||
blobStore: blobStore,
|
||||
uploadStore: uploadStore,
|
||||
folderStore: folderStore,
|
||||
jobSvc: jobSvc,
|
||||
clusterSvc: clusterSvc,
|
||||
folderSvc: folderSvc,
|
||||
stagingSvc: stagingSvc,
|
||||
taskSvc: taskSvc,
|
||||
appSvc: appSvc,
|
||||
uploadSvc: uploadSvc,
|
||||
fileSvc: fileSvc,
|
||||
jobH: jobH,
|
||||
clusterH: clusterH,
|
||||
appH: appH,
|
||||
uploadH: uploadH,
|
||||
fileH: fileH,
|
||||
folderH: folderH,
|
||||
taskH: taskH,
|
||||
workDir: workDir,
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
// Cleanup registration (LIFO — last registered runs first).
|
||||
// We use a *testing.T-compatible interface; callers using *testing.T
|
||||
// will have t.Cleanup work correctly.
|
||||
if ct, ok := t.(interface{ Cleanup(func()) }); ok {
|
||||
ct.Cleanup(func() { os.RemoveAll(workDir) })
|
||||
ct.Cleanup(srv.Close)
|
||||
ct.Cleanup(poller.Stop)
|
||||
ct.Cleanup(taskSvc.StopProcessor)
|
||||
}
|
||||
|
||||
return env
|
||||
}
|
||||
|
||||
// URL returns the base URL of the test HTTP server.
|
||||
func (env *TestEnv) URL() string {
|
||||
return env.srv.URL
|
||||
}
|
||||
|
||||
// DoRequest sends an HTTP request to the test server.
|
||||
// When body is non-nil, Content-Type: application/json is set.
|
||||
func (env *TestEnv) DoRequest(method, path string, body io.Reader) *http.Response {
|
||||
req, err := http.NewRequest(method, env.srv.URL+path, body)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to create request: %v", err))
|
||||
}
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to send request: %v", err))
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
// DecodeResponse decodes an API response into its components.
|
||||
// Returns success flag, raw data, and any error from decoding.
|
||||
func (env *TestEnv) DecodeResponse(resp *http.Response) (bool, json.RawMessage, error) {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, nil, fmt.Errorf("read body: %w", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
var result struct {
|
||||
Success bool `json:"success"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
Error string `json:"error"`
|
||||
}
|
||||
if err := json.Unmarshal(body, &result); err != nil {
|
||||
return false, nil, fmt.Errorf("unmarshal response: %w (body: %s)", err, string(body))
|
||||
}
|
||||
return result.Success, result.Data, nil
|
||||
}
|
||||
|
||||
// CreateApp creates an application directly via the service and returns the app ID.
|
||||
func (env *TestEnv) CreateApp(name, scriptTemplate string, params json.RawMessage) (int64, error) {
|
||||
ctx := context.Background()
|
||||
appID, err := env.appSvc.CreateApplication(ctx, &model.CreateApplicationRequest{
|
||||
Name: name,
|
||||
ScriptTemplate: scriptTemplate,
|
||||
Parameters: params,
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return appID, nil
|
||||
}
|
||||
|
||||
// UploadTestData uploads content to MockMinIO and creates FileBlob + File records.
|
||||
// Returns (fileID, blobID).
|
||||
func (env *TestEnv) UploadTestData(name string, content []byte) (int64, int64) {
|
||||
ctx := context.Background()
|
||||
|
||||
h := sha256.Sum256(content)
|
||||
sha256Key := hex.EncodeToString(h[:])
|
||||
|
||||
_, err := env.MockMinIO.PutObject(ctx, "files", sha256Key, bytes.NewReader(content), int64(len(content)), storage.PutObjectOptions{})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("PutObject failed: %v", err))
|
||||
}
|
||||
|
||||
blob, err := env.blobStore.GetBySHA256(ctx, sha256Key)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("get blob by sha256: %v", err))
|
||||
}
|
||||
if blob == nil {
|
||||
blob = &model.FileBlob{
|
||||
SHA256: sha256Key,
|
||||
MinioKey: sha256Key,
|
||||
FileSize: int64(len(content)),
|
||||
RefCount: 1,
|
||||
}
|
||||
if err := env.blobStore.Create(ctx, blob); err != nil {
|
||||
panic(fmt.Sprintf("create blob: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
file := &model.File{
|
||||
Name: name,
|
||||
BlobSHA256: sha256Key,
|
||||
}
|
||||
if err := env.fileStore.Create(ctx, file); err != nil {
|
||||
panic(fmt.Sprintf("create file failed: %v", err))
|
||||
}
|
||||
|
||||
return file.ID, blob.ID
|
||||
}
|
||||
|
||||
// CreateFile uploads test data and returns only the file ID.
|
||||
func (env *TestEnv) CreateFile(name string, content []byte) (int64, error) {
|
||||
fileID, _ := env.UploadTestData(name, content)
|
||||
return fileID, nil
|
||||
}
|
||||
|
||||
// WaitForTaskStatus polls the DB until the task reaches the target status or timeout.
|
||||
func (env *TestEnv) WaitForTaskStatus(taskID int64, status string, timeout time.Duration) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
ctx := context.Background()
|
||||
for time.Now().Before(deadline) {
|
||||
task, err := env.taskStore.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get task: %w", err)
|
||||
}
|
||||
if task == nil {
|
||||
return fmt.Errorf("task %d not found", taskID)
|
||||
}
|
||||
if task.Status == status {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
task, _ := env.taskStore.GetByID(ctx, taskID)
|
||||
if task != nil {
|
||||
return fmt.Errorf("timeout waiting for task %d to reach status %q, current: %q", taskID, status, task.Status)
|
||||
}
|
||||
return fmt.Errorf("timeout waiting for task %d to reach status %q", taskID, status)
|
||||
}
|
||||
|
||||
// MakeTaskStale sets the task's updated_at to 31 seconds ago via raw SQL,
|
||||
// bypassing GORM's AutoUpdateTime.
|
||||
func (env *TestEnv) MakeTaskStale(taskID int64) error {
|
||||
sqlDB, err := env.DB.DB()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get sql.DB: %w", err)
|
||||
}
|
||||
_, err = sqlDB.Exec("UPDATE hpc_tasks SET updated_at = ? WHERE id = ?", time.Now().Add(-31*time.Second), taskID)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetTaskSlurmJobID returns the slurm_job_id for the given task via raw SQL.
|
||||
func (env *TestEnv) GetTaskSlurmJobID(taskID int64) (int32, error) {
|
||||
sqlDB, err := env.DB.DB()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("get sql.DB: %w", err)
|
||||
}
|
||||
var jobID sql.NullInt32
|
||||
err = sqlDB.QueryRow("SELECT slurm_job_id FROM hpc_tasks WHERE id = ?", taskID).Scan(&jobID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !jobID.Valid {
|
||||
return 0, fmt.Errorf("task %d has no slurm_job_id", taskID)
|
||||
}
|
||||
return jobID.Int32, nil
|
||||
}
|
||||
245
internal/testutil/testenv/env_test.go
Normal file
245
internal/testutil/testenv/env_test.go
Normal file
@@ -0,0 +1,245 @@
|
||||
package testenv
|
||||
|
||||
import (
	"encoding/json"
	"io"
	"net/http"
	"strings"
	"testing"
	"time"

	"gcy_hpc_server/internal/model"
	"gcy_hpc_server/internal/storage"
)
|
||||
|
||||
func TestNewTestEnv(t *testing.T) {
|
||||
env := NewTestEnv(t)
|
||||
defer func() {
|
||||
env.taskSvc.StopProcessor()
|
||||
env.poller.Stop()
|
||||
env.srv.Close()
|
||||
}()
|
||||
|
||||
if env.DB == nil {
|
||||
t.Fatal("DB is nil")
|
||||
}
|
||||
if env.MockSlurm == nil {
|
||||
t.Fatal("MockSlurm is nil")
|
||||
}
|
||||
if env.MockMinIO == nil {
|
||||
t.Fatal("MockMinIO is nil")
|
||||
}
|
||||
if env.srv == nil {
|
||||
t.Fatal("srv is nil")
|
||||
}
|
||||
|
||||
resp, err := http.Get(env.srv.URL + "/api/v1/applications")
|
||||
if err != nil {
|
||||
t.Fatalf("GET /api/v1/applications: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAllRoutesRegistered fires one request per expected API route and
// fails when the router itself (rather than a handler) answers 404 —
// i.e. the route was never registered.
func TestAllRoutesRegistered(t *testing.T) {
	env := NewTestEnv(t)

	routes := []struct {
		method string
		path   string
	}{
		{"POST", "/api/v1/jobs/submit"},
		{"GET", "/api/v1/jobs"},
		{"GET", "/api/v1/jobs/history"},
		{"GET", "/api/v1/jobs/1"},
		{"DELETE", "/api/v1/jobs/1"},
		{"GET", "/api/v1/nodes"},
		{"GET", "/api/v1/nodes/node01"},
		{"GET", "/api/v1/partitions"},
		{"GET", "/api/v1/partitions/normal"},
		{"GET", "/api/v1/diag"},
		{"GET", "/api/v1/applications"},
		{"POST", "/api/v1/applications"},
		{"GET", "/api/v1/applications/1"},
		{"PUT", "/api/v1/applications/1"},
		{"DELETE", "/api/v1/applications/1"},
		{"POST", "/api/v1/applications/1/submit"},
		{"POST", "/api/v1/files/uploads"},
		{"GET", "/api/v1/files/uploads/1"},
		{"PUT", "/api/v1/files/uploads/1/chunks/0"},
		{"POST", "/api/v1/files/uploads/1/complete"},
		{"DELETE", "/api/v1/files/uploads/1"},
		{"GET", "/api/v1/files"},
		{"GET", "/api/v1/files/1"},
		{"GET", "/api/v1/files/1/download"},
		{"DELETE", "/api/v1/files/1"},
		{"POST", "/api/v1/files/folders"},
		{"GET", "/api/v1/files/folders"},
		{"GET", "/api/v1/files/folders/1"},
		{"DELETE", "/api/v1/files/folders/1"},
		{"POST", "/api/v1/tasks"},
		{"GET", "/api/v1/tasks"},
	}

	// Guard against accidental edits to the route table above.
	if len(routes) != 31 {
		t.Fatalf("expected 31 routes, got %d", len(routes))
	}

	for _, r := range routes {
		req, err := http.NewRequest(r.method, env.srv.URL+r.path, nil)
		if err != nil {
			t.Fatalf("%s %s: create request: %v", r.method, r.path, err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			t.Fatalf("%s %s: %v", r.method, r.path, err)
		}
		body, _ := io.ReadAll(resp.Body) // read error ignored: an empty body still fails JSON decode below
		resp.Body.Close()

		// Gin returns 404 for unregistered routes WITHOUT a JSON body.
		// Handler-level 404s return JSON with {"success":false,...}.
		// So a 404 that decodes as valid JSON means the route IS registered.
		if resp.StatusCode == http.StatusNotFound {
			var apiResp struct {
				Success bool `json:"success"`
			}
			if json.Unmarshal(body, &apiResp) != nil {
				t.Errorf("%s %s: got router-level 404 (route not registered)", r.method, r.path)
			}
		}
	}
}
|
||||
|
||||
func TestMakeTaskStale(t *testing.T) {
|
||||
env := NewTestEnv(t)
|
||||
|
||||
ctx := t.Context()
|
||||
task := &model.Task{
|
||||
TaskName: "stale-test",
|
||||
AppID: 1,
|
||||
AppName: "test-app",
|
||||
Status: model.TaskStatusSubmitted,
|
||||
}
|
||||
taskID, err := env.taskStore.Create(ctx, task)
|
||||
if err != nil {
|
||||
t.Fatalf("create task: %v", err)
|
||||
}
|
||||
|
||||
before := time.Now()
|
||||
if err := env.MakeTaskStale(taskID); err != nil {
|
||||
t.Fatalf("MakeTaskStale: %v", err)
|
||||
}
|
||||
|
||||
updated, err := env.taskStore.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
t.Fatalf("get task: %v", err)
|
||||
}
|
||||
if !updated.UpdatedAt.Before(before.Add(-25 * time.Second)) {
|
||||
t.Errorf("expected updated_at to be >25s in the past, got %v (before=%v)", updated.UpdatedAt, before)
|
||||
}
|
||||
}
|
||||
|
||||
// TestUploadTestData checks the fixture helper end-to-end: the File row,
// the blob row (with recorded size), and the bytes stored in MockMinIO.
func TestUploadTestData(t *testing.T) {
	env := NewTestEnv(t)

	content := []byte("hello testenv")
	fileID, blobID := env.UploadTestData("test.txt", content)

	if fileID == 0 {
		t.Fatal("fileID is 0")
	}
	if blobID == 0 {
		t.Fatal("blobID is 0")
	}

	ctx := t.Context()
	file, err := env.fileStore.GetByID(ctx, fileID)
	if err != nil {
		t.Fatalf("get file: %v", err)
	}
	if file == nil {
		t.Fatal("file not found")
	}
	if file.Name != "test.txt" {
		t.Errorf("expected name 'test.txt', got %q", file.Name)
	}

	blob, err := env.blobStore.GetBySHA256(ctx, file.BlobSHA256)
	if err != nil {
		t.Fatalf("get blob: %v", err)
	}
	if blob == nil {
		t.Fatal("blob not found")
	}
	if blob.FileSize != int64(len(content)) {
		t.Errorf("expected size %d, got %d", len(content), blob.FileSize)
	}

	// The stored object must round-trip byte-for-byte.
	obj, info, err := env.MockMinIO.GetObject(ctx, "files", blob.MinioKey, storage.GetOptions{})
	if err != nil {
		t.Fatalf("get object: %v", err)
	}
	defer obj.Close()
	got, _ := io.ReadAll(obj) // read error ignored: a short read fails the content comparison below
	if string(got) != string(content) {
		t.Errorf("expected content %q, got %q", content, got)
	}
	if info.Size != int64(len(content)) {
		t.Errorf("expected object size %d, got %d", len(content), info.Size)
	}
}
|
||||
|
||||
func TestCreateApp(t *testing.T) {
|
||||
env := NewTestEnv(t)
|
||||
|
||||
params := json.RawMessage(`[{"name":"cores","type":"integer","required":true}]`)
|
||||
appID, err := env.CreateApp("test-app", "#!/bin/bash\necho hello", params)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateApp: %v", err)
|
||||
}
|
||||
if appID == 0 {
|
||||
t.Fatal("appID is 0")
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
app, err := env.appStore.GetByID(ctx, appID)
|
||||
if err != nil {
|
||||
t.Fatalf("get app: %v", err)
|
||||
}
|
||||
if app == nil {
|
||||
t.Fatal("app not found")
|
||||
}
|
||||
if app.Name != "test-app" {
|
||||
t.Errorf("expected name 'test-app', got %q", app.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDoRequest_SetsJSONContentType(t *testing.T) {
|
||||
env := NewTestEnv(t)
|
||||
|
||||
resp := env.DoRequest("POST", "/api/v1/applications", nil)
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode == 0 {
|
||||
t.Fatal("no status code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeResponse(t *testing.T) {
|
||||
env := NewTestEnv(t)
|
||||
|
||||
resp := env.DoRequest("GET", "/api/v1/applications", nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
success, data, err := env.DecodeResponse(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodeResponse: %v", err)
|
||||
}
|
||||
if !success {
|
||||
t.Error("expected success=true for list applications")
|
||||
}
|
||||
if data == nil {
|
||||
t.Error("expected non-nil data")
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user