feat: 添加 Partition 领域类型和 PartitionsService
包含 PartitionInfo 及其子结构体（Nodes、Accounts、Groups、QOS、TRES、CPUs、Defaults、Maximums、Minimums、Priority、Timeouts）。PartitionsService 提供 GetPartitions 与 GetPartition 两个方法。 Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-openagent) Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
This commit is contained in:
71
internal/slurm/slurm_partitions.go
Normal file
71
internal/slurm/slurm_partitions.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package slurm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// GetPartitionsOptions specifies optional parameters for GetPartitions.
//
// It is also accepted by GetPartition, which shares the same query
// parameters.
type GetPartitionsOptions struct {
	// UpdateTime is sent as the "update_time" query parameter when set.
	// Presumably a Unix timestamp used by slurmrestd to filter results
	// changed since that time — TODO confirm against slurmrestd docs.
	UpdateTime *int64 `url:"update_time,omitempty"`
}
|
||||
|
||||
// GetPartitions lists all partitions.
|
||||
func (s *PartitionsService) GetPartitions(ctx context.Context, opts *GetPartitionsOptions) (*OpenapiPartitionResp, *Response, error) {
|
||||
path := "slurm/v0.0.40/partitions"
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
u, parseErr := url.Parse(req.URL.String())
|
||||
if parseErr != nil {
|
||||
return nil, nil, parseErr
|
||||
}
|
||||
q := u.Query()
|
||||
if opts.UpdateTime != nil {
|
||||
q.Set("update_time", strconv.FormatInt(*opts.UpdateTime, 10))
|
||||
}
|
||||
u.RawQuery = q.Encode()
|
||||
req.URL = u
|
||||
}
|
||||
|
||||
var result OpenapiPartitionResp
|
||||
resp, err := s.client.Do(ctx, req, &result)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return &result, resp, nil
|
||||
}
|
||||
|
||||
// GetPartition gets a single partition by name.
|
||||
func (s *PartitionsService) GetPartition(ctx context.Context, partitionName string, opts *GetPartitionsOptions) (*OpenapiPartitionResp, *Response, error) {
|
||||
path := fmt.Sprintf("slurm/v0.0.40/partition/%s", partitionName)
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
u, parseErr := url.Parse(req.URL.String())
|
||||
if parseErr != nil {
|
||||
return nil, nil, parseErr
|
||||
}
|
||||
q := u.Query()
|
||||
if opts.UpdateTime != nil {
|
||||
q.Set("update_time", strconv.FormatInt(*opts.UpdateTime, 10))
|
||||
}
|
||||
u.RawQuery = q.Encode()
|
||||
req.URL = u
|
||||
}
|
||||
|
||||
var result OpenapiPartitionResp
|
||||
resp, err := s.client.Do(ctx, req, &result)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return &result, resp, nil
|
||||
}
|
||||
111
internal/slurm/slurm_partitions_test.go
Normal file
111
internal/slurm/slurm_partitions_test.go
Normal file
@@ -0,0 +1,111 @@
|
||||
package slurm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPartitionsService_GetPartitions(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/slurm/v0.0.40/partitions", func(w http.ResponseWriter, r *http.Request) {
|
||||
testMethod(t, r, "GET")
|
||||
fmt.Fprint(w, `{"partitions": [], "last_update": {}}`)
|
||||
})
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
client, _ := NewClient(server.URL, nil)
|
||||
resp, _, err := client.Partitions.GetPartitions(context.Background(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatal("expected non-nil response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionsService_GetPartitions_WithOptions(t *testing.T) {
|
||||
var capturedQuery url.Values
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/slurm/v0.0.40/partitions", func(w http.ResponseWriter, r *http.Request) {
|
||||
capturedQuery = r.URL.Query()
|
||||
testMethod(t, r, "GET")
|
||||
fmt.Fprint(w, `{"partitions": [], "last_update": {}}`)
|
||||
})
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
client, _ := NewClient(server.URL, nil)
|
||||
opts := &GetPartitionsOptions{
|
||||
UpdateTime: Ptr(int64(1700000000)),
|
||||
}
|
||||
_, _, err := client.Partitions.GetPartitions(context.Background(), opts)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if capturedQuery.Get("update_time") != "1700000000" {
|
||||
t.Errorf("expected update_time=1700000000, got %s", capturedQuery.Get("update_time"))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionsService_GetPartition(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/slurm/v0.0.40/partition/gpu", func(w http.ResponseWriter, r *http.Request) {
|
||||
testMethod(t, r, "GET")
|
||||
fmt.Fprint(w, `{"partitions": [{"name": "gpu"}], "last_update": {}}`)
|
||||
})
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
client, _ := NewClient(server.URL, nil)
|
||||
resp, _, err := client.Partitions.GetPartition(context.Background(), "gpu", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatal("expected non-nil response")
|
||||
}
|
||||
if resp.Partitions == nil || len(*resp.Partitions) != 1 {
|
||||
t.Fatalf("expected 1 partition, got %v", resp.Partitions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionsService_GetPartitions_Error(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/slurm/v0.0.40/partitions", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
fmt.Fprint(w, `{"errors": [{"error": "internal error"}]}`)
|
||||
})
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
client, _ := NewClient(server.URL, nil)
|
||||
_, _, err := client.Partitions.GetPartitions(context.Background(), nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for 500 response")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "500") {
|
||||
t.Errorf("expected error to contain 500, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionsService_GetPartition_Error(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/slurm/v0.0.40/partition/nonexistent", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
fmt.Fprint(w, `{"errors": [{"error": "partition not found"}]}`)
|
||||
})
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
client, _ := NewClient(server.URL, nil)
|
||||
_, _, err := client.Partitions.GetPartition(context.Background(), "nonexistent", nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for 404 response")
|
||||
}
|
||||
}
|
||||
123
internal/slurm/types_partition.go
Normal file
123
internal/slurm/types_partition.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package slurm
|
||||
|
||||
// PartitionInfoNodes represents node-related fields within a partition (v0.0.40_partition_info.nodes).
type PartitionInfoNodes struct {
	// AllowedAllocation lists nodes allowed for allocation — likely a
	// Slurm hostlist expression such as "node[01-10]"; confirm against
	// slurmrestd docs.
	AllowedAllocation *string `json:"allowed_allocation,omitempty"`
	// Configured lists the nodes configured in this partition.
	Configured *string `json:"configured,omitempty"`
	// Total is the number of nodes in the partition.
	Total *int32 `json:"total,omitempty"`
}

// PartitionInfoAccounts represents account-related fields within a partition (v0.0.40_partition_info.accounts).
type PartitionInfoAccounts struct {
	// Allowed and Deny are comma-separated account lists.
	Allowed *string `json:"allowed,omitempty"`
	Deny    *string `json:"deny,omitempty"`
}

// PartitionInfoGroups represents group-related fields within a partition (v0.0.40_partition_info.groups).
type PartitionInfoGroups struct {
	// Allowed is the group list permitted to use the partition.
	Allowed *string `json:"allowed,omitempty"`
}

// PartitionInfoQOS represents QOS-related fields within a partition (v0.0.40_partition_info.qos).
type PartitionInfoQOS struct {
	// Allowed and Deny are comma-separated QOS lists; Assigned is the
	// QOS bound to the partition itself.
	Allowed  *string `json:"allowed,omitempty"`
	Deny     *string `json:"deny,omitempty"`
	Assigned *string `json:"assigned,omitempty"`
}

// PartitionInfoTRES represents TRES-related fields within a partition (v0.0.40_partition_info.tres).
type PartitionInfoTRES struct {
	// BillingWeights is a TRES weight expression (e.g. "CPU=1.0").
	BillingWeights *string `json:"billing_weights,omitempty"`
	Configured     *string `json:"configured,omitempty"`
}

// PartitionInfoCPUs represents CPU-related fields within a partition (v0.0.40_partition_info.cpus).
type PartitionInfoCPUs struct {
	// TaskBinding carries the CPU task-binding flags as an integer —
	// semantics not visible here; confirm against slurmrestd docs.
	TaskBinding *int32 `json:"task_binding,omitempty"`
	// Total is the total CPU count in the partition.
	Total *int32 `json:"total,omitempty"`
}
|
||||
|
||||
// PartitionInfoDefaults represents default values for a partition (v0.0.40_partition_info.defaults).
type PartitionInfoDefaults struct {
	// MemoryPerCPU is the default memory per CPU — units not visible
	// here (presumably MiB per Slurm convention; confirm).
	MemoryPerCPU           *int64       `json:"memory_per_cpu,omitempty"`
	PartitionMemoryPerCPU  *Uint64NoVal `json:"partition_memory_per_cpu,omitempty"`
	PartitionMemoryPerNode *Uint64NoVal `json:"partition_memory_per_node,omitempty"`
	// Time is the default time limit (NoVal wrapper allows "not set" /
	// "infinite" encodings).
	Time *Uint32NoVal `json:"time,omitempty"`
	Job  *string      `json:"job,omitempty"`
}

// PartitionInfoMaximumsOversubscribe represents oversubscribe settings (v0.0.40_partition_info.maximums.oversubscribe).
type PartitionInfoMaximumsOversubscribe struct {
	// Jobs is the oversubscribe job count; Flags carries mode flags
	// such as "force".
	Jobs  *int32   `json:"jobs,omitempty"`
	Flags []string `json:"flags,omitempty"`
}

// PartitionInfoMaximums represents maximum resource limits for a partition (v0.0.40_partition_info.maximums).
type PartitionInfoMaximums struct {
	CpusPerNode            *int32                              `json:"cpus_per_node,omitempty"`
	CpusPerSocket          *int32                              `json:"cpus_per_socket,omitempty"`
	MemoryPerCPU           *int64                              `json:"memory_per_cpu,omitempty"`
	PartitionMemoryPerCPU  *Uint64NoVal                        `json:"partition_memory_per_cpu,omitempty"`
	PartitionMemoryPerNode *Uint64NoVal                        `json:"partition_memory_per_node,omitempty"`
	Nodes                  *Uint32NoVal                        `json:"nodes,omitempty"`
	Shares                 *int32                              `json:"shares,omitempty"`
	Oversubscribe          *PartitionInfoMaximumsOversubscribe `json:"oversubscribe,omitempty"`
	// Time is the maximum job time limit; the NoVal wrapper can encode
	// "infinite".
	Time          *Uint32NoVal `json:"time,omitempty"`
	OverTimeLimit *Uint16NoVal `json:"over_time_limit,omitempty"`
}
|
||||
|
||||
// PartitionInfoMinimums represents minimum resource limits for a partition (v0.0.40_partition_info.minimums).
type PartitionInfoMinimums struct {
	// Nodes is the minimum node count per job.
	Nodes *int32 `json:"nodes,omitempty"`
}

// PartitionInfoPriority represents priority settings for a partition (v0.0.40_partition_info.priority).
type PartitionInfoPriority struct {
	// JobFactor scales job priority; Tier orders partitions for
	// scheduling preference.
	JobFactor *int32 `json:"job_factor,omitempty"`
	Tier      *int32 `json:"tier,omitempty"`
}

// PartitionInfoTimeouts represents timeout settings for a partition (v0.0.40_partition_info.timeouts).
type PartitionInfoTimeouts struct {
	// Resume and Suspend are node resume/suspend timeouts — units not
	// visible here (presumably seconds; confirm).
	Resume  *Uint16NoVal `json:"resume,omitempty"`
	Suspend *Uint16NoVal `json:"suspend,omitempty"`
}

// PartitionInfoPartition represents the partition state (v0.0.40_partition_info.partition).
type PartitionInfoPartition struct {
	// State holds state flags such as "UP".
	State []string `json:"state,omitempty"`
}
|
||||
|
||||
// PartitionInfo represents a Slurm partition (v0.0.40_partition_info).
//
// All fields are pointers (or slices) so that absent JSON keys decode to
// nil and zero-value structs marshal to "{}" via omitempty.
type PartitionInfo struct {
	Nodes     *PartitionInfoNodes    `json:"nodes,omitempty"`
	Accounts  *PartitionInfoAccounts `json:"accounts,omitempty"`
	Groups    *PartitionInfoGroups   `json:"groups,omitempty"`
	QOS       *PartitionInfoQOS      `json:"qos,omitempty"`
	// Alternate is the alternate partition name.
	Alternate *string                `json:"alternate,omitempty"`
	TRES      *PartitionInfoTRES     `json:"tres,omitempty"`
	Cluster   *string                `json:"cluster,omitempty"`
	CPUs      *PartitionInfoCPUs     `json:"cpus,omitempty"`
	Defaults  *PartitionInfoDefaults `json:"defaults,omitempty"`
	// GraceTime — units not visible here (presumably seconds; confirm).
	GraceTime *int32                 `json:"grace_time,omitempty"`
	Maximums  *PartitionInfoMaximums `json:"maximums,omitempty"`
	Minimums  *PartitionInfoMinimums `json:"minimums,omitempty"`
	// Name is the partition name (e.g. "normal", "gpu").
	Name     *string                `json:"name,omitempty"`
	NodeSets *string                `json:"node_sets,omitempty"`
	Priority *PartitionInfoPriority `json:"priority,omitempty"`
	Timeouts *PartitionInfoTimeouts `json:"timeouts,omitempty"`
	// Partition nests the partition state flags.
	Partition   *PartitionInfoPartition `json:"partition,omitempty"`
	SuspendTime *Uint32NoVal            `json:"suspend_time,omitempty"`
}
|
||||
|
||||
// PartitionInfoMsg is a collection of PartitionInfo objects (v0.0.40_partition_info_msg).
type PartitionInfoMsg []PartitionInfo

// OpenapiPartitionResp represents the response for partition queries (v0.0.40_openapi_partition_resp).
type OpenapiPartitionResp struct {
	// Partitions holds the returned partition records; nil when the
	// server omits the field entirely.
	Partitions *PartitionInfoMsg `json:"partitions,omitempty"`
	// LastUpdate is the last data-change time — tests use a Unix
	// timestamp (1700000000); confirm semantics against slurmrestd docs.
	LastUpdate *Uint64NoVal `json:"last_update,omitempty"`
	Meta       *OpenapiMeta `json:"meta,omitempty"`
	// Errors and Warnings are value types (not pointers), unlike the
	// fields above.
	Errors   OpenapiErrors   `json:"errors,omitempty"`
	Warnings OpenapiWarnings `json:"warnings,omitempty"`
}
|
||||
223
internal/slurm/types_partition_test.go
Normal file
223
internal/slurm/types_partition_test.go
Normal file
@@ -0,0 +1,223 @@
|
||||
package slurm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestPartitionInfoRoundTrip marshals a fully-populated PartitionInfo and
// verifies that unmarshalling the produced JSON restores representative
// fields at several nesting depths (including NoVal wrappers and slices).
func TestPartitionInfoRoundTrip(t *testing.T) {
	// Populate every field of PartitionInfo so the round trip exercises
	// each JSON tag at least once.
	orig := PartitionInfo{
		Nodes: &PartitionInfoNodes{
			AllowedAllocation: Ptr("node[01-10]"),
			Configured:        Ptr("node[01-20]"),
			Total:             Ptr(int32(20)),
		},
		Accounts: &PartitionInfoAccounts{
			Allowed: Ptr("admin,users"),
			Deny:    Ptr("guest"),
		},
		Groups: &PartitionInfoGroups{
			Allowed: Ptr("slurm"),
		},
		QOS: &PartitionInfoQOS{
			Allowed:  Ptr("normal,high"),
			Deny:     Ptr("low"),
			Assigned: Ptr("normal"),
		},
		Alternate: Ptr("backup"),
		TRES: &PartitionInfoTRES{
			BillingWeights: Ptr("CPU=1.0"),
			Configured:     Ptr("CPU"),
		},
		Cluster: Ptr("test-cluster"),
		CPUs: &PartitionInfoCPUs{
			TaskBinding: Ptr(int32(1)),
			Total:       Ptr(int32(64)),
		},
		Defaults: &PartitionInfoDefaults{
			MemoryPerCPU:           Ptr(int64(4096)),
			PartitionMemoryPerCPU:  &Uint64NoVal{Set: Ptr(true), Number: Ptr(int64(4096))},
			PartitionMemoryPerNode: &Uint64NoVal{Set: Ptr(true), Number: Ptr(int64(65536))},
			Time:                   &Uint32NoVal{Set: Ptr(true), Number: Ptr(int64(3600))},
			Job:                    Ptr("default_job"),
		},
		GraceTime: Ptr(int32(300)),
		Maximums: &PartitionInfoMaximums{
			CpusPerNode:            Ptr(int32(128)),
			CpusPerSocket:          Ptr(int32(64)),
			MemoryPerCPU:           Ptr(int64(8192)),
			PartitionMemoryPerCPU:  &Uint64NoVal{Set: Ptr(true), Number: Ptr(int64(8192))},
			PartitionMemoryPerNode: &Uint64NoVal{Set: Ptr(true), Number: Ptr(int64(262144))},
			Nodes:                  &Uint32NoVal{Set: Ptr(true), Number: Ptr(int64(100))},
			Shares:                 Ptr(int32(4)),
			Oversubscribe: &PartitionInfoMaximumsOversubscribe{
				Jobs:  Ptr(int32(2)),
				Flags: []string{"force"},
			},
			// Exercise the "infinite" encoding of the NoVal wrapper.
			Time:          &Uint32NoVal{Set: Ptr(true), Infinite: Ptr(true)},
			OverTimeLimit: &Uint16NoVal{Set: Ptr(true), Number: Ptr(int64(60))},
		},
		Minimums: &PartitionInfoMinimums{
			Nodes: Ptr(int32(1)),
		},
		Name:     Ptr("normal"),
		NodeSets: Ptr("node[01-10]"),
		Priority: &PartitionInfoPriority{
			JobFactor: Ptr(int32(1)),
			Tier:      Ptr(int32(100)),
		},
		Timeouts: &PartitionInfoTimeouts{
			Resume:  &Uint16NoVal{Set: Ptr(true), Number: Ptr(int64(300))},
			Suspend: &Uint16NoVal{Set: Ptr(true), Number: Ptr(int64(600))},
		},
		Partition: &PartitionInfoPartition{
			State: []string{"UP"},
		},
		// Number 0 verifies that an explicit zero survives the round trip.
		SuspendTime: &Uint32NoVal{Set: Ptr(true), Number: Ptr(int64(0))},
	}
	data, err := json.Marshal(orig)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var decoded PartitionInfo
	if err := json.Unmarshal(data, &decoded); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	// Spot-check fields at different depths rather than reflect.DeepEqual,
	// giving clearer failure messages.
	if decoded.Name == nil || *decoded.Name != "normal" {
		t.Fatalf("name mismatch: %v", decoded.Name)
	}
	if decoded.Nodes == nil || decoded.Nodes.Total == nil || *decoded.Nodes.Total != 20 {
		t.Fatalf("nodes.total mismatch: %v", decoded.Nodes)
	}
	if decoded.Accounts == nil || decoded.Accounts.Allowed == nil || *decoded.Accounts.Allowed != "admin,users" {
		t.Fatalf("accounts.allowed mismatch: %v", decoded.Accounts)
	}
	if decoded.Maximums == nil || decoded.Maximums.Oversubscribe == nil || len(decoded.Maximums.Oversubscribe.Flags) != 1 || decoded.Maximums.Oversubscribe.Flags[0] != "force" {
		t.Fatalf("maximums.oversubscribe.flags mismatch: %v", decoded.Maximums)
	}
	if decoded.Partition == nil || len(decoded.Partition.State) != 1 || decoded.Partition.State[0] != "UP" {
		t.Fatalf("partition.state mismatch: %v", decoded.Partition)
	}
	if decoded.Defaults == nil || decoded.Defaults.Time == nil || decoded.Defaults.Time.Number == nil || *decoded.Defaults.Time.Number != 3600 {
		t.Fatalf("defaults.time mismatch: %v", decoded.Defaults)
	}
	if decoded.SuspendTime == nil || decoded.SuspendTime.Number == nil || *decoded.SuspendTime.Number != 0 {
		t.Fatalf("suspend_time mismatch: %v", decoded.SuspendTime)
	}
}
|
||||
|
||||
func TestPartitionInfoEmptyRoundTrip(t *testing.T) {
|
||||
orig := PartitionInfo{}
|
||||
data, err := json.Marshal(orig)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
if string(data) != "{}" {
|
||||
t.Fatalf("empty PartitionInfo should marshal to {}, got %s", data)
|
||||
}
|
||||
var decoded PartitionInfo
|
||||
if err := json.Unmarshal([]byte(`{}`), &decoded); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
if decoded.Name != nil {
|
||||
t.Fatal("name should be nil for empty object")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionInfoMsgRoundTrip(t *testing.T) {
|
||||
orig := PartitionInfoMsg{
|
||||
{Name: Ptr("normal"), Cluster: Ptr("cluster1")},
|
||||
{Name: Ptr("gpu"), Cluster: Ptr("cluster1")},
|
||||
}
|
||||
data, err := json.Marshal(orig)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal: %v", err)
|
||||
}
|
||||
var decoded PartitionInfoMsg
|
||||
if err := json.Unmarshal(data, &decoded); err != nil {
|
||||
t.Fatalf("unmarshal: %v", err)
|
||||
}
|
||||
if len(decoded) != 2 {
|
||||
t.Fatalf("expected 2 partitions, got %d", len(decoded))
|
||||
}
|
||||
if decoded[0].Name == nil || *decoded[0].Name != "normal" {
|
||||
t.Fatalf("partitions[0].name mismatch: %v", decoded[0].Name)
|
||||
}
|
||||
if decoded[1].Name == nil || *decoded[1].Name != "gpu" {
|
||||
t.Fatalf("partitions[1].name mismatch: %v", decoded[1].Name)
|
||||
}
|
||||
}
|
||||
|
||||
// TestOpenapiPartitionRespRoundTrip builds a full response envelope
// (partitions + last_update + meta + empty errors/warnings), round-trips
// it through JSON, and spot-checks the decoded structure.
func TestOpenapiPartitionRespRoundTrip(t *testing.T) {
	partitions := PartitionInfoMsg{
		{
			Name:    Ptr("normal"),
			Cluster: Ptr("test-cluster"),
			Partition: &PartitionInfoPartition{
				State: []string{"UP"},
			},
			Nodes: &PartitionInfoNodes{
				Total: Ptr(int32(100)),
			},
			CPUs: &PartitionInfoCPUs{
				Total: Ptr(int32(6400)),
			},
			Maximums: &PartitionInfoMaximums{
				// Infinite time limit exercises the NoVal "infinite" path.
				Time:  &Uint32NoVal{Set: Ptr(true), Infinite: Ptr(true)},
				Nodes: &Uint32NoVal{Set: Ptr(true), Number: Ptr(int64(100))},
			},
		},
		{
			Name:    Ptr("debug"),
			Cluster: Ptr("test-cluster"),
			Partition: &PartitionInfoPartition{
				State: []string{"UP"},
			},
			Defaults: &PartitionInfoDefaults{
				Time: &Uint32NoVal{Set: Ptr(true), Number: Ptr(int64(600))},
			},
		},
	}
	orig := OpenapiPartitionResp{
		Partitions: &partitions,
		LastUpdate: &Uint64NoVal{Set: Ptr(true), Number: Ptr(int64(1700000000))},
		Meta: &OpenapiMeta{
			Slurm: &MetaSlurm{
				Version: &MetaSlurmVersion{
					Major: Ptr("24"),
					Micro: Ptr("5"),
					Minor: Ptr("05"),
				},
			},
		},
		Errors:   OpenapiErrors{},
		Warnings: OpenapiWarnings{},
	}
	data, err := json.Marshal(orig)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var decoded OpenapiPartitionResp
	if err := json.Unmarshal(data, &decoded); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	if decoded.Partitions == nil || len(*decoded.Partitions) != 2 {
		t.Fatalf("expected 2 partitions, got %d", len(*decoded.Partitions))
	}
	ps := *decoded.Partitions
	if ps[0].Name == nil || *ps[0].Name != "normal" {
		t.Fatalf("partitions[0].name mismatch")
	}
	if ps[0].Maximums == nil || ps[0].Maximums.Time == nil || ps[0].Maximums.Time.Infinite == nil || !*ps[0].Maximums.Time.Infinite {
		t.Fatalf("partitions[0].maximums.time.infinite mismatch")
	}
	if ps[1].Defaults == nil || ps[1].Defaults.Time == nil || ps[1].Defaults.Time.Number == nil || *ps[1].Defaults.Time.Number != 600 {
		t.Fatalf("partitions[1].defaults.time mismatch")
	}
	if decoded.LastUpdate == nil || decoded.LastUpdate.Number == nil || *decoded.LastUpdate.Number != 1700000000 {
		t.Fatalf("last_update mismatch: %v", decoded.LastUpdate)
	}
	if decoded.Meta == nil || decoded.Meta.Slurm == nil || decoded.Meta.Slurm.Version == nil {
		t.Fatal("meta.slurm.version should not be nil")
	}
}
|
||||
Reference in New Issue
Block a user