feat(service): map additional Slurm SDK fields and fix ExitCode/Default bugs

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-openagent)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
This commit is contained in branch history.
Author: dailz
Date: 2026-04-10 11:12:51 +08:00
Parent: 85901fe18a
Commit: 824d9e816f
2 changed files with 146 additions and 13 deletions

View File

@@ -46,6 +46,27 @@ func uint32NoValString(v *slurm.Uint32NoVal) string {
return ""
}
// derefUint64NoValInt64 unwraps a Slurm Uint64NoVal and returns its numeric
// value pointer, or nil when the wrapper itself or its Number field is unset.
// The returned pointer aliases the SDK struct's field; it is not copied.
func derefUint64NoValInt64(v *slurm.Uint64NoVal) *int64 {
	if v == nil || v.Number == nil {
		return nil
	}
	return v.Number
}
// derefCSVString flattens a Slurm CSVString (a list of strings) into a single
// comma-separated string. A nil or empty list yields "".
func derefCSVString(cs *slurm.CSVString) string {
	if cs == nil {
		return ""
	}
	out := ""
	sep := ""
	for _, item := range *cs {
		out += sep + item
		sep = ","
	}
	return out
}
type ClusterService struct {
client *slurm.Client
logger *zap.Logger
@@ -227,20 +248,42 @@ func (s *ClusterService) GetDiag(ctx context.Context) (*slurm.OpenapiDiagResp, e
// mapNode converts a Slurm SDK node record into the service's NodeResponse
// model, dereferencing optional pointer fields to their zero values when
// unset. NOTE(review): the source showed the pre-change field lines merged in
// alongside the new ones (a stripped diff), producing duplicate struct-literal
// keys — a compile error in Go; only the current field set is kept here.
func mapNode(n slurm.Node) model.NodeResponse {
	return model.NodeResponse{
		Name:            derefStr(n.Name),
		State:           n.State,
		CPUs:            derefInt32(n.Cpus),
		AllocCpus:       n.AllocCpus,
		Cores:           n.Cores,
		Sockets:         n.Sockets,
		Threads:         n.Threads,
		RealMemory:      derefInt64(n.RealMemory),
		AllocMemory:     derefInt64(n.AllocMemory),
		FreeMem:         derefUint64NoValInt64(n.FreeMem),
		CpuLoad:         n.CpuLoad,
		Arch:            derefStr(n.Architecture),
		OS:              derefStr(n.OperatingSystem),
		Gres:            derefStr(n.Gres),
		GresUsed:        derefStr(n.GresUsed),
		Reason:          derefStr(n.Reason),
		ReasonSetByUser: derefStr(n.ReasonSetByUser),
		Address:         derefStr(n.Address),
		Hostname:        derefStr(n.Hostname),
		Weight:          n.Weight,
		Features:        derefCSVString(n.Features),
		ActiveFeatures:  derefCSVString(n.ActiveFeatures),
	}
}
// mapPartition converts a Slurm SDK PartitionInfo into the service's
// PartitionResponse model. Optional nested structs are nil-guarded before
// each read.
func mapPartition(pi slurm.PartitionInfo) model.PartitionResponse {
var state []string
var isDefault bool
if pi.Partition != nil {
state = pi.Partition.State
// A partition is the cluster default when its state flags include
// the "DEFAULT" marker.
for _, s := range state {
if s == "DEFAULT" {
isDefault = true
break
}
}
}
var nodes string
if pi.Nodes != nil {
@@ -258,12 +301,56 @@ func mapPartition(pi slurm.PartitionInfo) model.PartitionResponse {
// Partition maximums: collapse the three identical nil guards on
// pi.Maximums into a single check.
var maxNodes, maxCPUsPerNode *int32
if pi.Maximums != nil {
	maxTime = uint32NoValString(pi.Maximums.Time)
	maxNodes = mapUint32NoValToInt32(pi.Maximums.Nodes)
	maxCPUsPerNode = pi.Maximums.CpusPerNode
}
var minNodes *int32
if pi.Minimums != nil {
	minNodes = pi.Minimums.Nodes
}
var defaultTime string
if pi.Defaults != nil {
	defaultTime = uint32NoValString(pi.Defaults.Time)
}
// GraceTime is already *int32 on the SDK struct, so pass it straight through.
graceTime := pi.GraceTime
var priority *int32
if pi.Priority != nil {
	priority = pi.Priority.JobFactor
}
var qosAllowed, qosDeny, qosAssigned string
if pi.QOS != nil {
	qosAllowed = derefStr(pi.QOS.Allowed)
	qosDeny = derefStr(pi.QOS.Deny)
	qosAssigned = derefStr(pi.QOS.Assigned)
}
var accountsAllowed, accountsDeny string
if pi.Accounts != nil {
	accountsAllowed = derefStr(pi.Accounts.Allowed)
	accountsDeny = derefStr(pi.Accounts.Deny)
}
// Assemble the response. NOTE(review): the source showed the pre-change
// field lines merged in alongside the new ones (a stripped diff), producing
// duplicate struct-literal keys — a compile error in Go; only the current
// field set is kept here.
return model.PartitionResponse{
	Name:            derefStr(pi.Name),
	State:           state,
	Default:         isDefault,
	Nodes:           nodes,
	TotalNodes:      totalNodes,
	TotalCPUs:       totalCPUs,
	MaxTime:         maxTime,
	MaxNodes:        maxNodes,
	MaxCPUsPerNode:  maxCPUsPerNode,
	MinNodes:        minNodes,
	DefaultTime:     defaultTime,
	GraceTime:       graceTime,
	Priority:        priority,
	QOSAllowed:      qosAllowed,
	QOSDeny:         qosDeny,
	QOSAssigned:     qosAssigned,
	AccountsAllowed: accountsAllowed,
	AccountsDeny:    accountsDeny,
}
}