diff --git a/CHANGELOG.md b/CHANGELOG.md
index 41a31496..2cd1ec0a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [unreleased]
 
 ### Added
+- Support Slurm 24.11 and Slurm REST API v0.0.40 (#366 → #400).
 - agent:
   - Return RacksDB infrastructure name and a boolean to indicate if metrics
     feature is enabled in `/info` endpoint, in addition to the cluster name.
@@ -64,14 +65,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   - Add requirement on markdown external library for `gateway` extra package.
 
 ### Changed
+- agent: Bump minimal required Slurm version from 23.02.0 to 23.11.0.
 - gateway: Change error message when unable to parse agent info fields.
 - docs:
   - Update configuration reference documentation.
   - Update dashboard screenshot in overview page with example of resource chart.
+  - Replace mention of Slurm REST API version v0.0.39 by v0.0.40.
+  - Mention requirement of Slurm >= 23.11 and dropped support of Slurm 23.02.
 - conf:
   - Convert `[cache]` > `password` agent parameter from string to password type.
   - Convert `[ldap]` > `bind_password` gateway parameter from string to password
     type.
+  - Bump `[slurmrestd]` > `version` default value from `0.0.39` to `0.0.40` in
+    agent configuration for compatibility with Slurm 24.11.
 - pkgs:
   - Add requirement on RFL.core >= 1.1.0.
   - Add requirement on RFL.settings >= 1.1.1.
@@ -96,6 +102,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   - Update dependencies to fix CVE-2024-45812 and CVE-2024-45811 (vite),
     CVE-2024-47068 (rollup), CVE-2024-21538 (cross-spawn).
 
+### Removed
+- Support of Slurm 23.02 and Slurm REST API v0.0.39.
+- conf:
+  - Remove unused `required` from default selected jobs field on `slurmrestd`
+    `/slurm/*/jobs` endpoint.
+  - Remove unused `state_reason` from default selected job field on `slurmrestd`
+    `/slurm/*/job/` endpoint.
+
 ## [3.2.0] - 2024-09-05
 
 ### Added
diff --git a/assets/screenshots/assemblies/bitmaps/slurm-web_multi-clusters-large.png b/assets/screenshots/assemblies/bitmaps/slurm-web_multi-clusters-large.png
index f02adf4c..43b63c88 100644
Binary files a/assets/screenshots/assemblies/bitmaps/slurm-web_multi-clusters-large.png and b/assets/screenshots/assemblies/bitmaps/slurm-web_multi-clusters-large.png differ
diff --git a/assets/screenshots/raw/screenshot_clusters.png b/assets/screenshots/raw/screenshot_clusters.png
index 993b4f8b..a7862293 100644
Binary files a/assets/screenshots/raw/screenshot_clusters.png and b/assets/screenshots/raw/screenshot_clusters.png differ
diff --git a/assets/screenshots/shadowed/screenshot_clusters.png b/assets/screenshots/shadowed/screenshot_clusters.png
index b2ffcabb..bb9b6f28 100644
Binary files a/assets/screenshots/shadowed/screenshot_clusters.png and b/assets/screenshots/shadowed/screenshot_clusters.png differ
diff --git a/assets/screenshots/shadowed/screenshot_dashboard_tablet.png b/assets/screenshots/shadowed/screenshot_dashboard_tablet.png
index 4a828ce4..b4e29ec9 100644
Binary files a/assets/screenshots/shadowed/screenshot_dashboard_tablet.png and b/assets/screenshots/shadowed/screenshot_dashboard_tablet.png differ
diff --git a/conf/vendor/agent.yml b/conf/vendor/agent.yml
index 542c0e69..4541a053 100644
--- a/conf/vendor/agent.yml
+++ b/conf/vendor/agent.yml
@@ -59,7 +59,7 @@ slurmrestd:
     doc: Path to slurmrestd UNIX socket
   version:
     type: str
-    default: '0.0.39'
+    default: '0.0.40'
     doc: |
       Slurm REST API version.
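
Reviewer note: the `version` default bump above means the agent now targets every slurmrestd route under the v0.0.40 prefix, matching the curl examples updated later in the docs. A minimal TypeScript sketch of how the configured version composes endpoint paths (`slurmEndpoint` is a hypothetical helper for illustration, not code from this patch):

    // Hypothetical helper: build slurmrestd endpoint paths from the
    // configured API version ([slurmrestd] > version in agent configuration).
    function slurmEndpoint(version: string, resource: string, db = false): string {
      const root = db ? 'slurmdb' : 'slurm'
      return `/${root}/v${version}/${resource}`
    }

    slurmEndpoint('0.0.40', 'ping') // => "/slurm/v0.0.40/ping"
    slurmEndpoint('0.0.40', 'config', true) // => "/slurmdb/v0.0.40/config"
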
@@ -101,7 +101,6 @@ filters:
       - partition
       - priority
       - qos
-      - required
       - script
       - state
       - steps
@@ -131,7 +130,6 @@ filters:
       - standard_error
       - standard_input
      - standard_output
-      - state_reason
       - tasks
       - tres_req_str
     doc: |
diff --git a/dev/crawl-tests-assets b/dev/crawl-tests-assets
index aa9d5943..49478534 100755
--- a/dev/crawl-tests-assets
+++ b/dev/crawl-tests-assets
@@ -14,6 +14,7 @@ import getpass
 import socket
 import shlex
 import random
+import os
 import logging
 
 import requests
@@ -39,6 +40,7 @@ USER = getpass.getuser()
 METRICS_PREFERRED_CLUSTER = "emulator"
 # Map between infrastructure names and cluster names that are visible in Slurm-web.
 MAP_CLUSTER_NAMES = {"emulator": "atlas"}
+ADMIN_PASSWORD_ENV_VAR = "SLURMWEB_DEV_ADMIN_PASSWORD"
 
 
 def slurmweb_cluster_name(infrastructure: str):
@@ -172,13 +174,13 @@ def crawl_slurmrestd(socket: Path) -> None:
 
     session = requests.Session()
     prefix = "http+unix://slurmrestd/"
-    api = "0.0.39"
+    api = "0.0.40"
     session.mount(prefix, SlurmrestdUnixAdapter(socket))
 
     # Get Slurm version
     text, _, _ = query_slurmrestd(session, prefix, f"/slurm/v{api}/ping")
     ping = json.loads(text)
-    release = ping["meta"]["Slurm"]["release"]
+    release = ping["meta"]["slurm"]["release"]
     version = release.rsplit(".", 1)[0]
     logger.info("Slurm version: %s release: %s", version, release)
@@ -238,7 +240,7 @@ def crawl_slurmrestd(socket: Path) -> None:
     )
 
     def dump_job_state(state: str):
-        if _job["job_state"] == state:
+        if state in _job["job_state"]:
             dump_slurmrestd_query(
                 session,
                 requests_statuses,
@@ -317,6 +319,8 @@ def crawl_slurmrestd(socket: Path) -> None:
         assets_path,
         "slurm-nodes",
         skip_exist=False,
+        limit_dump=100,
+        limit_key="nodes",
     )
 
     def dump_node_state():
@@ -387,7 +391,7 @@ def crawl_slurmrestd(socket: Path) -> None:
 
     # Save resulting status file
     with open(status_file, "w+") as fh:
-        json.dump(requests_statuses, fh, indent=2)
+        json.dump(requests_statuses, fh, indent=2, sort_keys=True)
         fh.write("\n")
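
Reviewer note: two v0.0.40 response-shape changes drive the edits above: the ping metadata key is now lowercase (`meta.slurm` instead of `meta.Slurm`), and `job_state` is a list of state flags rather than a single string, so equality tests become membership tests. A TypeScript sketch of the same logic (the interfaces are illustrative assumptions, not types from this patch):

    // v0.0.40 ping metadata nests the release under a lowercase "slurm" key.
    interface PingMeta { slurm: { release: string } }

    // Jobs now carry a list of state flags, e.g. ["PENDING", "REQUEUED"].
    interface SlurmJob { job_id: number; job_state: string[] }

    // Equality against a single string no longer works; test membership instead.
    function hasState(job: SlurmJob, state: string): boolean {
      return job.job_state.includes(state)
    }
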
@@ -438,7 +442,15 @@ def gateway_url(dev_tmp_dir):
 def user_token(url: str, user: str):
     """Ask user password interactively, authenticate on gateway and return
     authentication JWT."""
-    password = getpass.getpass(prompt=f"Password for {user} on gateway: ")
+
+    try:
+        password = os.environ[ADMIN_PASSWORD_ENV_VAR]
+    except KeyError:
+        logger.info(
+            "Unable to read admin password from environment, opening interactive "
+            "prompt."
+        )
+        password = getpass.getpass(prompt=f"Password for {user} on gateway: ")
 
     response = requests.post(
         f"{url}/api/login", json={"user": user, "password": password}
@@ -461,6 +473,7 @@ def dump_component_query(
     asset_name: dict[int, str] | str,
     skip_exist: bool = True,
     prettify: bool = True,
+    limit_dump=0,
 ) -> Any:
     """Send GET HTTP request to Slurm-web component pointed by URL and save JSON
     result in assets directory."""
@@ -499,7 +512,10 @@ def dump_component_query(
     else:
         with open(asset, "w+") as fh:
             if asset.suffix == ".json":
-                fh.write(json.dumps(data, indent=2 if prettify else None))
+                _data = data
+                if limit_dump:
+                    _data = _data[:limit_dump]
+                fh.write(json.dumps(_data, indent=2 if prettify else None))
             else:
                 fh.write(data)
     return data
@@ -568,6 +584,7 @@ def crawl_gateway(cluster: str, infrastructure: str, dev_tmp_dir: Path) -> str:
         assets_path,
         "jobs",
         skip_exist=False,
+        limit_dump=100,
     )
 
     if not (len(jobs)):
@@ -578,7 +595,7 @@ def crawl_gateway(cluster: str, infrastructure: str, dev_tmp_dir: Path) -> str:
     min_job_id = jobs[0]["job_id"]
 
     def dump_job_state() -> None:
-        if _job["job_state"] == state:
+        if state in _job["job_state"]:
             dump_component_query(
                 requests_statuses,
                 url,
@@ -696,7 +713,7 @@ def crawl_gateway(cluster: str, infrastructure: str, dev_tmp_dir: Path) -> str:
 
     # Save resulting status file
     with open(status_file, "w+") as fh:
-        json.dump(requests_statuses, fh, indent=2)
+        json.dump(requests_statuses, fh, indent=2, sort_keys=True)
         fh.write("\n")
 
     return token
@@ -742,7 +759,13 @@ def crawl_agent(port: int, token: str, metrics: bool) -> None:
         "stats",
     )
     dump_component_query(
-        requests_statuses, url, f"/v{get_version()}/jobs", headers, assets_path, "jobs"
+        requests_statuses,
+        url,
+        f"/v{get_version()}/jobs",
+        headers,
+        assets_path,
+        "jobs",
+        limit_dump=100,
     )
     nodes = dump_component_query(
         requests_statuses,
@@ -813,7 +836,7 @@ def crawl_agent(port: int, token: str, metrics: bool) -> None:
     # FIXME: Download unknown job/node
     # Save resulting status file
     with open(status_file, "w+") as fh:
-        json.dump(requests_statuses, fh, indent=2)
+        json.dump(requests_statuses, fh, indent=2, sort_keys=True)
         fh.write("\n")
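
Reviewer note: the new `limit_dump` argument above keeps test assets small by truncating dumped lists to the first N records; for slurmrestd responses that wrap lists in an envelope, `limit_key` selects the member to truncate. A TypeScript sketch of the truncation rule, assuming the same semantics as the Python above:

    // Truncate dumped data to the first `limit` records when a positive
    // limit is given; a limit of 0 keeps the full payload (the default).
    function truncateDump<T>(data: T[], limit = 0): T[] {
      return limit > 0 ? data.slice(0, limit) : data
    }
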
diff --git a/dev/firehpc/conf/tiny/group_vars/all.yml b/dev/firehpc/conf/tiny/group_vars/all.yml
index ae2b4b59..70461943 100644
--- a/dev/firehpc/conf/tiny/group_vars/all.yml
+++ b/dev/firehpc/conf/tiny/group_vars/all.yml
@@ -1,7 +1,7 @@
 common_with_devs_repos: true
 common_hpckit_derivatives:
 - main
-- slurm24.05
+- slurm24.11
 slurm_with_jwt: false
 slurm_params:
   PriorityType: priority/multifactor
diff --git a/docs/antora.yml b/docs/antora.yml
index 46a37bed..4c6313cb 100644
--- a/docs/antora.yml
+++ b/docs/antora.yml
@@ -8,6 +8,7 @@ asciidoc:
     source-language: asciidoc@
     table-caption: false
     version: 4.0.0
+    api_version: 0.0.40
 nav:
 - modules/overview/nav.adoc
 - modules/install/nav.adoc
diff --git a/docs/modules/conf/examples/agent.ini b/docs/modules/conf/examples/agent.ini
index b437c619..f3c72504 100644
--- a/docs/modules/conf/examples/agent.ini
+++ b/docs/modules/conf/examples/agent.ini
@@ -81,8 +81,8 @@ socket=/run/slurmrestd/slurmrestd.socket
 # rather than end users. Slurm-web is officially tested and validated with
 # the default value only.
 #
-# Default value: 0.0.39
-version=0.0.39
+# Default value: 0.0.40
+version=0.0.40
 
 [filters]
 
@@ -100,6 +100,7 @@ version=0.0.39
 # - qos
 # - cpus
 # - node_count
+# - nodes
 jobs=
     job_id
     user_name
@@ -111,6 +112,7 @@ jobs=
     qos
     cpus
     node_count
+    nodes
 
 # List of slurmdbd job fields selected in slurmrestd API when retrieving a
 # unique job, all other fields are filtered out.
@@ -126,7 +128,6 @@ jobs=
 # - partition
 # - priority
 # - qos
-# - required
 # - script
 # - state
 # - steps
@@ -148,7 +149,6 @@ acctjob=
     partition
     priority
     qos
-    required
     script
     state
     steps
@@ -176,7 +176,6 @@ acctjob=
 # - standard_error
 # - standard_input
 # - standard_output
-# - state_reason
 # - tasks
 # - tres_req_str
 ctldjob=
@@ -192,7 +191,6 @@ ctldjob=
     standard_error
     standard_input
     standard_output
-    state_reason
     tasks
     tres_req_str
diff --git a/docs/modules/conf/partials/conf-agent.adoc b/docs/modules/conf/partials/conf-agent.adoc
index 63d9e45d..deae5e7e 100644
--- a/docs/modules/conf/partials/conf-agent.adoc
+++ b/docs/modules/conf/partials/conf-agent.adoc
@@ -159,7 +159,7 @@ the default value only.
 
-*Default:* `0.0.39`
+*Default:* `0.0.40`
 
 |-
 
@@ -207,6 +207,8 @@ jobs, all other fields are filtered out.
 
 * `node_count`
 
+* `nodes`
+
 |-
 
@@ -242,8 +244,6 @@ unique job, all other fields are filtered out.
 
 * `qos`
 
-* `required`
-
 * `script`
 
 * `state`
 
@@ -303,8 +303,6 @@ unique job, all other fields are filtered out.
 
 * `standard_output`
 
-* `state_reason`
-
 * `tasks`
 
 * `tres_req_str`
diff --git a/docs/modules/install/pages/quickstart.adoc b/docs/modules/install/pages/quickstart.adoc
index 62dd8011..7d7b4fd5 100644
--- a/docs/modules/install/pages/quickstart.adoc
+++ b/docs/modules/install/pages/quickstart.adoc
@@ -2,9 +2,9 @@
 
 == Requirements
 
-:fn-slurm-version: footnote:slurm-version[Slurm-web {version} actually requires Slurm REST API v0.0.39 available in Slurm 23.02 and above. Please refer to xref:overview:architecture.adoc#slurm-versions[Slurm REST API versions section] for more details.]
+:fn-slurm-version: footnote:slurm-version[Slurm-web {version} actually requires Slurm REST API v{api_version} available in Slurm 23.11 and above. Please refer to xref:overview:architecture.adoc#slurm-versions[Slurm REST API versions section] for more details.]
 
-* Cluster with Slurm >= 23.02 {fn-slurm-version} and
+* Cluster with Slurm >= 23.11 {fn-slurm-version} and
   https://slurm.schedmd.com/accounting.html[accounting enabled]
 * Host installed with a supported GNU/Linux distribution among:
 ** CentOS
@@ -132,16 +132,18 @@ Enable and start `slurmrestd` service:
 
 To check `slurmrestd` daemon is properly running, run this command:
 
-[source,console]
+[source,console,subs=attributes]
 ----
-# curl --unix-socket /run/slurmrestd/slurmrestd.socket http://slurm/slurm/v0.0.39/diag
+# curl --unix-socket /run/slurmrestd/slurmrestd.socket http://slurm/slurm/v{api_version}/diag
 {
   "meta": {
     "plugin": {
-      "type": "openapi\/v0.0.39",
-      "name": "Slurm OpenAPI v0.0.39",
-      "data_parser": "v0.0.39"
-    },
+      "type": "openapi\/slurmctld",
+      "name": "Slurm OpenAPI slurmctld",
+      "data_parser": "data_parser\/v{api_version}",
+      "accounting_storage": "accounting_storage\/slurmdbd"
+    },
+  }
 …
 }
 ----
diff --git a/docs/modules/misc/pages/troubleshooting.adoc b/docs/modules/misc/pages/troubleshooting.adoc
index 5dcf4414..aeb797e1 100644
--- a/docs/modules/misc/pages/troubleshooting.adoc
+++ b/docs/modules/misc/pages/troubleshooting.adoc
@@ -9,17 +9,17 @@ This page contains troubleshooting tips to help find the cause of issues.
 Test Slurm `slurmrestd` API is properly responding on Unix socket with this
 command:
 
-[source,console]
+[source,console,subs=attributes]
 ----
-$ curl --silent --unix-socket /run/slurmrestd/slurmrestd.socket http://slurm/slurm/v0.0.39/diag | \
+$ curl --silent --unix-socket /run/slurmrestd/slurmrestd.socket http://slurm/slurm/v{api_version}/diag | \
   jq '.statistics | with_entries(select(.key | startswith("jobs")))'
 {
-  "jobs_submitted": 0,
-  "jobs_started": 0,
-  "jobs_completed": 0,
+  "jobs_submitted": 385,
+  "jobs_started": 407,
+  "jobs_completed": 411,
   "jobs_canceled": 0,
   "jobs_failed": 0,
-  "jobs_pending": 40,
+  "jobs_pending": 0,
   "jobs_running": 0
 }
 ----
@@ -29,9 +29,9 @@ cluster.
 
 Test Slurm accounting in REST API with this command:
 
-[source,console]
+[source,console,subs=attributes]
 ----
-$ curl --silent --unix-socket /run/slurmrestd/slurmrestd.socket http://slurm/slurmdb/v0.0.39/config | \
+$ curl --silent --unix-socket /run/slurmrestd/slurmrestd.socket http://slurm/slurmdb/v{api_version}/config | \
   jq .clusters[].nodes
 "cn[1-4]"
 ----
diff --git a/docs/modules/overview/pages/architecture.adoc b/docs/modules/overview/pages/architecture.adoc
index 441768c3..221d7fb4 100644
--- a/docs/modules/overview/pages/architecture.adoc
+++ b/docs/modules/overview/pages/architecture.adoc
@@ -107,8 +107,8 @@ to the *agents* deployed on the clusters.
 == Slurm REST API versions
 
 Slurm-web {version} is officially tested and supported with Slurm REST API
-*v0.0.39*. This version of Slurm REST API is available in Slurm 23.02, 23.11 and
-24.05.
+*v{api_version}*. This version of Slurm REST API is available in Slurm 23.11,
+24.05 and 24.11.
 
 This table represents all Slurm REST API versions supported by the latest Slurm
 releases:
@@ -123,30 +123,23 @@ releases:
 |*Compatible*
 |*Deprecated*
 
-|22.05
-|0.0.38
-|0.0.37
-|0.0.36
-
 |23.02
-|*0.0.39*
+|0.0.39
 |0.0.38
 |0.0.37
 
 |23.11
-|0.0.40
-|*0.0.39*
+|*0.0.40*
+|0.0.39
 |0.0.38
 
 |24.05
 |0.0.41
-|0.0.40
-|*0.0.39*
-|===
+|*0.0.40*
+|0.0.39
 
-NOTE: While Slurm-web is officially tested with a specific version of Slurm REST
-API, there are actually few changes between API versions (see
-{slurmrestd-release-notes}[Slurm OpenAPI plugin release notes] for details) so
-there are chances that it works with other versions. This can be tested by
-setting `[slurmrestd]>version` the in _agent_ configuration file. Use it at your
-own risk!
+|24.11
+|0.0.42
+|0.0.41
+|*0.0.40*
+|===
diff --git a/frontend/src/components/job/JobFieldExitCode.vue b/frontend/src/components/job/JobFieldExitCode.vue
index 8e5edd84..a5dfabb4 100644
--- a/frontend/src/components/job/JobFieldExitCode.vue
+++ b/frontend/src/components/job/JobFieldExitCode.vue
@@ -20,6 +20,6 @@ defineProps({
diff --git a/frontend/src/components/job/JobStatusBadge.vue b/frontend/src/components/job/JobStatusBadge.vue
index 6e985d76..25f2c070 100644
--- a/frontend/src/components/job/JobStatusBadge.vue
+++ b/frontend/src/components/job/JobStatusBadge.vue
@@ -10,7 +10,10 @@ import { computed } from 'vue'
 
 const props = defineProps({
-  status: String,
+  status: {
+    type: Array,
+    required: true
+  },
   large: {
     type: Boolean,
     default: false
@@ -26,28 +29,34 @@ interface JobLabelColors {
 }
 
 const statusColor = computed(() => {
-  switch (props.status) {
-    case 'RUNNING':
-      return {
-        span: 'bg-green-100 text-green-700',
-        circle: 'fill-green-500'
-      }
-    case 'PENDING':
-      return {
-        span: 'bg-yellow-100 text-yellow-800',
-        circle: 'fill-yellow-500'
-      }
-    case 'CANCELLED':
-      return {
-        span: 'bg-purple-100 text-purple-700',
-        circle: 'fill-purple-500'
-      }
-    default:
-      return {
-        span: 'bg-gray-100 text-gray-600',
-        circle: 'fill-gray-400'
-      }
-  }
+  if (props.status.includes('RUNNING'))
+    return {
+      span: 'bg-green-100 text-green-700',
+      circle: 'fill-green-500'
+    }
+  else if (props.status.includes('PENDING'))
+    return {
+      span: 'bg-yellow-100 text-yellow-800',
+      circle: 'fill-yellow-500'
+    }
+  else if (props.status.includes('CANCELLED'))
+    return {
+      span: 'bg-purple-100 text-purple-700',
+      circle: 'fill-purple-500'
+    }
+  else
+    return {
+      span: 'bg-gray-100 text-gray-600',
+      circle: 'fill-gray-400'
+    }
+})
+
+const mainStatus = computed(() => {
+  if (props.status.includes('RUNNING')) return 'RUNNING'
+  else if (props.status.includes('PENDING')) return 'PENDING'
+  else if (props.status.includes('CANCELLED')) return 'CANCELLED'
+  else if (props.status.includes('COMPLETED')) return 'COMPLETED'
+  else return props.status[0] as string
 })
@@ -66,7 +75,7 @@ const statusColor = computed(() => {
       {{ label }}
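
Reviewer note: many scalar fields in the GatewayAPI.ts types below switch from `number` to `ClusterOptionalNumber`, the optional-number wrapper used in Slurm REST API v0.0.40 payloads. A sketch of the assumed wrapper shape and a typical guard (`renderOptional` is illustrative, not part of this patch):

    // Assumed shape of Slurm's optional-number wrapper in v0.0.40 payloads:
    // the value is meaningful only when `set` is true.
    interface ClusterOptionalNumber {
      set: boolean
      infinite: boolean
      number: number
    }

    function renderOptional(value: ClusterOptionalNumber): string {
      if (!value.set) return '-'
      if (value.infinite) return 'unlimited'
      return value.number.toString()
    }
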
diff --git a/frontend/src/composables/GatewayAPI.ts b/frontend/src/composables/GatewayAPI.ts
index 00518269..903eb46e 100644
--- a/frontend/src/composables/GatewayAPI.ts
+++ b/frontend/src/composables/GatewayAPI.ts
@@ -65,16 +65,17 @@ export interface ClusterStats {
 }
 
 export interface ClusterJob {
-  job_id: number
-  user_name: string
   account: string
-  job_state: string
-  state_reason: string
+  cpus: ClusterOptionalNumber
+  job_id: number
+  job_state: string[]
+  node_count: ClusterOptionalNumber
+  nodes: string
   partition: string
   priority: ClusterOptionalNumber
   qos: string
-  cpus: ClusterOptionalNumber
-  node_count: ClusterOptionalNumber
+  state_reason: string
+  user_name: string
 }
 
 export interface ClusterTRES {
@@ -108,8 +109,41 @@ export interface ClusterJobTime {
   user: ClusterPreciseTime
 }
 
+interface ClusterAccountedResources {
+  average: ClusterTRES[]
+  max: ClusterTRES[]
+  min: ClusterTRES[]
+  total: ClusterTRES[]
+}
+
 export interface ClusterJobStep {
-  step: { id: { job_id: number; step_id: string }; name: string }
+  CPU: {
+    governor: string
+    requested_frequency: { max: ClusterOptionalNumber; min: ClusterOptionalNumber }
+  }
+  exit_code: ClusterJobExitCode
+  kill_request_user: string
+  nodes: { count: number; list: string[]; range: string }
+  pid: string
+  state: string[]
+  statistics: { CPU: { actual_frequency: number }; energy: { consumed: ClusterOptionalNumber } }
+  step: { id: string; name: string }
+  task: { distribution: string }
+  tasks: { count: number }
+  time: {
+    elapsed: number
+    end: ClusterOptionalNumber
+    start: ClusterOptionalNumber
+    suspended: number
+    system: ClusterPreciseTime
+    total: ClusterPreciseTime
+    user: ClusterPreciseTime
+  }
+  tres: {
+    allocated: ClusterTRES[]
+    consumed: ClusterAccountedResources
+    requested: ClusterAccountedResources
+  }
 }
 
 export interface ClusterJobComment {
@@ -119,13 +153,14 @@ export interface ClusterJobComment {
 }
 
 export interface ClusterJobExitCode {
-  return_code: number
-  status: string
+  return_code: ClusterOptionalNumber
+  signal: { id: ClusterOptionalNumber; name: string }
+  status: string[]
 }
 
 export interface ClusterIndividualJob {
-  accrue_time: number
-  association: { account: string; cluster: string; partition: string; user: string }
+  accrue_time: ClusterOptionalNumber
+  association: { account: string; cluster: string; id: number; partition: string; user: string }
   batch_flag: boolean
   command: string
   comment: ClusterJobComment
@@ -135,7 +170,7 @@ export interface ClusterIndividualJob {
   exclusive: string[]
   exit_code: ClusterJobExitCode
   group: string
-  last_sched_evaluation: number
+  last_sched_evaluation: ClusterOptionalNumber
   name: string
   node_count: ClusterOptionalNumber
   nodes: string
@@ -146,7 +181,7 @@ export interface ClusterIndividualJob {
   standard_error: string
   standard_input: string
   standard_output: string
-  state: { current: string; reason: string }
+  state: { current: string[]; reason: string }
   steps: ClusterJobStep[]
   submit_line: string
   tasks: ClusterOptionalNumber
@@ -186,23 +221,24 @@ export function getNodeAllocationState(node: ClusterNode): ClusterNodeAllocatedS
   }
 }
 
 export interface ClusterNode {
-  name: string
-  sockets: number
+  alloc_cpus: number
+  alloc_idle_cpus: number
   cores: number
   cpus: number
+  name: string
+  partitions: Array
   real_memory: number
+  sockets: number
   state: Array
   reason: string
-  partitions: Array
 }
 
 export interface ClusterIndividualNode extends ClusterNode {
   architecture: string
   operating_system: string
-  boot_time: number
-  last_busy: number
+  boot_time: ClusterOptionalNumber
+  last_busy: ClusterOptionalNumber
   threads: number
-  alloc_cpus: number
   alloc_memory: number
 }
 
@@ -212,54 +248,80 @@ export interface ClusterPartition {
 }
 
 export interface ClusterQos {
-  name: string
   description: string
-  priority: ClusterOptionalNumber
   flags: string[]
   limits: {
+    factor: ClusterOptionalNumber
+    grace_time: number
     max: {
+      accruing: {
+        per: {
+          account: ClusterOptionalNumber // MaxJobsAccruePerAccount
+          user: ClusterOptionalNumber // MaxJobsAccruePerUser
+        }
+      }
       active_jobs: {
+        accruing: ClusterOptionalNumber // GrpJobsAccrue
         count: ClusterOptionalNumber // GrpJobs
       }
+      jobs: {
+        active_jobs: {
+          per: {
+            account: ClusterOptionalNumber // MaxJobsPerAccount
+            user: ClusterOptionalNumber // MaxJobsPerUser
+          }
+        }
+        per: {
+          account: ClusterOptionalNumber // MaxJobsSubmitPerAccount
+          user: ClusterOptionalNumber // MaxJobsSubmitPerUser
+        }
+      }
       tres: {
-        total: ClusterTRES[] // GrpTRES
+        minutes: {
+          per: {
+            account: ClusterTRES[] // MaxTRESRunMinsPerAccount
+            job: ClusterTRES[] // MaxTRESMinsPerJob
+            qos: ClusterTRES[] // GrpTRESMins
+            user: ClusterTRES[] // MaxTRESRunMinsPerUser
+          }
+        }
         per: {
           account: ClusterTRES[] // MaxTRESPA
           job: ClusterTRES[] // MaxTRES
           node: ClusterTRES[] // MaxTRESPerNode
           user: ClusterTRES[] // MaxTRESPerUser
         }
+        total: ClusterTRES[] // GrpTRES
       }
       wall_clock: {
         per: {
           job: ClusterOptionalNumber // MaxWall, in minutes
+          qos: ClusterOptionalNumber // GrpWall
         }
       }
-      jobs: {
-        active_jobs: {
-          per: {
-            account: ClusterOptionalNumber // MaxJobsPerAccount
-            user: ClusterOptionalNumber // MaxJobsPerUser
-          }
-        }
+    }
+    min: {
+      priority_threshold: ClusterOptionalNumber // MinPrioThreshold
+      tres: {
         per: {
-          account: ClusterOptionalNumber // MaxJobsSubmitPerAccount
-          user: ClusterOptionalNumber // MaxJobsSubmitPerUser
+          job: ClusterTRES[] // MinTRES
         }
       }
     }
   }
+  name: string
+  priority: ClusterOptionalNumber
 }
 
 export interface ClusterReservation {
-  name: string
-  users: string
   accounts: string
-  node_list: string
-  start_time: number
-  node_count: number
-  end_time: number
+  end_time: ClusterOptionalNumber
   flags: string[]
+  name: string
+  node_count: number
+  node_list: string
+  start_time: ClusterOptionalNumber
+  users: string
 }
 
 export type MetricValue = [number, number]
diff --git a/frontend/src/stores/runtime.ts b/frontend/src/stores/runtime.ts
index f0db1a5c..485182c4 100644
--- a/frontend/src/stores/runtime.ts
+++ b/frontend/src/stores/runtime.ts
@@ -127,7 +127,9 @@ export class JobsViewSettings {
     if (this.filters.states.length != 0) {
       if (
         !this.filters.states.some((state) => {
-          return state.toLocaleLowerCase() == job.job_state.toLocaleLowerCase()
+          return job.job_state
+            .map((_state) => _state.toLocaleLowerCase())
+            .includes(state.toLocaleLowerCase())
         })
       ) {
         return false
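
Reviewer note: with `boot_time` and `last_busy` becoming optional numbers, the NodeView.vue template below can no longer multiply the raw value directly; the epoch must be unwrapped first. A TypeScript sketch of the guard the template needs (`formatTimestamp` is a hypothetical helper, assuming the optional-number shape sketched earlier):

    interface ClusterOptionalNumber { set: boolean; infinite: boolean; number: number }

    // Convert an optional epoch (in seconds) to a local date string, falling
    // back to a placeholder when the timestamp is unset.
    function formatTimestamp(value: ClusterOptionalNumber): string {
      return value.set ? new Date(value.number * 1000).toLocaleString() : '-'
    }
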
diff --git a/frontend/src/views/NodeView.vue b/frontend/src/views/NodeView.vue
index 6e9d62b6..02af3961 100644
--- a/frontend/src/views/NodeView.vue
+++ b/frontend/src/views/NodeView.vue
@@ -190,13 +190,19 @@ if (runtimeStore.hasPermission('view-jobs')) {
             Reboot
-            {{ new Date(node.data.value.boot_time * 10 ** 3).toLocaleString() }}
+
+
             Last busy
-            {{ new Date(node.data.value.last_busy * 10 ** 3).toLocaleString() }}
+
+
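
Reviewer note: exit codes are also structured in v0.0.40: `return_code` and the signal id are optional numbers and `status` is a list of flags, as typed in `ClusterJobExitCode` above. A sketch of how a view could render one (hypothetical helper under those assumptions, not code from this patch):

    interface ClusterOptionalNumber { set: boolean; infinite: boolean; number: number }
    interface ClusterJobExitCode {
      return_code: ClusterOptionalNumber
      signal: { id: ClusterOptionalNumber; name: string }
      status: string[]
    }

    // Prefer the signal when one is set, then the return code, then the
    // raw status flags.
    function describeExitCode(code: ClusterJobExitCode): string {
      if (code.signal.id.set) return `signal ${code.signal.id.number} (${code.signal.name})`
      if (code.return_code.set) return `exit code ${code.return_code.number}`
      return code.status.join(', ') || 'unknown'
    }
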