From 1a05eb6f574da63eb5e4043125e0f53f7905dc85 Mon Sep 17 00:00:00 2001
From: Bianco95
Date: Fri, 14 Jun 2024 12:36:22 +0200
Subject: [PATCH] minor

---
 pkg/common/func.go                        |  4 ++--
 pkg/docker/Create.go                      | 12 ++++++++----
 pkg/docker/Status.go                      |  1 -
 pkg/docker/aux.go                         |  1 -
 pkg/docker/gpustrategies/NvidiaHandler.go | 22 +++++-----------------
 5 files changed, 15 insertions(+), 25 deletions(-)

diff --git a/pkg/common/func.go b/pkg/common/func.go
index 3ba8d84..4b442c4 100644
--- a/pkg/common/func.go
+++ b/pkg/common/func.go
@@ -50,10 +50,10 @@ func NewInterLinkConfig() (InterLinkConfig, error) {
 		return InterLinkConfig{}, err
 	}
 
-	log.G(context.Background()).Info("Loading InterLink config from " + path)
+	log.G(context.Background()).Info("\u2705 Loading InterLink config from " + path)
 	yfile, err := os.ReadFile(path)
 	if err != nil {
-		log.G(context.Background()).Error("Error opening config file, exiting...")
+		log.G(context.Background()).Error("\u274C Error opening config file, exiting...")
 		return InterLinkConfig{}, err
 	}
 	yaml.Unmarshal(yfile, &InterLinkConfigInst)
diff --git a/pkg/docker/Create.go b/pkg/docker/Create.go
index 822d4e0..cfb8d2a 100644
--- a/pkg/docker/Create.go
+++ b/pkg/docker/Create.go
@@ -66,8 +66,8 @@ func (h *SidecarHandler) prepareDockerRuns(podData commonIL.RetrievedPodData, w
 
 			if volume.PersistentVolumeClaim != nil {
 				if _, ok := pathsOfVolumes[volume.PersistentVolumeClaim.ClaimName]; !ok {
-					// WIP: this is a temporary solution to mount CVMFS volumes
-					pathsOfVolumes[volume.PersistentVolumeClaim.ClaimName] = "/mnt/cvmfs"
+					// WIP: this is a temporary solution to mount CVMFS volumes for the PersistentVolumeClaim case
+					pathsOfVolumes[volume.PersistentVolumeClaim.ClaimName] = "/cvmfs"
 				}
 			}
 
@@ -94,10 +94,10 @@ func (h *SidecarHandler) prepareDockerRuns(podData commonIL.RetrievedPodData, w
 
 				// if the container is requesting 0 GPU, skip the GPU assignment
 				if numGpusRequested == 0 {
-					log.G(h.Ctx).Info("Container " + containerName + " is not requesting a GPU")
+					log.G(h.Ctx).Info("\u2705 Container " + containerName + " is not requesting a GPU")
 				} else {
-					log.G(h.Ctx).Info("Container " + containerName + " is requesting " + val.String() + " GPU")
+					log.G(h.Ctx).Info("\u2705 Container " + containerName + " is requesting " + val.String() + " GPU")
 
 					isGpuRequested = true
 
@@ -327,6 +327,10 @@ func (h *SidecarHandler) CreateHandler(w http.ResponseWriter, r *http.Request) {
 
 	dindContainerArgs := []string{"run"}
 	dindContainerArgs = append(dindContainerArgs, gpuArgsAsArray...)
+	if _, err := os.Stat("/cvmfs"); err == nil {
+		dindContainerArgs = append(dindContainerArgs, "-v", "/cvmfs:/cvmfs")
+	}
+
 	dindContainerArgs = append(dindContainerArgs, "--privileged", "-v", "/home:/home", "-v", "/var/lib/docker/overlay2:/var/lib/docker/overlay2", "-v", "/var/lib/docker/image:/var/lib/docker/image", "-d", "--name", string(data.Pod.UID)+"_dind", dindImage)
 
 	var dindContainerID string
diff --git a/pkg/docker/Status.go b/pkg/docker/Status.go
index f80489d..4e002f9 100644
--- a/pkg/docker/Status.go
+++ b/pkg/docker/Status.go
@@ -67,7 +67,6 @@ func (h *SidecarHandler) StatusHandler(w http.ResponseWriter, r *http.Request) {
 
 			containerstatus := strings.Split(execReturn.Stdout, " ")
 
-			// TODO: why first container?
if execReturn.Stdout != "" { log.G(h.Ctx).Info("\u2705 [STATUS CALL] The container " + container.Name + " is in the state: " + containerstatus[0]) diff --git a/pkg/docker/aux.go b/pkg/docker/aux.go index 21a5b95..93cc0cf 100644 --- a/pkg/docker/aux.go +++ b/pkg/docker/aux.go @@ -127,7 +127,6 @@ func prepareMounts(Ctx context.Context, config commonIL.InterLinkConfig, data co } for _, emptyDir := range cont.EmptyDirs { - log.G(Ctx).Info("-- EmptyDir to handle " + emptyDir) if containerName == podNamespace+"-"+podUID+"-"+cont.Name { paths, err := mountData(Ctx, config, data.Pod, emptyDir, container) if err != nil { diff --git a/pkg/docker/gpustrategies/NvidiaHandler.go b/pkg/docker/gpustrategies/NvidiaHandler.go index 5c454f0..4b93226 100644 --- a/pkg/docker/gpustrategies/NvidiaHandler.go +++ b/pkg/docker/gpustrategies/NvidiaHandler.go @@ -58,8 +58,6 @@ func (a *GPUManager) Init() error { // Discover implements the Discover function of the GPUManager interface func (a *GPUManager) Discover() error { - log.G(a.Ctx).Info("Discovering GPUs...") - count, ret := nvml.DeviceGetCount() if ret != nvml.SUCCESS { return fmt.Errorf("Unable to get device count: %v", nvml.ErrorString(ret)) @@ -92,12 +90,12 @@ func (a *GPUManager) Discover() error { // print the GPUSpecsList if the length is greater than 0 if len(a.GPUSpecsList) > 0 { - log.G(a.Ctx).Info("Discovered GPUs:") + log.G(a.Ctx).Info("\u2705 Discovered GPUs:") for _, gpuSpec := range a.GPUSpecsList { - log.G(a.Ctx).Info(fmt.Sprintf("Name: %s, UUID: %s, Type: %s, Available: %t, Index: %d", gpuSpec.Name, gpuSpec.UUID, gpuSpec.Type, gpuSpec.Available, gpuSpec.Index)) + log.G(a.Ctx).Info(fmt.Sprintf("\u2705 Name: %s, UUID: %s, Type: %s, Available: %t, Index: %d", gpuSpec.Name, gpuSpec.UUID, gpuSpec.Type, gpuSpec.Available, gpuSpec.Index)) } } else { - log.G(a.Ctx).Info("No GPUs discovered") + log.G(a.Ctx).Info(" \u2705 No GPUs discovered") } return nil @@ -105,8 +103,6 @@ func (a *GPUManager) Discover() error { func (a *GPUManager) Check() error { - log.G(a.Ctx).Info("Checking the availability of GPUs...") - cli, err := client.NewEnvClient() if err != nil { return fmt.Errorf("unable to create a new Docker client: %v", err) @@ -148,9 +144,9 @@ func (a *GPUManager) Check() error { // print the GPUSpecsList that are not available for _, gpuSpec := range a.GPUSpecsList { if !gpuSpec.Available { - log.G(a.Ctx).Info(fmt.Sprintf("GPU with UUID %s is not available. It is in use by container %s", gpuSpec.UUID, gpuSpec.ContainerID)) + log.G(a.Ctx).Info(fmt.Sprintf("\u274C GPU with UUID %s is not available. 
 		} else {
-			log.G(a.Ctx).Info(fmt.Sprintf("GPU with UUID %s is available", gpuSpec.UUID))
+			log.G(a.Ctx).Info(fmt.Sprintf("\u2705 GPU with UUID %s is available", gpuSpec.UUID))
 		}
 	}
 
@@ -159,8 +155,6 @@ func (a *GPUManager) Shutdown() error {
 
-	log.G(a.Ctx).Info("Shutting down NVML...")
-
 	ret := nvml.Shutdown()
 	if ret != nvml.SUCCESS {
 		return fmt.Errorf("Unable to shutdown NVML: %v", nvml.ErrorString(ret))
 	}
 
@@ -193,8 +187,6 @@ func (a *GPUManager) Assign(UUID string, containerID string) error {
 
 func (a *GPUManager) Release(containerID string) error {
 
-	log.G(a.Ctx).Info("Releasing GPU from container " + containerID)
-
 	a.GPUSpecsMutex.Lock()
 	defer a.GPUSpecsMutex.Unlock()
 
@@ -210,8 +202,6 @@ func (a *GPUManager) Release(containerID string) error {
 		}
 	}
 
-	log.G(a.Ctx).Info("Correctly released GPU from container " + containerID)
-
 	return nil
 }
 
@@ -252,8 +242,6 @@ func (a *GPUManager) GetAndAssignAvailableGPUs(numGPUs int, containerID string)
 
 // dump the GPUSpecsList into a JSON file
 func (a *GPUManager) Dump() error {
-	log.G(a.Ctx).Info("Dumping the GPUSpecsList into a JSON file...")
-
 	// Convert the array to JSON format
 	jsonData, err := json.MarshalIndent(a.GPUSpecsList, "", " ")
 	if err != nil {