From 891ddafc33bcaa858d2e57d66a5c428883491787 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 13 Sep 2023 11:59:36 -0400 Subject: [PATCH] When device is Auto (the default) then we will only consider discrete GPUs otherwise fall back to CPU. --- gpt4all-chat/chatllm.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 58d68e60d71d..4ae8c8431296 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -275,7 +275,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) if (requestedDevice != "CPU") { const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString()); std::vector availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory); - if (!availableDevices.empty() && requestedDevice == "Auto") { + if (!availableDevices.empty() && requestedDevice == "Auto" && devices.front().type == 2 /*a discrete gpu*/) { m_llModelInfo.model->initializeGPUDevice(devices.front()); } else { for (LLModel::GPUDevice &d : availableDevices) {