python bindings should be quiet by default
* disable llama.cpp logging unless the GPT4ALL_VERBOSE_LLAMACPP environment
  variable is non-empty
* make the verbose flag for retrieve_model default to False (still
  overridable via the GPT4All constructor)

You should be able to run a basic test:

```python
import gpt4all
model = gpt4all.GPT4All('/Users/aaron/Downloads/rift-coder-v0-7b-q4_0.gguf')
print(model.generate('def fib(n):'))
```

and see no non-model output when it succeeds.
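
To opt back into the old chatty behavior, both knobs from this commit can be flipped; a minimal sketch (the model filename below is a placeholder, not a real download):

```python
import os

# Any non-empty value re-enables llama.cpp's own logging; set it before
# the model is constructed, since the backend checks it at load time.
os.environ["GPT4ALL_VERBOSE_LLAMACPP"] = "1"

import gpt4all

# verbose=True is forwarded from the constructor to retrieve_model.
# 'ggml-model.gguf' is a placeholder filename.
model = gpt4all.GPT4All('ggml-model.gguf', verbose=True)
```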
apage43 committed Oct 10, 2023
Commit d29c884 (parent df66226)
Showing 2 changed files with 20 additions and 3 deletions.
gpt4all-backend/llamamodel.cpp: 17 additions & 1 deletion
```diff
@@ -36,6 +36,17 @@ namespace {
 const char *modelType_ = "LLaMA";
 }
 
+static void null_log_callback(enum ggml_log_level, const char*, void*) {
+    return;
+}
+
+static bool llama_verbose() {
+    const char* var = getenv("GPT4ALL_VERBOSE_LLAMACPP");
+    if (var == nullptr) return false;
+    if (var[0] == 0) return false;
+    return true;
+}
+
 struct gpt_params {
     int32_t seed = -1; // RNG seed
     int32_t n_keep = 0; // number of tokens to keep from initial prompt
@@ -144,7 +155,9 @@ bool LLamaModel::loadModel(const std::string &modelPath)
     d_ptr->params.use_mlock = params.use_mlock;
 #endif
 #ifdef GGML_USE_METAL
-    std::cerr << "llama.cpp: using Metal" << std::endl;
+    if (llama_verbose()) {
+        std::cerr << "llama.cpp: using Metal" << std::endl;
+    }
     // metal always runs the whole model if n_gpu_layers is not 0, at least
     // currently
     d_ptr->params.n_gpu_layers = 1;
@@ -390,6 +403,9 @@ DLL_EXPORT bool magic_match(const char * fname) {
 }
 
 DLL_EXPORT LLModel *construct() {
+    if (!llama_verbose()) {
+        llama_log_set(null_log_callback, nullptr);
+    }
     return new LLamaModel;
 }
 }
```
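
For context on the C++ side: llama.cpp routes its logging through a callback installed with llama_log_set, so handing it a no-op function silences the library entirely. A standalone sketch of the same pattern used above (assuming llama.h from llama.cpp is on the include path):

```cpp
#include <cstdlib>

#include <llama.h>

// No-op sink: matches the ggml_log_callback signature and drops every message.
static void null_log_callback(enum ggml_log_level, const char *, void *) {}

int main() {
    // Mirror the commit's env-var gate: any non-empty value means verbose.
    const char *var = std::getenv("GPT4ALL_VERBOSE_LLAMACPP");
    const bool verbose = var != nullptr && var[0] != '\0';
    if (!verbose) {
        // Install the sink before any llama.cpp call that might log.
        llama_log_set(null_log_callback, nullptr);
    }
    // ... proceed to load a model as usual.
    return 0;
}
```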
gpt4all-bindings/python/gpt4all/gpt4all.py: 3 additions & 2 deletions
```diff
@@ -65,6 +65,7 @@ def __init__(
         allow_download: bool = True,
         n_threads: Optional[int] = None,
         device: Optional[str] = "cpu",
+        verbose: bool = False,
     ):
         """
         Constructor
@@ -89,7 +90,7 @@ def __init__(
         self.model_type = model_type
         self.model = pyllmodel.LLModel()
         # Retrieve model and download if allowed
-        self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download)
+        self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download, verbose=verbose)
         if device is not None:
             if device != "cpu":
                 self.model.init_gpu(model_path=self.config["path"], device=device)
@@ -117,7 +118,7 @@ def retrieve_model(
         model_name: str,
         model_path: Optional[str] = None,
         allow_download: bool = True,
-        verbose: bool = True,
+        verbose: bool = False,
     ) -> ConfigType:
         """
         Find model file, and if it doesn't exist, download the model.
```
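
Since retrieve_model's default flipped from True to False, callers who relied on its progress output now have to ask for it explicitly. A sketch of calling it directly (assuming it is exposed as a static method, as the self.retrieve_model call above suggests; the filename is a placeholder):

```python
import gpt4all

# verbose=True restores the locate/download progress messages that are
# now suppressed by default. 'ggml-model.gguf' is a placeholder filename.
config = gpt4all.GPT4All.retrieve_model(
    'ggml-model.gguf', model_path=None, allow_download=True, verbose=True
)
print(config["path"])  # resolved location of the model file
```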
