Skip to content

Commit

Permalink
Update Model.json (#1005)
Browse files Browse the repository at this point in the history
* add(mixtral): add model.json for mixtral

* archived some models + update the model.json

* add(model): add pandora 10.7b

* fix(model): update description

* fix(model): bump version and change the featured model to trinity

* fix(model): archive neuralchat

* fix(model): deprecated all old models

* fix(trinity): add cover image and change description

* fix(trinity): update cover png

* add(pandora): cover image

* fix(pandora): cover image

* add(mixtral): add model.json for mixtral

* archived some models + update the model.json

* add(model): add pandora 10.7b

* fix(model): update description

* fix(model): bump version and change the featured model to trinity

* fix(model): archive neuralchat

* fix(model): deprecated all old models

* fix(trinity): add cover image and change description

* fix(trinity): update cover png

* add(pandora): cover image

* fix(pandora): cover image

* chore: model desc nits

* fix(models): adjust the size for solars and pandoras

* add(mixtral): description

---------

Co-authored-by: 0xSage <n@pragmatic.vc>
  • Loading branch information
2 people authored and hiro-v committed Dec 20, 2023
1 parent 314df59 commit a893b59
Show file tree
Hide file tree
Showing 5 changed files with 110 additions and 88 deletions.
48 changes: 27 additions & 21 deletions models/mixtral-8x7b-instruct/model.json
Original file line number Diff line number Diff line change
@@ -1,22 +1,28 @@
{
"source_url": "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf",
"id": "mixtral-8x7b-instruct",
"object": "model",
"name": "Mixtral 8x7B Instruct Q4",
"version": "1.0",
"description": "The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms Llama 2 70B on most benchmarks we tested.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "[INST] {prompt} [/INST]"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "MistralAI, TheBloke",
"tags": ["MOE", "Foundational Model"],
"size": 26440000000
},
"engine": "nitro"
}
"source": [
{
"filename": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf",
"url": "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
}
],
"id": "mixtral-8x7b-instruct",
"object": "model",
"name": "Mixtral 8x7B Instruct Q4",
"version": "1.0",
"description": "The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms Llama 2 70B on most benchmarks we tested.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "[INST] {prompt} [/INST]",
"llama_model_path": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "MistralAI, TheBloke",
"tags": ["MOE", "Foundational Model"],
"size": 26440000000
},
"engine": "nitro"
}
Binary file removed models/openhermes-neural-7b/cover.png
Binary file not shown.
51 changes: 28 additions & 23 deletions models/pandora-10.7b-v1/model.json
Original file line number Diff line number Diff line change
@@ -1,24 +1,29 @@
{
"source_url": "https://huggingface.co/janhq/pandora-v1-10.7b-GGUF/resolve/main/pandora-v1-10.7b.Q4_K_M.gguf",
"id": "pandora-10.7b-v1",
"object": "model",
"name": "Pandora 11B Q4",
"version": "1.0",
"description": "Pandora, our research model, employs the Passthrough merging technique to merge 2x7B models into 1.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Jan",
"tags": ["11B","Finetuned", "Featured"],
"size": 6360000000,
"cover": "https://raw.githubusercontent.com/janhq/jan/main/models/pandora-10.7b-v1/cover.png"
},
"engine": "nitro"
}

"source": [
{
"filename": "pandora-v1-10.7b.Q4_K_M.gguf",
"url": "https://huggingface.co/janhq/pandora-v1-10.7b-GGUF/resolve/main/pandora-v1-10.7b.Q4_K_M.gguf"
}
],
"id": "pandora-10.7b-v1",
"object": "model",
"name": "Pandora 11B Q4",
"version": "1.0",
"description": "Pandora, our research model, employs the Passthrough merging technique to merge 2x7B models into 1.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "pandora-v1-10.7b.Q4_K_M.gguf"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Jan",
"tags": ["11B", "Finetuned", "Featured"],
"size": 6360000000,
"cover": "https://raw.githubusercontent.com/janhq/jan/main/models/pandora-10.7b-v1/cover.png"
},
"engine": "nitro"
}
49 changes: 27 additions & 22 deletions models/solar-10.7b-instruct/model.json
Original file line number Diff line number Diff line change
@@ -1,23 +1,28 @@
{
"source_url": "https://huggingface.co/janhq/Solar-10.7B-Instruct-v1.0/resolve/main/solar-10.7b-instruct-v1.0.Q4_K_M.gguf",
"id": "solar-10.7b-instruct",
"object": "model",
"name": "Solar Instruct 10.7B Q4",
"version": "1.0",
"description": "SOLAR-10.7B model built on the Llama2 architecture with Depth Up-Scaling and integrated Mistral 7B weights. Its robustness and adaptability make it ideal for fine-tuning applications, significantly enhancing performance with simple instruction-based techniques.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "### User: {prompt}\n### Assistant:"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Upstage, Jan",
"tags": ["11B","Finetuned"],
"size": 6360000000
},
"engine": "nitro"
}

"source": [
{
"filename": "solar-10.7b-instruct-v1.0.Q4_K_M.gguf",
"url": "https://huggingface.co/janhq/Solar-10.7B-Instruct-v1.0/resolve/main/solar-10.7b-instruct-v1.0.Q4_K_M.gguf"
}
],
"id": "solar-10.7b-instruct",
"object": "model",
"name": "Solar Instruct 10.7B Q4",
"version": "1.0",
"description": "SOLAR-10.7B model built on the Llama2 architecture with Depth Up-Scaling and integrated Mistral 7B weights. Its robustness and adaptability make it ideal for fine-tuning applications, significantly enhancing performance with simple instruction-based techniques.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "### User: {prompt}\n### Assistant:",
"llama_model_path": "solar-10.7b-instruct-v1.0.Q4_K_M.gguf"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Upstage, Jan",
"tags": ["11B", "Finetuned"],
"size": 6360000000
},
"engine": "nitro"
}
50 changes: 28 additions & 22 deletions models/trinity-v1-7b/model.json
Original file line number Diff line number Diff line change
@@ -1,23 +1,29 @@
{
"source_url": "https://huggingface.co/janhq/trinity-v1-GGUF/resolve/main/trinity-v1.Q4_K_M.gguf",
"id": "trinity-v1-7b",
"object": "model",
"name": "Trinity 7B Q4",
"version": "1.0",
"description": "Trinity is an experimental model merge of GreenNodeLM & LeoScorpius using the Slerp method. Recommended for daily assistance purposes.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Jan",
"tags": ["7B", "Merged", "Featured"],
"size": 4370000000,
"cover": "https://raw.githubusercontent.com/janhq/jan/main/models/trinity-v1-7b/cover.png"
},
"engine": "nitro"
}
"source": [
{
"filename": "trinity-v1.Q4_K_M.gguf",
"url": "https://huggingface.co/janhq/trinity-v1-GGUF/resolve/main/trinity-v1.Q4_K_M.gguf"
}
],
"id": "trinity-v1-7b",
"object": "model",
"name": "Trinity 7B Q4",
"version": "1.0",
"description": "Trinity is an experimental model merge of GreenNodeLM & LeoScorpius using the Slerp method. Recommended for daily assistance purposes.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "trinity-v1.Q4_K_M.gguf"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Jan",
"tags": ["7B", "Merged", "Featured"],
"size": 4370000000,
"cover": "https://raw.githubusercontent.com/janhq/jan/main/models/trinity-v1-7b/cover.png"
},
"engine": "nitro"
}

0 comments on commit a893b59

Please sign in to comment.