Skip to content

Commit

Permalink
added file loader for gemma model
Browse files Browse the repository at this point in the history
  • Loading branch information
TetsuakiBaba committed Aug 5, 2024
1 parent 93d9b23 commit bb47d35
Show file tree
Hide file tree
Showing 2 changed files with 24 additions and 3 deletions.
5 changes: 5 additions & 0 deletions LLM/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,11 @@
<h1 class="display-1">LLM with p5.js</h1>
<div class="row">
<div class="col-md-6">
<!-- File input ("select model" button) for loading a local model file -->
<p>
<input type="file" accept=".bin" id="load_model">
</p>

Input:<br />
<textarea id="input" style="height: 100px; width: 100%"></textarea><br />
<input type="button" id="submit" value="Get Response" disabled /><br />
Expand Down
22 changes: 19 additions & 3 deletions LLM/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,22 @@ const submit = document.getElementById('submit');
const modelFileName = 'gemma-2b-it-gpu-int4.bin'; /* Update the file name */
// const modelFileName = 'gemma-1.1-7b-it-gpu-int8.bin'; /* Update the file name */


// When the user picks a local .bin model file, start the demo with it.
// A File is already a Blob, so URL.createObjectURL(file) yields a blob:
// URL directly — there is no need to read the (potentially multi-GB)
// model into memory with a FileReader just to re-wrap it in a new Blob.
document.querySelector('#load_model').addEventListener('change', (event) => {
    const file = event.target.files[0];
    if (!file) {
        return; // no file selected (picker dismissed / selection cleared)
    }
    const blobUrl = URL.createObjectURL(file);
    runDemo(blobUrl);
});


/**
* Display newly generated partial results to the output text box.
*/
Expand All @@ -38,10 +54,11 @@ function displayPartialResults(partialResults, complete) {
}
}


/**
* Main function to run LLM Inference.
*/
async function runDemo() {
async function runDemo(model_file_path) {
const genaiFileset = await FilesetResolver.forGenAiTasks(
'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-genai/wasm');
let llmInference;
Expand All @@ -55,7 +72,7 @@ async function runDemo() {
submit.value = 'Loading the model...'
LlmInference
.createFromOptions(genaiFileset, {
baseOptions: { modelAssetPath: modelFileName },
baseOptions: { modelAssetPath: model_file_path },
maxTokens: 512, // The maximum number of tokens (input tokens + output
// // tokens) the model handles.
randomSeed: 1, // The random seed used during text generation.
Expand All @@ -76,4 +93,3 @@ async function runDemo() {
});
}

runDemo();

0 comments on commit bb47d35

Please sign in to comment.