diff --git a/extensions/inference-triton-trtllm-extension/package.json b/extensions/inference-triton-trtllm-extension/package.json index b093a9a7fc..862359fe61 100644 --- a/extensions/inference-triton-trtllm-extension/package.json +++ b/extensions/inference-triton-trtllm-extension/package.json @@ -1,7 +1,7 @@ { "name": "@janhq/inference-triton-trt-llm-extension", "version": "1.0.0", - "description": "Inference Engine for NVIDIA Triton with TensorRT-LLM Extension that can be used with any OpenAI compatible API", + "description": "Inference Engine for NVIDIA Triton with TensorRT-LLM Extension integration with the Jan extension framework", "main": "dist/index.js", "module": "dist/module.js", "author": "Jan ", diff --git a/extensions/inference-triton-trtllm-extension/src/index.ts b/extensions/inference-triton-trtllm-extension/src/index.ts index 80c54a3f42..d0de32357c 100644 --- a/extensions/inference-triton-trtllm-extension/src/index.ts +++ b/extensions/inference-triton-trtllm-extension/src/index.ts @@ -3,7 +3,7 @@ * The class provides methods for initializing and stopping a model, and for making inference requests. * It also subscribes to events emitted by the @janhq/core package and handles new message requests. * @version 1.0.0 - * @module inference-openai-extension/src/index + * @module inference-nvidia-triton-trt-llm-extension/src/index */ import {