diff --git a/src/app.ts b/src/app.ts
index 626ed0c..6616fdf 100644
--- a/src/app.ts
+++ b/src/app.ts
@@ -3,22 +3,19 @@ import { cors } from "hono/cors"
 import { getRuntimeKey } from "hono/adapter"
 import { logger } from "hono/logger"
 import { timing } from "hono/timing"
-import { chatProxyHandler } from "./chat/complete/ChatProxyHandler.ts"
+import { chatProxyHandler } from "./v1/chat/completions/ChatProxyHandler.ts"
 import { Logger, gen_logger } from "./log.ts"
 
-const openAiRoute = new Hono<{ Variables: { log: Logger } }>()
-  .use("*", async (c, next) => {
+export const app = new Hono({ strict: true })
+  .use("*", cors(), timing(), logger())
+  .use("*", async (c: ContextWithLogger, next) => {
     const logger = gen_logger(crypto.randomUUID())
     c.set("log", logger)
     await next()
     c.set("log", undefined)
   })
-  .post("/v1/chat/completions", chatProxyHandler)
-
-export const app = new Hono({ strict: true })
-  .use("*", cors(), timing(), logger())
   .options("*", (c) => c.text("", 204))
-  .route("/", openAiRoute)
   .get("/", (c) => c.text(`Hello Gemini-OpenAI-Proxy from ${getRuntimeKey()}!`))
+  .post("/v1/chat/completions", chatProxyHandler)
 
 export type ContextWithLogger = Context<{ Variables: { log: Logger } }>
diff --git a/src/log.ts b/src/log.ts
index 0a577b5..5d266b9 100644
--- a/src/log.ts
+++ b/src/log.ts
@@ -11,14 +11,14 @@ const currentlevel = LogLevel.debug
 export function gen_logger(id: string) {
   return mapValues(LogLevel, (value, name) => {
     return (msg: Any) => {
-      out_func(name, value, `${id} ${msg}`)
+      outFunc(name, value, `${id} ${msg}`)
     }
   })
 }
 
 export type Logger = ReturnType<typeof gen_logger>
 
-function out_func(levelName: string, levelValue: number, msg: string) {
+function outFunc(levelName: string, levelValue: number, msg: string) {
   if (levelValue > currentlevel) {
     return
   }
diff --git a/src/utils.ts b/src/utils.ts
index 747499f..5495d89 100644
--- a/src/utils.ts
+++ b/src/utils.ts
@@ -24,9 +24,9 @@ function parseBase64(base64: string): Part {
   }
 }
 
-export function openAIMessageToGeminiMessage(
-  messages: Array<OpenAI.Chat.ChatCompletionMessageParam>,
-): Array<Content> {
+export function openAiMessageToGeminiMessage(
+  messages: OpenAI.Chat.ChatCompletionMessageParam[],
+): Content[] {
   const result: Content[] = messages.flatMap(({ role, content }) => {
     if (role === "system") {
       return [
@@ -51,12 +51,16 @@
 }
 
 function hasImageMessage(
-  messages: Array<OpenAI.Chat.ChatCompletionMessageParam>,
+  messages: OpenAI.Chat.ChatCompletionMessageParam[],
 ): boolean {
   return messages.some((msg) => {
     const content = msg.content
-    if (content == null) return false
-    if (typeof content === "string") return false
+    if (content == null) {
+      return false
+    }
+    if (typeof content === "string") {
+      return false
+    }
     return content.some((it) => it.type === "image_url")
   })
 }
diff --git a/src/chat/complete/ChatProxyHandler.ts b/src/v1/chat/completions/ChatProxyHandler.ts
similarity index 88%
rename from src/chat/complete/ChatProxyHandler.ts
rename to src/v1/chat/completions/ChatProxyHandler.ts
index 1c7e291..0deeb82 100644
--- a/src/chat/complete/ChatProxyHandler.ts
+++ b/src/v1/chat/completions/ChatProxyHandler.ts
@@ -1,13 +1,13 @@
 import { GoogleGenerativeAI } from "@google/generative-ai"
 import type { Handler } from "hono"
 import type { OpenAI } from "openai"
-import { getToken } from "../../utils.ts"
+import { getToken } from "../../../utils.ts"
 import { nonStreamingChatProxyHandler } from "./NonStreamingChatProxyHandler.ts"
 import { streamingChatProxyHandler } from "./StreamingChatProxyHandler.ts"
-import { ContextWithLogger } from "../../app.ts"
+import { ContextWithLogger } from "../../../app.ts"
 
 export const chatProxyHandler: Handler = async (c: ContextWithLogger) => {
-  const log = c.get("log")
+  const log = c.var.log
   const req = await c.req.json()
   log.debug(JSON.stringify(req))
 
diff --git a/src/chat/complete/NonStreamingChatProxyHandler.ts b/src/v1/chat/completions/NonStreamingChatProxyHandler.ts
similarity index 83%
rename from src/chat/complete/NonStreamingChatProxyHandler.ts
rename to src/v1/chat/completions/NonStreamingChatProxyHandler.ts
index 5e25056..43330e8 100644
--- a/src/chat/complete/NonStreamingChatProxyHandler.ts
+++ b/src/v1/chat/completions/NonStreamingChatProxyHandler.ts
@@ -1,5 +1,5 @@
 import type { OpenAI } from "openai"
-import { genModel, openAIMessageToGeminiMessage } from "../../utils.ts"
+import { genModel, openAiMessageToGeminiMessage } from "../../../utils.ts"
 import { ChatProxyHandlerType } from "./ChatProxyHandler.ts"
 
 export const nonStreamingChatProxyHandler: ChatProxyHandlerType = async (
@@ -7,11 +7,11 @@
   req,
   genAi,
 ) => {
-  const log = c.get("log")
+  const log = c.var.log
   const model = genModel(genAi, req)
   const geminiResp: string = await model
     .generateContent({
-      contents: openAIMessageToGeminiMessage(req.messages),
+      contents: openAiMessageToGeminiMessage(req.messages),
     })
     .then((it) => it.response.text())
     .catch((err) => err?.message ?? err.toString())
diff --git a/src/chat/complete/StreamingChatProxyHandler.ts b/src/v1/chat/completions/StreamingChatProxyHandler.ts
similarity index 75%
rename from src/chat/complete/StreamingChatProxyHandler.ts
rename to src/v1/chat/completions/StreamingChatProxyHandler.ts
index 027358c..1f720c3 100644
--- a/src/chat/complete/StreamingChatProxyHandler.ts
+++ b/src/v1/chat/completions/StreamingChatProxyHandler.ts
@@ -1,6 +1,6 @@
 import type { OpenAI } from "openai"
 import { streamSSE } from "hono/streaming"
-import { genModel, openAIMessageToGeminiMessage } from "../../utils.ts"
+import { genModel, openAiMessageToGeminiMessage } from "../../../utils.ts"
 import { ChatProxyHandlerType } from "./ChatProxyHandler.ts"
 
 export const streamingChatProxyHandler: ChatProxyHandlerType = async (
@@ -8,10 +8,10 @@
   req,
   genAi,
 ) => {
-  const log = c.get("log")
+  const log = c.var.log
   const model = genModel(genAi, req)
 
-  const genOpenAIResp = (content: string, stop: boolean) =>
+  const genOpenAiResp = (content: string, stop: boolean) =>
     ({
       id: "chatcmpl-abc123",
       object: "chat.completion.chunk",
@@ -29,23 +29,23 @@
   return streamSSE(c, async (sseStream) => {
     await model
       .generateContentStream({
-        contents: openAIMessageToGeminiMessage(req.messages),
+        contents: openAiMessageToGeminiMessage(req.messages),
       })
       .then(async ({ stream, response }) => {
         for await (const { text } of stream) {
           await sseStream.writeSSE({
-            data: JSON.stringify(genOpenAIResp(text(), false)),
+            data: JSON.stringify(genOpenAiResp(text(), false)),
           })
         }
         await sseStream.writeSSE({
-          data: JSON.stringify(genOpenAIResp("", true)),
+          data: JSON.stringify(genOpenAiResp("", true)),
         })
         const geminiResult = (await response).text()
         log.info(JSON.stringify(geminiResult))
       })
       .catch(async (e) => {
         await sseStream.writeSSE({
-          data: JSON.stringify(genOpenAIResp(e.toString(), true)),
+          data: JSON.stringify(genOpenAiResp(e.toString(), true)),
         })
         log.info(e)
       })
diff --git a/test/chat-completion_test.ts b/test/chat-completion_test.ts
index 2f352a4..197be9d 100644
--- a/test/chat-completion_test.ts
+++ b/test/chat-completion_test.ts
@@ -27,5 +27,7 @@ test("test", async () => {
 })
 
 test("open ai client test", () => {
-  if (typeof globalThis.Deno === "undefined") return
+  if (typeof globalThis.Deno === "undefined") {
+    return
+  }
 })
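
Note on the repeated c.get("log") -> c.var.log change: Hono exposes middleware-set
variables through the typed c.var accessor, so the two forms return the same value;
c.var just picks the type up from the Variables declaration instead of a string key.
A minimal standalone sketch of the pattern (assumes Hono v3+; only the variable name
"log" comes from this patch, the rest is illustrative):

import { Hono } from "hono"

// Hypothetical env type, mirroring { Variables: { log: Logger } } in app.ts
type Env = { Variables: { log: (msg: string) => void } }

const demo = new Hono<Env>()
  .use("*", async (c, next) => {
    // middleware provides the variable, as the app.ts request-id middleware does
    c.set("log", (msg) => console.log(msg))
    await next()
  })
  .get("/demo", (c) => {
    c.var.log("hit /demo") // typed; equivalent to c.get("log")("hit /demo")
    return c.text("ok")
  })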
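
Since the routes now hang directly off app, the public surface is unchanged:
POST /v1/chat/completions still accepts OpenAI-shaped requests. A usage sketch
against a locally running proxy (the base URL, port, and key handling here are
assumptions, not part of this patch; stream: true exercises
streamingChatProxyHandler, stream: false the non-streaming path):

import OpenAI from "openai"

const client = new OpenAI({
  baseURL: "http://localhost:8000/v1", // point the SDK at the proxy (port assumed)
  apiKey: "<your-gemini-api-key>",     // extracted server-side, see getToken
})

const stream = await client.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Hello!" }],
  stream: true,
})

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "")
}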