From c8c88cd8b89434450afb4c98cfdcc3737aa26c16 Mon Sep 17 00:00:00 2001 From: Henry Mao <1828968+calclavia@users.noreply.github.com> Date: Sat, 11 Jan 2025 17:50:28 +0800 Subject: [PATCH] Add config schema and example auth call --- README.md | 74 ++++++++++++++++-------------- package-lock.json | 45 ++++++++++--------- package.json | 6 +-- src/config.ts | 50 ++++++++++++++++++++- src/examples/README.md | 2 +- src/examples/shell.ts | 100 ----------------------------------------- src/examples/simple.ts | 41 ++++++++++++----- 7 files changed, 146 insertions(+), 172 deletions(-) delete mode 100644 src/examples/shell.ts diff --git a/README.md b/README.md index 4e9ec70..558162d 100644 --- a/README.md +++ b/README.md @@ -22,68 +22,75 @@ npm install @smithery/sdk ## Usage -In this example, we'll connect use OpenAI client with Exa search capabilities. +In this example, we'll connect to Exa search capabilities using either OpenAI or Anthropic. ```bash -npm install @smithery/mcp-exa +npm install @smithery/sdk @modelcontextprotocol/sdk ``` -The following code sets up OpenAI and connects to an Exa MCP server. In this case, we're running the server locally within the same process, so it's just a simple passthrough. +The following code sets up the client and connects to an Exa MCP server: ```typescript import { MultiClient } from "@smithery/sdk" import { OpenAIChatAdapter } from "@smithery/sdk/integrations/llm/openai" -import * as exa from "@smithery/mcp-exa" +import { AnthropicChatAdapter } from "@smithery/sdk/integrations/llm/anthropic" +import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js" import { OpenAI } from "openai" -import { createTransport } from "@smithery/sdk/registry" +import Anthropic from "@anthropic-ai/sdk" +import EventSource from "eventsource" -const openai = new OpenAI() -const exaServer = exa.createServer({ - apiKey: process.env.EXA_API_KEY, -}) +// Patch event source for Node.js environment +global.EventSource = EventSource as any -const sequentialThinking = await createTransport( - "@modelcontextprotocol/server-sequential-thinking", +// Create a new connection +const exaTransport = new SSEClientTransport( + // Replace with your deployed MCP server URL + new URL("https://your-mcp-server.example.com/sse") ) + +// Initialize a multi-client connection const client = new MultiClient() await client.connectAll({ - exa: exaServer, - sequentialThinking: sequentialThinking, + exa: exaTransport, + // You can add more connections here... +}) + +// Configure and authenticate +await client.clients.exa.request({ + method: "config", + params: { + config: { + apiKey: process.env.EXA_API_KEY, + }, + }, }) ``` -Now you can make your LLM aware of the available tools from Exa. +Now you can use either OpenAI or Anthropic to interact with the tools: ```typescript -// Create an adapter -const adapter = new OpenAIChatAdapter(client) -const response = await openai.chat.completions.create({ +// Using OpenAI +const openai = new OpenAI() +const openaiAdapter = new OpenAIChatAdapter(client) +const openaiResponse = await openai.chat.completions.create({ model: "gpt-4o-mini", - messages: [{ role: "user", content: "In 2024, did OpenAI release GPT-5?" }], - // Pass the tools to OpenAI call - tools: await adapter.listTools(), + messages: [{ role: "user", content: "What AI events are happening in Singapore?" 
}], + tools: await openaiAdapter.listTools(), }) -// Obtain the tool outputs as new messages -const toolMessages = await adapter.callTool(response) +const openaiToolMessages = await openaiAdapter.callTool(openaiResponse) ``` -Using this, you can easily enable your LLM to call tools and obtain the results. - -However, it's often the case where your LLM needs to call a tool, see its response, and continue processing output of the tool in order to give you a final response. - -In this case, you have to loop your LLM call and update your messages until there are no more toolMessages to continue. - -Example: +For more complex interactions where the LLM needs to process tool outputs and potentially make additional calls, you'll need to implement a conversation loop. Here's an example: ```typescript let messages = [ { role: "user", - content: - "Deduce Obama's age in number of days. It's November 28, 2024 today. Search to ensure correctness.", + content: "What are some AI events happening in Singapore and how many days until the next one?", }, ] const adapter = new OpenAIChatAdapter(client) +let isDone = false while (!isDone) { const response = await openai.chat.completions.create({ @@ -91,6 +98,7 @@ while (!isDone) { messages, tools: await adapter.listTools(), }) + // Handle tool calls const toolMessages = await adapter.callTool(response) @@ -109,14 +117,14 @@ See a full example in the [examples](./src/examples) directory. Error: ReferenceError: EventSource is not defined ``` -This event means you're trying to use EventSource API (which is typically used in the browser) from Node. You'll have to install the following to use it: +This error means you're trying to use EventSource API (which is typically used in the browser) from Node. Install the following packages: ```bash npm install eventsource npm install -D @types/eventsource ``` -Patch the global EventSource object: +Then patch the global EventSource object: ```typescript import EventSource from "eventsource" diff --git a/package-lock.json b/package-lock.json index fd4b7b2..985022a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,13 +8,10 @@ "name": "@smithery/sdk", "version": "0.0.18", "license": "GPL-3.0-only", - "workspaces": [ - "mcps/*" - ], "dependencies": { "@anthropic-ai/sdk": "^0.32.1", "@icons-pack/react-simple-icons": "^10.2.0", - "@modelcontextprotocol/sdk": "^1.0.3", + "@modelcontextprotocol/sdk": "^1.1.1", "openai": "^4.0.0", "uuid": "^11.0.3" }, @@ -33,6 +30,7 @@ "mcps/agent": { "name": "@smithery/mcp-agent", "version": "0.0.5", + "extraneous": true, "dependencies": { "@anthropic-ai/sdk": "^0.32.1", "@modelcontextprotocol/sdk": "^1.0.3", @@ -48,6 +46,7 @@ "mcps/e2b": { "name": "@smithery/mcp-e2b", "version": "0.0.5", + "dev": true, "dependencies": { "@e2b/code-interpreter": "latest", "dotenv": "^16.3.1", @@ -62,6 +61,7 @@ "mcps/exa": { "name": "@smithery/mcp-exa", "version": "0.0.5", + "dev": true, "dependencies": { "@modelcontextprotocol/sdk": "^1.0.3", "exa-js": "^1.3.3", @@ -117,10 +117,12 @@ }, "node_modules/@bufbuild/protobuf": { "version": "2.2.3", + "dev": true, "license": "(Apache-2.0 AND BSD-3-Clause)" }, "node_modules/@connectrpc/connect": { "version": "2.0.0-rc.3", + "dev": true, "license": "Apache-2.0", "peerDependencies": { "@bufbuild/protobuf": "^2.2.0" @@ -128,6 +130,7 @@ }, "node_modules/@connectrpc/connect-web": { "version": "2.0.0-rc.3", + "dev": true, "license": "Apache-2.0", "peerDependencies": { "@bufbuild/protobuf": "^2.2.0", @@ -138,6 +141,7 @@ "version": "1.0.4", "resolved": 
"https://registry.npmjs.org/@e2b/code-interpreter/-/code-interpreter-1.0.4.tgz", "integrity": "sha512-8y82UMXBdf/hye8bX2Fn04JlL72rvOenVgsvMZ+cAJqo6Ijdl4EmzzuFpM4mz9s+EJ29+34lGHBp277tiLWuiA==", + "dev": true, "dependencies": { "e2b": "^1.0.5" }, @@ -169,18 +173,18 @@ } }, "node_modules/@modelcontextprotocol/sdk": { - "version": "1.0.3", - "license": "MIT", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.1.1.tgz", + "integrity": "sha512-siCApQgBn3U8R93TdumLtezRyRIlrA/a63GrTRO1jP31fRyOohpu0iPLvXzsyptxmy7B8GDxr8+r+Phu6mHgzg==", "dependencies": { "content-type": "^1.0.5", "raw-body": "^3.0.0", "zod": "^3.23.8" + }, + "engines": { + "node": ">=18" } }, - "node_modules/@smithery/mcp-agent": { - "resolved": "mcps/agent", - "link": true - }, "node_modules/@smithery/mcp-e2b": { "resolved": "mcps/e2b", "link": true @@ -189,10 +193,6 @@ "resolved": "mcps/exa", "link": true }, - "node_modules/@smithery/sdk": { - "resolved": "", - "link": true - }, "node_modules/@types/eventsource": { "version": "1.1.15", "dev": true, @@ -266,6 +266,7 @@ }, "node_modules/compare-versions": { "version": "6.1.1", + "dev": true, "license": "MIT" }, "node_modules/content-type": { @@ -277,6 +278,7 @@ }, "node_modules/cross-fetch": { "version": "4.0.0", + "dev": true, "license": "MIT", "dependencies": { "node-fetch": "^2.6.12" @@ -298,6 +300,7 @@ }, "node_modules/dotenv": { "version": "16.4.7", + "dev": true, "license": "BSD-2-Clause", "engines": { "node": ">=12" @@ -308,6 +311,7 @@ }, "node_modules/e2b": { "version": "1.0.5", + "dev": true, "license": "MIT", "dependencies": { "@bufbuild/protobuf": "^2.2.2", @@ -376,6 +380,7 @@ }, "node_modules/exa-js": { "version": "1.3.3", + "dev": true, "license": "MIT", "dependencies": { "cross-fetch": "^4.0.0" @@ -445,13 +450,6 @@ "node": ">= 0.8" } }, - "node_modules/human-id": { - "version": "4.1.1", - "license": "MIT", - "bin": { - "human-id": "dist/cli.js" - } - }, "node_modules/humanize-ms": { "version": "1.2.1", "license": "MIT", @@ -475,6 +473,7 @@ }, "node_modules/lodash": { "version": "4.17.21", + "dev": true, "license": "MIT" }, "node_modules/mime-db": { @@ -570,6 +569,7 @@ }, "node_modules/openapi-fetch": { "version": "0.9.8", + "dev": true, "license": "MIT", "dependencies": { "openapi-typescript-helpers": "^0.0.8" @@ -577,10 +577,12 @@ }, "node_modules/openapi-typescript-helpers": { "version": "0.0.8", + "dev": true, "license": "MIT" }, "node_modules/platform": { "version": "1.3.6", + "dev": true, "license": "MIT" }, "node_modules/raw-body": { @@ -719,6 +721,7 @@ }, "node_modules/zod-to-json-schema": { "version": "3.23.5", + "dev": true, "license": "ISC", "peerDependencies": { "zod": "^3.23.3" diff --git a/package.json b/package.json index 9dbd0c1..4648abf 100644 --- a/package.json +++ b/package.json @@ -9,9 +9,7 @@ ".": "./dist/index.js", "./*": "./dist/*" }, - "files": [ - "dist" - ], + "files": ["dist"], "scripts": { "build": "tsc", "build:all": "npm run build -ws --include-workspace-root", @@ -23,7 +21,7 @@ "dependencies": { "@anthropic-ai/sdk": "^0.32.1", "@icons-pack/react-simple-icons": "^10.2.0", - "@modelcontextprotocol/sdk": "^1.0.3", + "@modelcontextprotocol/sdk": "^1.1.1", "openai": "^4.0.0", "uuid": "^11.0.3" }, diff --git a/src/config.ts b/src/config.ts index 80fc785..4120a3b 100644 --- a/src/config.ts +++ b/src/config.ts @@ -1 +1,49 @@ -export const REGISTRY_URL = "https://registry.smithery.ai" +import { + ProgressTokenSchema, + RequestSchema, + ResultSchema, +} from "@modelcontextprotocol/sdk/types.js" +import 
{ z } from "zod"
+
+// Copied from MCP
+export const BaseRequestSchema = z
+  .object({
+    _meta: z.optional(
+      z
+        .object({
+          /**
+           * If specified, the caller is requesting out-of-band progress notifications for this request (as represented by notifications/progress). The value of this parameter is an opaque token that will be attached to any subsequent notifications. The receiver is not obligated to provide these notifications.
+           */
+          progressToken: z.optional(ProgressTokenSchema),
+        })
+        .passthrough(),
+    ),
+  })
+  .passthrough()
+
+/**
+ * A custom method to set the configuration of the server deployed on Smithery.
+ * This must be called after initialization and before using the SSE server.
+ */
+export const ConfigRequestSchema = RequestSchema.extend({
+  method: z.literal("config"),
+  params: BaseRequestSchema.extend({
+    config: z.any(),
+  }),
+})
+
+export type ConfigRequest = z.infer<typeof ConfigRequestSchema>
+
+/**
+ * A custom response schema expected when creating a config request.
+ */
+export const ConfigResultSchema = ResultSchema.extend({
+  error: z
+    .any()
+    .optional()
+    .describe(
+      "An object containing the error. If no error is present, it means the config succeeded.",
+    ),
+}).describe("The result of a config request.")
+
+export type ConfigResult = z.infer<typeof ConfigResultSchema>
diff --git a/src/examples/README.md b/src/examples/README.md
index 1595015..2035153 100644
--- a/src/examples/README.md
+++ b/src/examples/README.md
@@ -26,7 +26,7 @@ This directory contains examples demonstrating how to use the Smithery SDK.
 4. Run an example:
    ```bash
    # Run the simple example
-   npx tsx examples/simple.ts
+   npx tsx src/examples/simple.ts
    ```
 
 ## Available Examples
diff --git a/src/examples/shell.ts b/src/examples/shell.ts
deleted file mode 100644
index 7e6555f..0000000
--- a/src/examples/shell.ts
+++ /dev/null
@@ -1,100 +0,0 @@
-import * as shellMcp from "@smithery/mcp-shell"
-import dotenv from "dotenv"
-import EventSource from "eventsource"
-import { OpenAI } from "openai"
-import type { ChatCompletionMessageParam } from "openai/resources/chat/index"
-import { Connection } from "../index.js"
-import { OpenAIHandler } from "../integrations/llm/openai.js"
-import url from "node:url"
-import readline from "node:readline"
-
-// Utility for human approval
-async function getHumanApproval(
-  command: string,
-  args: string[],
-): Promise<boolean> {
-  const rl = readline.createInterface({
-    input: process.stdin,
-    output: process.stdout,
-  })
-
-  return new Promise((resolve) => {
-    rl.question(
-      `Command: ${command} ${args.join(" ")}\nApprove? 
[y/N]: `,
-      (answer) => {
-        rl.close()
-        resolve(answer.toLowerCase() === "y")
-      },
-    )
-  })
-}
-
-// Patch event source
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-global.EventSource = EventSource as any
-
-async function main() {
-  dotenv.config()
-
-  // Initialize the OpenAI client
-  const openai = new OpenAI()
-
-  // Connect to MCPs
-  const connection = await Connection.connect({
-    shell: shellMcp.createServer({
-      allowedCommands: ["ls", "pwd", "date", "echo"],
-      approvalHandler: getHumanApproval,
-    }),
-  })
-
-  // Example conversation with tool usage
-  let isDone = false
-  const messages: ChatCompletionMessageParam[] = [
-    {
-      role: "user",
-      content: "What's the date?",
-    },
-  ]
-
-  const handler = new OpenAIHandler(connection)
-
-  while (!isDone) {
-    const response = await openai.chat.completions.create({
-      model: "gpt-4o",
-      messages,
-      tools: await handler.listTools(),
-    })
-
-    // Handle tool calls - will prompt for approval during execution
-    const toolMessages = await handler.call(response)
-    messages.push(response.choices[0].message)
-    messages.push(...toolMessages)
-    isDone = toolMessages.length === 0
-    console.log(
-      "Processing messages:",
-      messages.map((m) => ({
-        role: m.role,
-        content: m.content,
-        tools: "tool_calls" in m ? m.tool_calls?.length : 0,
-      })),
-    )
-  }
-
-  // Print the final conversation
-  console.log("\nFinal conversation:")
-  messages.forEach((msg) => {
-    console.log(`\n${msg.role.toUpperCase()}:`)
-    console.log(msg.content)
-    if (msg.role === "assistant" && msg.tool_calls) {
-      console.log("Tool calls:", JSON.stringify(msg.tool_calls, null, 2))
-    }
-  })
-}
-
-// Run the example
-if (import.meta.url === url.pathToFileURL(process.argv[1]).href) {
-  main().catch((err) => {
-    console.error("Error:", err)
-    process.exit(1)
-  })
-}
diff --git a/src/examples/simple.ts b/src/examples/simple.ts
index 726c2db..e2159b9 100644
--- a/src/examples/simple.ts
+++ b/src/examples/simple.ts
@@ -1,15 +1,15 @@
 import Anthropic from "@anthropic-ai/sdk"
 import type { PromptCachingBetaMessageParam } from "@anthropic-ai/sdk/src/resources/beta/prompt-caching/index.js"
-import * as exa from "@smithery/mcp-exa"
+import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js"
 import dotenv from "dotenv"
 import EventSource from "eventsource"
+import { exit } from "node:process"
 import { OpenAI } from "openai"
 import type { ChatCompletionMessageParam } from "openai/resources/index.mjs"
+import { type ConfigRequest, ConfigResultSchema } from "../config.js"
 import { MultiClient } from "../index.js"
 import { AnthropicChatAdapter } from "../integrations/llm/anthropic.js"
 import { OpenAIChatAdapter } from "../integrations/llm/openai.js"
-import { createRegistryTransport } from "../registry.js"
-import { exit } from "node:process"
 // Patch event source
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 global.EventSource = EventSource as any
@@ -23,19 +23,37 @@ async function main() {
   const args = process.argv.slice(2)
   const useOpenAI = args.includes("--openai")
 
-  const exaServer = exa.createServer({
-    apiKey: process.env.EXA_API_KEY as string,
-  })
-
-  const sequentialThinking = await createRegistryTransport(
-    "@modelcontextprotocol/server-sequential-thinking",
+  // Create a new connection
+  const exaTransport = new SSEClientTransport(
+    // Replace the URL with your deployed MCP server.
+    new URL("https://exa-mcp-server-42082066756.us-central1.run.app/sse"),
   )
+
+  // Initialize a multi-client connection
   const client = new MultiClient()
   await client.connectAll({
-    exa: exaServer,
-    sequentialThinking: sequentialThinking,
+    exa: exaTransport,
+    // You can add more connections here...
   })
 
+  // Configure servers and authenticate
+  const resp = await client.clients.exa.request(
+    {
+      method: "config",
+      params: {
+        config: {
+          apiKey: process.env.EXA_API_KEY,
+        },
+      },
+    } as ConfigRequest,
+    ConfigResultSchema,
+  )
+
+  if (resp.error) {
+    console.error("Failed to authenticate:", resp.error)
+    exit(1)
+  }
+
   // Example conversation with tool usage
   let isDone = false
@@ -71,7 +89,6 @@ async function main() {
   } else {
     const adapter = new AnthropicChatAdapter(client)
     const response = await chatState.llm.beta.promptCaching.messages.create({
-      // model: "claude-3-5-haiku-20241022",
       model: "claude-3-5-sonnet-20241022",
       max_tokens: 64,
       messages: chatState.messages,