@@ -1,31 +1,24 @@
 #!/usr/bin/env node
-import os from "os";
 
+import os from "os";
 import { Server } from "@modelcontextprotocol/sdk/server/index.js";
 import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
 import {
     CallToolRequestSchema,
-    PromptMessage,
     ListToolsRequestSchema,
-    ListPromptsRequestSchema,
-    GetPromptRequestSchema,
 } from "@modelcontextprotocol/sdk/types.js";
-import { exec } from "node:child_process";
-import { promisify } from "node:util";
 import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
 import { runCommand } from "./run-command.js";
 
 import { createRequire } from "module";
 import { verbose_log } from "./always_log.js";
+import { registerPrompts } from "./prompts.js";
 const require = createRequire(import.meta.url);
 const {
     name: package_name,
     version: package_version,
 } = require("../package.json");
 
-// TODO use .promises? in node api
-const execAsync = promisify(exec);
-
 const server = new Server(
     {
         name: package_name,
@@ -92,82 +85,7 @@ server.setRequestHandler(
     }
 );
 
-server.setRequestHandler(ListPromptsRequestSchema, async () => {
-    verbose_log("INFO: ListPrompts");
-    return {
-        prompts: [
-            // TODO! add prompts for various LLMs that tailor instructions to make them optimize use of run_command tool
-            // idea is, users could insert those manually, or perhaps automatically if necessary, depending on context
-            // that way you don't need one prompt for everything and certain models won't need any help (i.e. Claude) vs
-            // llama4 which struggled with old run_script tool (now just stdin on run_command) so it might need some
-            // special instructions and yeah... I think that's a use case for these prompts
-            // /prompt llama4 ?
-            {
-                name: "run_command",
-                description:
-                    "Include command output in the prompt. Instead of a tool call, the user decides what commands are relevant.",
-                arguments: [
-                    {
-                        name: "command",
-                        required: true,
-                    },
-                    // if I care to keep the prompt tools then add stdin?
-                ],
-            },
-        ],
-    };
-});
-
-server.setRequestHandler(GetPromptRequestSchema, async (request) => {
-    if (request.params.name !== "run_command") {
-        throw new Error("Unknown prompt");
-    }
-    verbose_log("INFO: PromptRequest", request);
-
-    const command = String(request.params.arguments?.command);
-    if (!command) {
-        throw new Error("Command is required");
-    }
-    // Is it possible/feasible to pass a path for the workdir when running the command?
-    // - currently it uses / (yikez)
-    // - IMO makes more sense to have it be based on the Zed workdir of each project
-    // - Fallback could be to configure on server level (i.e. home dir of current user) - perhaps CLI arg? (thinking of zed's context_servers config section)
-
-    const { stdout, stderr } = await execAsync(command);
-    // TODO gracefully handle errors and turn them into a prompt message that can be used by LLM to troubleshoot the issue, currently errors result in nothing inserted into the prompt and instead it shows the Zed's chat panel as a failure
-
-    const messages: PromptMessage[] = [
-        {
-            role: "user",
-            content: {
-                type: "text",
-                text:
-                    "I ran the following command, if there is any output it will be shown below:\n" +
-                    command,
-            },
-        },
-    ];
-    if (stdout) {
-        messages.push({
-            role: "user",
-            content: {
-                type: "text",
-                text: "STDOUT:\n" + stdout,
-            },
-        });
-    }
-    if (stderr) {
-        messages.push({
-            role: "user",
-            content: {
-                type: "text",
-                text: "STDERR:\n" + stderr,
-            },
-        });
-    }
-    verbose_log("INFO: PromptResponse", messages);
-    return { messages };
-});
+registerPrompts(server);
 
 async function main() {
     const transport = new StdioServerTransport();
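The new ./prompts.js module itself is not shown in this diff. Assuming a straight extract-and-move refactor (the removed handlers, the execAsync helper, and the prompt-related imports relocated into the new file), registerPrompts would look roughly like the sketch below. It is an illustration reconstructed from the removed code, not the actual file contents; the TODO comments and verbose logging are omitted for brevity.

// prompts.ts -- hypothetical sketch, not the real module from this commit
import { exec } from "node:child_process";
import { promisify } from "node:util";
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import {
    ListPromptsRequestSchema,
    GetPromptRequestSchema,
    PromptMessage,
} from "@modelcontextprotocol/sdk/types.js";

const execAsync = promisify(exec);

// Registers the ListPrompts/GetPrompt handlers previously defined in index.ts.
export function registerPrompts(server: Server) {
    server.setRequestHandler(ListPromptsRequestSchema, async () => ({
        prompts: [
            {
                name: "run_command",
                description:
                    "Include command output in the prompt. Instead of a tool call, the user decides what commands are relevant.",
                arguments: [{ name: "command", required: true }],
            },
        ],
    }));

    server.setRequestHandler(GetPromptRequestSchema, async (request) => {
        if (request.params.name !== "run_command") {
            throw new Error("Unknown prompt");
        }
        const command = String(request.params.arguments?.command);
        if (!command) {
            throw new Error("Command is required");
        }
        // Run the command and turn stdout/stderr into user messages.
        const { stdout, stderr } = await execAsync(command);
        const messages: PromptMessage[] = [
            {
                role: "user",
                content: {
                    type: "text",
                    text:
                        "I ran the following command, if there is any output it will be shown below:\n" +
                        command,
                },
            },
        ];
        if (stdout) {
            messages.push({
                role: "user",
                content: { type: "text", text: "STDOUT:\n" + stdout },
            });
        }
        if (stderr) {
            messages.push({
                role: "user",
                content: { type: "text", text: "STDERR:\n" + stderr },
            });
        }
        return { messages };
    });
}

Whatever the real module looks like, hiding the two handlers behind a single registerPrompts(server) call reduces index.ts to server wiring and gives the open questions in the removed comments (per-LLM prompt variants, workdir handling, error reporting) a dedicated home.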