Commit 84cb986

split out prompts

1 parent f893e0b commit 84cb986

File tree

src/index.ts
src/prompts.ts

2 files changed: +94 -85 lines changed

src/index.ts

Lines changed: 3 additions & 85 deletions
@@ -1,31 +1,24 @@
 #!/usr/bin/env node
-import os from "os";
 
+import os from "os";
 import { Server } from "@modelcontextprotocol/sdk/server/index.js";
 import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
 import {
   CallToolRequestSchema,
-  PromptMessage,
   ListToolsRequestSchema,
-  ListPromptsRequestSchema,
-  GetPromptRequestSchema,
 } from "@modelcontextprotocol/sdk/types.js";
-import { exec } from "node:child_process";
-import { promisify } from "node:util";
 import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
 import { runCommand } from "./run-command.js";
 
 import { createRequire } from "module";
 import { verbose_log } from "./always_log.js";
+import { registerPrompts } from "./prompts.js";
 const require = createRequire(import.meta.url);
 const {
   name: package_name,
   version: package_version,
 } = require("../package.json");
 
-// TODO use .promises? in node api
-const execAsync = promisify(exec);
-
 const server = new Server(
   {
     name: package_name,
@@ -92,82 +85,7 @@ server.setRequestHandler(
   }
 );
 
-server.setRequestHandler(ListPromptsRequestSchema, async () => {
-  verbose_log("INFO: ListPrompts");
-  return {
-    prompts: [
-      // TODO! add prompts for various LLMs that tailor instructions to make them optimize use of run_command tool
-      // idea is, users could insert those manually, or perhaps automatically if necessary, depending on context
-      // that way you don't need one prompt for everything and certain models won't need any help (i.e. Claude) vs
-      // llama4 which struggled with old run_script tool (now just stdin on run_command) so it might need some
-      // special instructions and yeah... I think that's a use case for these prompts
-      // /prompt llama4 ?
-      {
-        name: "run_command",
-        description:
-          "Include command output in the prompt. Instead of a tool call, the user decides what commands are relevant.",
-        arguments: [
-          {
-            name: "command",
-            required: true,
-          },
-          // if I care to keep the prompt tools then add stdin?
-        ],
-      },
-    ],
-  };
-});
-
-server.setRequestHandler(GetPromptRequestSchema, async (request) => {
-  if (request.params.name !== "run_command") {
-    throw new Error("Unknown prompt");
-  }
-  verbose_log("INFO: PromptRequest", request);
-
-  const command = String(request.params.arguments?.command);
-  if (!command) {
-    throw new Error("Command is required");
-  }
-  // Is it possible/feasible to pass a path for the workdir when running the command?
-  // - currently it uses / (yikez)
-  // - IMO makes more sense to have it be based on the Zed workdir of each project
-  // - Fallback could be to configure on server level (i.e. home dir of current user) - perhaps CLI arg? (thinking of zed's context_servers config section)
-
-  const { stdout, stderr } = await execAsync(command);
-  // TODO gracefully handle errors and turn them into a prompt message that can be used by LLM to troubleshoot the issue, currently errors result in nothing inserted into the prompt and instead it shows the Zed's chat panel as a failure
-
-  const messages: PromptMessage[] = [
-    {
-      role: "user",
-      content: {
-        type: "text",
-        text:
-          "I ran the following command, if there is any output it will be shown below:\n" +
-          command,
-      },
-    },
-  ];
-  if (stdout) {
-    messages.push({
-      role: "user",
-      content: {
-        type: "text",
-        text: "STDOUT:\n" + stdout,
-      },
-    });
-  }
-  if (stderr) {
-    messages.push({
-      role: "user",
-      content: {
-        type: "text",
-        text: "STDERR:\n" + stderr,
-      },
-    });
-  }
-  verbose_log("INFO: PromptResponse", messages);
-  return { messages };
-});
+registerPrompts(server);
 
 async function main() {
   const transport = new StdioServerTransport();
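
After this commit, the prompt wiring in src/index.ts reduces to a single call. Condensed from the diff above (no new code, just the surviving lines read together):

import { registerPrompts } from "./prompts.js";
// ...the CallTool/ListTools handlers remain in index.ts...
registerPrompts(server);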

src/prompts.ts

Lines changed: 91 additions & 0 deletions
@@ -0,0 +1,91 @@
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
+import {
+  GetPromptRequestSchema,
+  ListPromptsRequestSchema,
+  PromptMessage,
+} from "@modelcontextprotocol/sdk/types.js";
+import { verbose_log } from "./always_log.js";
+
+import { exec } from "node:child_process";
+import { promisify } from "node:util";
+const execAsync = promisify(exec);
+// TODO use .promises? in node api
+
+export function registerPrompts(server: Server) {
+  server.setRequestHandler(ListPromptsRequestSchema, async () => {
+    verbose_log("INFO: ListPrompts");
+    return {
+      prompts: [
+        // TODO! add prompts for various LLMs that tailor instructions to make them optimize use of run_command tool
+        // idea is, users could insert those manually, or perhaps automatically if necessary, depending on context
+        // that way you don't need one prompt for everything and certain models won't need any help (i.e. Claude) vs
+        // llama4 which struggled with old run_script tool (now just stdin on run_command) so it might need some
+        // special instructions and yeah... I think that's a use case for these prompts
+        // /prompt llama4 ?
+        {
+          name: "run_command",
+          description:
+            "Include command output in the prompt. Instead of a tool call, the user decides what commands are relevant.",
+          arguments: [
+            {
+              name: "command",
+              required: true,
+            },
+            // if I care to keep the prompt tools then add stdin?
+          ],
+        },
+      ],
+    };
+  });
+
+  server.setRequestHandler(GetPromptRequestSchema, async (request) => {
+    if (request.params.name !== "run_command") {
+      throw new Error("Unknown prompt");
+    }
+    verbose_log("INFO: PromptRequest", request);
+
+    const command = String(request.params.arguments?.command);
+    if (!command) {
+      throw new Error("Command is required");
+    }
+    // Is it possible/feasible to pass a path for the workdir when running the command?
+    // - currently it uses / (yikez)
+    // - IMO makes more sense to have it be based on the Zed workdir of each project
+    // - Fallback could be to configure on server level (i.e. home dir of current user) - perhaps CLI arg? (thinking of zed's context_servers config section)
+
+    const { stdout, stderr } = await execAsync(command);
+    // TODO gracefully handle errors and turn them into a prompt message that can be used by LLM to troubleshoot the issue, currently errors result in nothing inserted into the prompt and instead it shows the Zed's chat panel as a failure
+
+    const messages: PromptMessage[] = [
+      {
+        role: "user",
+        content: {
+          type: "text",
+          text:
+            "I ran the following command, if there is any output it will be shown below:\n" +
+            command,
+        },
+      },
+    ];
+    if (stdout) {
+      messages.push({
+        role: "user",
+        content: {
+          type: "text",
+          text: "STDOUT:\n" + stdout,
+        },
+      });
+    }
+    if (stderr) {
+      messages.push({
+        role: "user",
+        content: {
+          type: "text",
+          text: "STDERR:\n" + stderr,
+        },
+      });
+    }
+    verbose_log("INFO: PromptResponse", messages);
+    return { messages };
+  });
+}
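
The moved code carries over two TODOs: running the command in a meaningful working directory instead of /, and converting exec failures into prompt messages rather than a failed request. A minimal sketch of one possible direction, not part of this commit (the workdir parameter is hypothetical; cwd is a standard child_process.exec option, and promisify(exec) rejects with an Error that still carries the captured stdout/stderr):

// Sketch only: run in a caller-supplied working directory and turn failures
// into output the prompt can include, instead of letting the request fail.
async function runForPrompt(
  command: string,
  workdir?: string, // hypothetical; could come from a CLI arg or client config
): Promise<{ stdout: string; stderr: string }> {
  try {
    // `cwd` is a standard exec option, so this avoids defaulting to "/"
    const { stdout, stderr } = await execAsync(command, { cwd: workdir });
    return { stdout, stderr };
  } catch (error) {
    // promisify(exec) rejects with an Error that still has the captured
    // stdout/stderr attached, so partial output can reach the LLM
    const e = error as Error & { stdout?: string; stderr?: string };
    return {
      stdout: e.stdout ?? "",
      stderr: `${e.stderr ?? ""}\nCommand failed: ${e.message}`,
    };
  }
}

The GetPrompt handler could then call runForPrompt(command, workdir) in place of the bare execAsync(command) and build the same STDOUT/STDERR messages from whatever comes back.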
