-
Notifications
You must be signed in to change notification settings - Fork 15.1k
feat(tui): add experimental next-prompt suggestion #20309
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: dev
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -20,6 +20,7 @@ import { Plugin } from "../plugin" | |
| import PROMPT_PLAN from "../session/prompt/plan.txt" | ||
| import BUILD_SWITCH from "../session/prompt/build-switch.txt" | ||
| import MAX_STEPS from "../session/prompt/max-steps.txt" | ||
| import PROMPT_SUGGEST_NEXT from "../session/prompt/suggest-next.txt" | ||
| import { ToolRegistry } from "../tool/registry" | ||
| import { Runner } from "@/effect/runner" | ||
| import { MCP } from "../mcp" | ||
|
|
@@ -243,6 +244,77 @@ export namespace SessionPrompt { | |
| ) | ||
| }) | ||
|
|
||
// Generates a suggested "next prompt" for the user after an assistant turn
// completes, and stashes it on the session's idle status. Best-effort: every
// failure path simply returns without setting a suggestion.
const suggest = Effect.fn("SessionPrompt.suggest")(function* (input: {
  session: Session.Info
  sessionID: SessionID
  message: MessageV2.WithParts // the just-finished assistant message
}) {
  // Only suggest on top-level sessions, not child/sub-agent sessions.
  if (input.session.parentID) return
  const message = input.message.info
  // Only react to a cleanly finished assistant message.
  if (message.role !== "assistant") return
  if (message.error) return
  if (!message.finish) return
  // Skip finishes that don't represent a completed reply
  // (mid-tool-call or unclassified stop reasons).
  if (["tool-calls", "unknown"].includes(message.finish)) return
  // Bail if the session is no longer idle (e.g. user already sent a follow-up).
  if ((yield* status.get(input.sessionID)).type !== "idle") return

  // Reuse the "title" agent's configuration as the base for this side-task.
  const ag = yield* agents.get("title")
  if (!ag) return

  // Prefer the provider's small/cheap model; fall back to the model that
  // produced the message. Either lookup failing yields undefined.
  const model = yield* Effect.promise(async () => {
    const small = await Provider.getSmallModel(message.providerID).catch(() => undefined)
    if (small) return small
    return Provider.getModel(message.providerID, message.modelID).catch(() => undefined)
  })
  if (!model) return

  // Load the (compaction-filtered) message history; only the last 8 messages
  // are sent to the model as context for the suggestion.
  const msgs = yield* Effect.promise(() => MessageV2.filterCompacted(MessageV2.stream(input.sessionID)))
  const history = msgs.slice(-8)
  // A "real" user message has at least one non-synthetic part.
  // NOTE(review): a user message with zero parts makes `.every(...)` vacuously
  // true and so counts as not-real — confirm that is intended.
  const real = (item: MessageV2.WithParts) =>
    item.info.role === "user" && !item.parts.every((part) => "synthetic" in part && part.synthetic)
  // Attribute the suggestion to the user message that triggered this assistant
  // reply; fall back to the most recent real user message in the session.
  const parent = msgs.find((item) => item.info.id === message.parentID)
  const user = parent && real(parent) ? parent.info : msgs.findLast((item) => real(item))?.info
  if (!user || user.role !== "user") return

  // Run a tool-less, single-retry completion against the suggestion prompt.
  // `signal` aborts the request if the surrounding fiber is interrupted.
  const text = yield* Effect.promise(async (signal) => {
    const result = await LLM.stream({
      agent: {
        ...ag,
        name: "suggest-next",
        prompt: PROMPT_SUGGEST_NEXT,
      },
      user,
      system: [],
      small: true,
      tools: {}, // no tools offered
      model,
      abort: signal,
      sessionID: input.sessionID,
      retries: 1,
      toolChoice: "none", // belt-and-braces: forbid tool calls entirely
      messages: await MessageV2.toModelMessages(history, model),
    })
    return result.text
  })

  // Take the first non-empty line of the reply, after stripping any
  // <think>...</think> reasoning block and surrounding quote characters.
  const line = text
    .replace(/<think>[\s\S]*?<\/think>\s*/g, "")
    .split("\n")
    .map((item) => item.trim())
    .find((item) => item.length > 0)
    ?.replace(/^["'`]+|["'`]+$/g, "")
  if (!line) return

  // Normalize to detect the sentinel "NO_SUGGESTION" even when the model
  // varies casing, spacing, hyphenation, or adds punctuation.
  const tag = line
    .toUpperCase()
    .replace(/[\s-]+/g, "_")
    .replace(/[^A-Z_]/g, "")
  if (tag === "NO_SUGGESTION") return

  // Cap the suggestion at 240 characters (237 + "..." ellipsis).
  const suggestion = line.length > 240 ? line.slice(0, 237) + "..." : line
  // Re-check idleness: the user may have started a new turn while the LLM
  // call was in flight; don't clobber an active session's status.
  if ((yield* status.get(input.sessionID)).type !== "idle") return
  yield* status.set(input.sessionID, { type: "idle", suggestion })
})
|
|
||
| const insertReminders = Effect.fn("SessionPrompt.insertReminders")(function* (input: { | ||
| messages: MessageV2.WithParts[] | ||
| agent: Agent.Info | ||
|
|
@@ -1313,7 +1385,15 @@ NOTE: At any point in time through this workflow you should feel free to ask the | |
| } | ||
|
|
||
| if (input.noReply === true) return message | ||
| return yield* loop({ sessionID: input.sessionID }) | ||
| const result = yield* loop({ sessionID: input.sessionID }) | ||
| if (Flag.OPENCODE_EXPERIMENTAL_NEXT_PROMPT) { | ||
| yield* suggest({ | ||
| session, | ||
| sessionID: input.sessionID, | ||
| message: result, | ||
| }).pipe(Effect.ignore, Effect.forkIn(scope)) | ||
| } | ||
| return result | ||
| }, | ||
| ) | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| @@ -0,0 +1,21 @@ | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| You are generating a suggested next user message for the current conversation. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| Goal: | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Suggest a useful next step that keeps momentum. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| Rules: | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Output exactly one line. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Write as the user speaking to the assistant (for example: "Can you...", "Help me...", "Let's..."). | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Match the user's tone and language; keep it natural and human. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Prefer a concrete action over a broad question. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - If the conversation is vague or small-talk, steer toward a practical starter request. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - If there is no meaningful or appropriate next step to suggest, output exactly: NO_SUGGESTION | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Avoid corporate or robotic phrasing. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Avoid asking multiple discovery questions in one sentence. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Do not include quotes, labels, markdown, or explanations. | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| Examples: | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Greeting context -> "Can you scan this repo and suggest the best first task to tackle?" | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Bug-fix context -> "Can you reproduce this bug and propose the smallest safe fix?" | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Feature context -> "Let's implement this incrementally; start with the MVP version first." | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| - Conversation is complete -> "NO_SUGGESTION" | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
+1
to
+21
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
clean room inspiration from tried & tested. alternatively, add ability to change this prompt via settings/agents? i think this could be a separate "prediction" built in agent that can have a system prompt override |
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
your approach gets the last 8 messages and sends it as a separate query - which will have 0% cache hit.
if we could follow Claude Code's approach and send the entire chat history (including the tools, toolChoice, maxOutputTokens, system prompt, basically everything), and append the prompt prediction instruction as a user prompt at the end, we will get a FULL cache hit.
This gives a super cheap request with really good context for the agent to make a smart prediction.