diff --git a/README.md b/README.md
index 6bab1f6..bb7ddaf 100644
--- a/README.md
+++ b/README.md
@@ -120,7 +120,7 @@ Restart OpenCode. The plugin will automatically load.
 │                                                          │
 │  Hooks:                                                  │
 │  ┌─────────────────┐      ┌────────────────────┐         │
-│  │ chat.params     │─────▶│ Context Injection  │         │
+│  │ chat.message    │─────▶│ Context Injection  │         │
 │  │ (pre-LLM)       │      │ - Golden Rules     │         │
 │  │                 │      │ - Past Learnings   │         │
 │  │                 │      │ - Heuristics       │         │
diff --git a/package-lock.json b/package-lock.json
index 0e37efe..e049e66 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "opencode-elf",
-  "version": "0.5.3",
+  "version": "0.6.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "opencode-elf",
-      "version": "0.5.3",
+      "version": "0.6.0",
       "license": "MIT",
       "dependencies": {
         "@libsql/client": "^0.14.0",
diff --git a/package.json b/package.json
index cad4f94..f9fc39b 100644
--- a/package.json
+++ b/package.json
@@ -51,7 +51,7 @@
   "opencode": {
     "type": "plugin",
     "hooks": [
-      "chat.params",
+      "chat.message",
       "event"
     ]
   },
diff --git a/src/index.ts b/src/index.ts
index 6896333..34cacf7 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -75,18 +75,21 @@ export const ELFPlugin: Plugin = async ({ directory }: PluginInput) => {
   // 3.
Return hooks immediately
   return {
     /**
-     * Chat params hook - Inject ELF context before the LLM processes the message
+     * Chat message hook - Inject ELF context into system prompt before LLM processing
      */
-    "chat.params": async (params: Record<string, unknown>) => {
+    "chat.message": async (input, output) => {
       const start = Date.now();
       try {
         // Wait for init to finish (only affects the very first message)
         await ensureReady();
 
-        const input = params.input as Record<string, unknown> | undefined;
-        const message = input?.message as Record<string, unknown> | undefined;
-        const userMessage = message?.text as string | undefined;
+        // Extract user text from message parts (TextParts have type "text" and a text field)
+        const userMessage = output.parts
+          .filter((p): p is Extract<(typeof output.parts)[number], { type: "text" }> => p.type === "text")
+          .map(p => p.text)
+          .join("\n")
+          .trim();
 
         if (!userMessage) return;
@@ -110,13 +113,11 @@ export const ELFPlugin: Plugin = async ({ directory }: PluginInput) => {
             context.relevantLearnings.length > 0 ||
             context.heuristics.length > 0) {
-          // Inject into system prompt or prepend to user message
-          const systemPrompt = input?.systemPrompt as string | undefined;
-          if (systemPrompt && input) {
-            input.systemPrompt = `${systemPrompt}\n\n${elfMemory}`;
-          } else if (message) {
-            message.text = `${elfMemory}\n\n${userMessage}`;
-          }
+          // Inject into the system prompt via output.message.system
+          const currentSystem = output.message.system || "";
+          output.message.system = currentSystem
+            ? `${currentSystem}\n\n${elfMemory}`
+            : elfMemory;
 
           // Record metrics
           const duration = Date.now() - start;
@@ -129,7 +130,7 @@ export const ELFPlugin: Plugin = async ({ directory }: PluginInput) => {
         }
       } catch (error) {
         // Fail open: If ELF fails, log it but don't break the user's chat
-        console.error("ELF: Error in chat.params hook", error);
+        console.error("ELF: Error in chat.message hook", error);
       }
     },