From 066c4f3cab2bdc0d859c9768361add16fb6fc8b2 Mon Sep 17 00:00:00 2001 From: Val Alexander Date: Sat, 25 Apr 2026 07:55:29 -0500 Subject: [PATCH] Add prompt enhancement controls and GPT-5.5 support - add rewrite prompt enhancement in the compact composer menu - hide Codex fast mode when the model/backend cannot use it - register GPT-5.5 and GPT-5.5 Mini across model catalogs --- apps/server/src/provider/providerCatalog.ts | 34 ++++++ apps/web/src/components/ChatView.tsx | 10 +- .../chat/ClaudeTraitsPicker.browser.tsx | 21 ++++ .../components/chat/ClaudeTraitsPicker.tsx | 16 ++- .../chat/CodexTraitsPicker.browser.tsx | 54 +++++++++- .../src/components/chat/CodexTraitsPicker.tsx | 102 ++++++++++++------ .../CompactComposerControlsMenu.browser.tsx | 37 ++++++- .../chat/CompactComposerControlsMenu.tsx | 43 +++++++- .../chat/composerProviderRegistry.test.tsx | 49 +++++++++ .../chat/composerProviderRegistry.tsx | 43 +++++--- apps/web/src/promptEnhancement.test.ts | 25 ++++- apps/web/src/promptEnhancement.ts | 26 +++++ bun.lock | 10 +- packages/contracts/src/model.ts | 11 ++ packages/shared/src/model.test.ts | 80 ++++++++++++++ packages/shared/src/model.ts | 51 ++++++++- 16 files changed, 548 insertions(+), 64 deletions(-) diff --git a/apps/server/src/provider/providerCatalog.ts b/apps/server/src/provider/providerCatalog.ts index 57a19e817..cead5f7de 100644 --- a/apps/server/src/provider/providerCatalog.ts +++ b/apps/server/src/provider/providerCatalog.ts @@ -15,6 +15,38 @@ const noCapabilities = null; export const BUILT_IN_PROVIDER_MODELS: Record> = { codex: [ + { + slug: "gpt-5.5", + name: "GPT-5.5", + capabilities: { + reasoningEffortLevels: [ + { value: "low", label: "Low" }, + { value: "medium", label: "Medium" }, + { value: "high", label: "High", isDefault: true }, + { value: "xhigh", label: "Extra High" }, + ], + supportsFastMode: true, + supportsThinkingToggle: false, + contextWindowOptions: [], + promptInjectedEffortLevels: [], + }, + }, + { + slug: 
"gpt-5.5-mini", + name: "GPT-5.5 Mini", + capabilities: { + reasoningEffortLevels: [ + { value: "low", label: "Low" }, + { value: "medium", label: "Medium" }, + { value: "high", label: "High", isDefault: true }, + { value: "xhigh", label: "Extra High" }, + ], + supportsFastMode: true, + supportsThinkingToggle: false, + contextWindowOptions: [], + promptInjectedEffortLevels: [], + }, + }, { slug: "gpt-5.4", name: "GPT-5.4", @@ -117,6 +149,8 @@ export const BUILT_IN_PROVIDER_MODELS: Record getComposerProviderState({ @@ -946,8 +947,9 @@ export default function ChatView({ model: selectedModel, prompt, modelOptions: draftModelOptions, + codexBackendId, }), - [draftModelOptions, prompt, selectedModel, selectedProvider], + [codexBackendId, draftModelOptions, prompt, selectedModel, selectedProvider], ); const selectedPromptEffort = composerProviderState.promptEffort; const selectedModelOptionsForDispatch = composerProviderState.modelOptionsForDispatch; @@ -4407,12 +4409,14 @@ export default function ChatView({ threadId, model: selectedModel, onPromptChange: setPromptFromTraits, + codexBackendId, }); const providerTraitsPicker = renderProviderTraitsPicker({ provider: selectedProvider, threadId, model: selectedModel, onPromptChange: setPromptFromTraits, + codexBackendId, }); const onEnvModeChange = useCallback( (mode: DraftThreadEnvMode) => { @@ -5512,6 +5516,10 @@ export default function ChatView({ planSidebarOpen={planSidebarOpen} runtimeMode={runtimeMode} traitsMenuContent={providerTraitsMenuContent} + promptEnhancement={composerPromptEnhancement} + promptEnhancementAvailable={pendingUserInputs.length === 0} + promptEnhancementBusy={isEnhancingPrompt} + onPromptEnhancementChange={onPromptEnhancementChange} onInteractionModeChange={handleInteractionModeChange} onTogglePlanSidebar={togglePlanSidebar} onToggleRuntimeMode={toggleRuntimeMode} diff --git a/apps/web/src/components/chat/ClaudeTraitsPicker.browser.tsx b/apps/web/src/components/chat/ClaudeTraitsPicker.browser.tsx 
index 96f667903..26c0fe361 100644 --- a/apps/web/src/components/chat/ClaudeTraitsPicker.browser.tsx +++ b/apps/web/src/components/chat/ClaudeTraitsPicker.browser.tsx @@ -176,6 +176,27 @@ describe("ClaudeTraitsPicker", () => { } }); + it("checks the Ultrathink option when ultrathink is active in the prompt", async () => { + const mounted = await mountPicker({ + model: "claude-opus-4-6", + prompt: "Ultrathink:\nInvestigate this", + }); + + try { + await page.getByRole("button").click(); + + const ultrathinkItem = page.getByRole("menuitemradio", { name: "Ultrathink" }); + await vi.waitFor(() => { + expect(ultrathinkItem.element().getAttribute("aria-checked")).toBe("true"); + }); + + const highItem = page.getByRole("menuitemradio", { name: "High (default)" }); + expect(highItem.element().getAttribute("aria-checked")).toBe("false"); + } finally { + await mounted.cleanup(); + } + }); + it("persists sticky claude model options when traits change", async () => { const mounted = await mountPicker({ model: "claude-opus-4-6", diff --git a/apps/web/src/components/chat/ClaudeTraitsPicker.tsx b/apps/web/src/components/chat/ClaudeTraitsPicker.tsx index 474d2c319..7b994063a 100644 --- a/apps/web/src/components/chat/ClaudeTraitsPicker.tsx +++ b/apps/web/src/components/chat/ClaudeTraitsPicker.tsx @@ -46,7 +46,13 @@ function getSelectedClaudeTraits( prompt: string, modelOptions: ClaudeModelOptions | null | undefined, ): { - effort: Exclude | null; + /** + * The effort value to mirror in the radio group. When ultrathink is currently + * active (because the prompt carries the `ultrathink` keyword), this is + * `"ultrathink"` so that the radio group draws a checkmark next to the + * Ultrathink option, matching how every other thinking level renders. 
+ */ + effort: ClaudeCodeEffort | null; thinkingEnabled: boolean | null; fastModeEnabled: boolean; options: ReadonlyArray; @@ -59,7 +65,7 @@ function getSelectedClaudeTraits( "ultrathink" >; const resolvedEffort = resolveReasoningEffortForProvider(PROVIDER, modelOptions?.effort); - const effort = + const baseEffort: Exclude | null = resolvedEffort && resolvedEffort !== "ultrathink" && options.includes(resolvedEffort) ? resolvedEffort : options.includes(defaultReasoningEffort) @@ -69,13 +75,15 @@ function getSelectedClaudeTraits( ? (modelOptions?.thinking ?? true) : null; const supportsFastMode = supportsClaudeFastMode(model); + const ultrathinkPromptControlled = + supportsClaudeUltrathinkKeyword(model) && isClaudeUltrathinkPrompt(prompt); + const effort: ClaudeCodeEffort | null = ultrathinkPromptControlled ? "ultrathink" : baseEffort; return { effort, thinkingEnabled, fastModeEnabled: supportsFastMode && modelOptions?.fastMode === true, options, - ultrathinkPromptControlled: - supportsClaudeUltrathinkKeyword(model) && isClaudeUltrathinkPrompt(prompt), + ultrathinkPromptControlled, supportsFastMode, }; } diff --git a/apps/web/src/components/chat/CodexTraitsPicker.browser.tsx b/apps/web/src/components/chat/CodexTraitsPicker.browser.tsx index 413c51c0a..9fabba776 100644 --- a/apps/web/src/components/chat/CodexTraitsPicker.browser.tsx +++ b/apps/web/src/components/chat/CodexTraitsPicker.browser.tsx @@ -11,11 +11,14 @@ import { COMPOSER_DRAFT_STORAGE_KEY, useComposerDraftStore } from "../../compose async function mountPicker(props: { reasoningEffort?: "low" | "medium" | "high" | "xhigh"; fastModeEnabled: boolean; + model?: string | null; + backendId?: string | null; }) { const threadId = ThreadId.makeUnsafe("thread-codex-traits"); const draftsByThreadId = {} as ReturnType< typeof useComposerDraftStore.getState >["draftsByThreadId"]; + const model = props.model === undefined ? 
"gpt-5.4" : props.model; draftsByThreadId[threadId] = { prompt: "", attachments: [], @@ -25,7 +28,7 @@ async function mountPicker(props: { promptEnhancement: null, promptEnhancementOriginalPrompt: null, provider: "codex", - model: null, + model, modelOptions: { codex: { ...(props.reasoningEffort ? { reasoningEffort: props.reasoningEffort } : {}), @@ -44,7 +47,10 @@ async function mountPicker(props: { }); const host = document.createElement("div"); document.body.append(host); - const screen = await render(, { container: host }); + const screen = await render( + , + { container: host }, + ); return { cleanup: async () => { @@ -168,7 +174,10 @@ describe("CodexTraitsPicker", () => { const host = document.createElement("div"); document.body.append(host); - const screen = await render(, { container: host }); + const screen = await render( + , + { container: host }, + ); try { await useComposerDraftStore.persist.rehydrate(); @@ -187,4 +196,43 @@ describe("CodexTraitsPicker", () => { host.remove(); } }); + + it("hides fast mode controls when the model does not support priority service tier", async () => { + const mounted = await mountPicker({ + fastModeEnabled: false, + model: "gpt-5.3-codex", + }); + + try { + await page.getByRole("button").click(); + + await vi.waitFor(() => { + const text = document.body.textContent ?? ""; + expect(text).toContain("Reasoning"); + expect(text).not.toContain("Fast Mode"); + }); + } finally { + await mounted.cleanup(); + } + }); + + it("hides fast mode controls when a non-OpenAI codex backend is selected", async () => { + const mounted = await mountPicker({ + fastModeEnabled: false, + model: "gpt-5.4", + backendId: "ollama", + }); + + try { + await page.getByRole("button").click(); + + await vi.waitFor(() => { + const text = document.body.textContent ?? 
""; + expect(text).toContain("Reasoning"); + expect(text).not.toContain("Fast Mode"); + }); + } finally { + await mounted.cleanup(); + } + }); }); diff --git a/apps/web/src/components/chat/CodexTraitsPicker.tsx b/apps/web/src/components/chat/CodexTraitsPicker.tsx index 462a343f5..26610b50d 100644 --- a/apps/web/src/components/chat/CodexTraitsPicker.tsx +++ b/apps/web/src/components/chat/CodexTraitsPicker.tsx @@ -9,6 +9,7 @@ import { getReasoningEffortOptions, normalizeCodexModelOptions, resolveReasoningEffortForProvider, + supportsCodexFastMode, } from "@okcode/shared/model"; import { memo, useState } from "react"; import { ChevronDownIcon } from "lucide-react"; @@ -33,26 +34,43 @@ const CODEX_REASONING_LABELS: Record = { xhigh: "Extra High", }; -function getSelectedCodexTraits(modelOptions: CodexModelOptions | null | undefined): { +function getSelectedCodexTraits(input: { + modelOptions: CodexModelOptions | null | undefined; + model: string | null | undefined; + backendId: string | null | undefined; +}): { effort: CodexReasoningEffort; fastModeEnabled: boolean; + fastModeAvailable: boolean; } { const defaultReasoningEffort = getDefaultReasoningEffort(PROVIDER); + const fastModeAvailable = supportsCodexFastMode(input.model, input.backendId); return { effort: - resolveReasoningEffortForProvider(PROVIDER, modelOptions?.reasoningEffort) ?? + resolveReasoningEffortForProvider(PROVIDER, input.modelOptions?.reasoningEffort) ?? 
defaultReasoningEffort, - fastModeEnabled: modelOptions?.fastMode === true, + fastModeEnabled: fastModeAvailable && input.modelOptions?.fastMode === true, + fastModeAvailable, }; } -function CodexTraitsMenuContentImpl(props: { threadId: ThreadId }) { +interface CodexTraitsContextProps { + threadId: ThreadId; + model: string | null | undefined; + backendId: string | null | undefined; +} + +function CodexTraitsMenuContentImpl(props: CodexTraitsContextProps) { const draft = useComposerThreadDraft(props.threadId); const modelOptions = draft.modelOptions?.[PROVIDER]; const setProviderModelOptions = useComposerDraftStore((store) => store.setProviderModelOptions); const options = getReasoningEffortOptions(PROVIDER); const defaultReasoningEffort = getDefaultReasoningEffort(PROVIDER); - const { effort, fastModeEnabled } = getSelectedCodexTraits(modelOptions); + const { effort, fastModeEnabled, fastModeAvailable } = getSelectedCodexTraits({ + modelOptions, + model: props.model, + backendId: props.backendId, + }); return ( <> @@ -67,10 +85,13 @@ function CodexTraitsMenuContentImpl(props: { threadId: ThreadId }) { setProviderModelOptions( props.threadId, PROVIDER, - normalizeCodexModelOptions({ - ...modelOptions, - reasoningEffort: nextEffort, - }), + normalizeCodexModelOptions( + { + ...modelOptions, + reasoningEffort: nextEffort, + }, + { model: props.model, backendId: props.backendId }, + ), { persistSticky: true }, ); }} @@ -83,37 +104,48 @@ function CodexTraitsMenuContentImpl(props: { threadId: ThreadId }) { ))} - - -
Fast Mode
- { - setProviderModelOptions( - props.threadId, - PROVIDER, - normalizeCodexModelOptions({ - ...modelOptions, - fastMode: value === "on", - }), - { persistSticky: true }, - ); - }} - > - off - on - -
+ {fastModeAvailable ? ( + <> + + +
Fast Mode
+ { + setProviderModelOptions( + props.threadId, + PROVIDER, + normalizeCodexModelOptions( + { + ...modelOptions, + fastMode: value === "on", + }, + { model: props.model, backendId: props.backendId }, + ), + { persistSticky: true }, + ); + }} + > + off + on + +
+ + ) : null} ); } export const CodexTraitsMenuContent = memo(CodexTraitsMenuContentImpl); -export const CodexTraitsPicker = memo(function CodexTraitsPicker(props: { threadId: ThreadId }) { +export const CodexTraitsPicker = memo(function CodexTraitsPicker(props: CodexTraitsContextProps) { const [isMenuOpen, setIsMenuOpen] = useState(false); const modelOptions = useComposerThreadDraft(props.threadId).modelOptions?.codex; - const { effort, fastModeEnabled } = getSelectedCodexTraits(modelOptions); + const { effort, fastModeEnabled } = getSelectedCodexTraits({ + modelOptions, + model: props.model, + backendId: props.backendId, + }); const triggerLabel = [CODEX_REASONING_LABELS[effort], ...(fastModeEnabled ? ["Fast"] : [])] .filter(Boolean) .join(" · "); @@ -140,7 +172,11 @@ export const CodexTraitsPicker = memo(function CodexTraitsPicker(props: { thread - + ); diff --git a/apps/web/src/components/chat/CompactComposerControlsMenu.browser.tsx b/apps/web/src/components/chat/CompactComposerControlsMenu.browser.tsx index 7d6cc69c8..a8f07d4f2 100644 --- a/apps/web/src/components/chat/CompactComposerControlsMenu.browser.tsx +++ b/apps/web/src/components/chat/CompactComposerControlsMenu.browser.tsx @@ -15,6 +15,7 @@ async function mountMenu(props?: { prompt?: string; provider?: "codex" | "claudeAgent"; modelOptions?: ProviderModelOptions | null; + withPromptEnhancement?: boolean; }) { const threadId = ThreadId.makeUnsafe("thread-compact-menu"); const provider = props?.provider ?? "claudeAgent"; @@ -43,6 +44,7 @@ async function mountMenu(props?: { const host = document.createElement("div"); document.body.append(host); const onPromptChange = vi.fn(); + const onPromptEnhancementChange = vi.fn(); const screen = await render( + ) : ( ) } + {...(props?.withPromptEnhancement + ? 
{ + promptEnhancement: null, + promptEnhancementAvailable: true, + promptEnhancementBusy: false, + onPromptEnhancementChange, + } + : {})} onInteractionModeChange={vi.fn()} onTogglePlanSidebar={vi.fn()} onToggleRuntimeMode={vi.fn()} @@ -68,6 +82,7 @@ async function mountMenu(props?: { ); return { + onPromptEnhancementChange, cleanup: async () => { await screen.unmount(); host.remove(); @@ -185,4 +200,24 @@ describe("CompactComposerControlsMenu", () => { await mounted.cleanup(); } }); + + it("exposes prompt enhancements directly inside the compact controls menu", async () => { + const mounted = await mountMenu({ withPromptEnhancement: true }); + + try { + await page.getByLabelText("More composer controls").click(); + + await vi.waitFor(() => { + const text = document.body.textContent ?? ""; + expect(text).toContain("Prompt enhancement"); + expect(text).toContain("Full rewrite"); + expect(text).toContain("Add specificity"); + }); + + await page.getByRole("menuitem", { name: /Full rewrite/ }).click(); + expect(mounted.onPromptEnhancementChange).toHaveBeenCalledWith("rewrite"); + } finally { + await mounted.cleanup(); + } + }); }); diff --git a/apps/web/src/components/chat/CompactComposerControlsMenu.tsx b/apps/web/src/components/chat/CompactComposerControlsMenu.tsx index 1a7b5a4ee..d5e34a246 100644 --- a/apps/web/src/components/chat/CompactComposerControlsMenu.tsx +++ b/apps/web/src/components/chat/CompactComposerControlsMenu.tsx @@ -1,6 +1,6 @@ import { ProviderInteractionMode, RuntimeMode } from "@okcode/contracts"; import { memo, type ReactNode } from "react"; -import { EllipsisIcon, ListTodoIcon } from "lucide-react"; +import { CheckIcon, EllipsisIcon, ListTodoIcon, SparklesIcon } from "lucide-react"; import { Button } from "../ui/button"; import { Menu, @@ -11,6 +11,7 @@ import { MenuSeparator as MenuDivider, MenuTrigger, } from "../ui/menu"; +import { PROMPT_ENHANCEMENTS, type PromptEnhancementId } from "../../promptEnhancement"; export const 
CompactComposerControlsMenu = memo(function CompactComposerControlsMenu(props: { activePlan: boolean; @@ -18,10 +19,17 @@ export const CompactComposerControlsMenu = memo(function CompactComposerControls planSidebarOpen: boolean; runtimeMode: RuntimeMode; traitsMenuContent?: ReactNode; + promptEnhancement?: PromptEnhancementId | null; + promptEnhancementAvailable?: boolean; + promptEnhancementBusy?: boolean; + onPromptEnhancementChange?: (next: PromptEnhancementId | null) => void | Promise; onInteractionModeChange: (mode: ProviderInteractionMode) => void; onTogglePlanSidebar: () => void; onToggleRuntimeMode: () => void; }) { + const showPromptEnhancement = + typeof props.onPromptEnhancementChange === "function" && + props.promptEnhancementAvailable !== false; return ( ) : null} + {showPromptEnhancement ? ( + <> + +
+
+ {PROMPT_ENHANCEMENTS.map((enhancement) => { + const isSelected = props.promptEnhancement === enhancement.id; + return ( + { + void props.onPromptEnhancementChange?.(isSelected ? null : enhancement.id); + }} + > + {isSelected ? ( + + ) : ( + + )} + + {enhancement.label} + + {enhancement.description} + + + + ); + })} + + ) : null}
); diff --git a/apps/web/src/components/chat/composerProviderRegistry.test.tsx b/apps/web/src/components/chat/composerProviderRegistry.test.tsx index 139876d6f..2a7031c4d 100644 --- a/apps/web/src/components/chat/composerProviderRegistry.test.tsx +++ b/apps/web/src/components/chat/composerProviderRegistry.test.tsx @@ -146,4 +146,53 @@ describe("getComposerProviderState", () => { modelOptionsForDispatch: undefined, }); }); + + it("drops codex fastMode dispatch when the selected backend cannot honor it", () => { + const state = getComposerProviderState({ + provider: "codex", + model: "gpt-5.4", + prompt: "", + codexBackendId: "ollama", + modelOptions: { + codex: { + reasoningEffort: "xhigh", + fastMode: true, + }, + }, + }); + + expect(state).toEqual({ + provider: "codex", + promptEffort: "xhigh", + modelOptionsForDispatch: { + codex: { + reasoningEffort: "xhigh", + }, + }, + }); + }); + + it("preserves codex fastMode dispatch for the GPT-5.5 model on the OpenAI backend", () => { + const state = getComposerProviderState({ + provider: "codex", + model: "gpt-5.5", + prompt: "", + codexBackendId: "openai", + modelOptions: { + codex: { + fastMode: true, + }, + }, + }); + + expect(state).toEqual({ + provider: "codex", + promptEffort: "high", + modelOptionsForDispatch: { + codex: { + fastMode: true, + }, + }, + }); + }); }); diff --git a/apps/web/src/components/chat/composerProviderRegistry.tsx b/apps/web/src/components/chat/composerProviderRegistry.tsx index c4c3e263e..1e9da508f 100644 --- a/apps/web/src/components/chat/composerProviderRegistry.tsx +++ b/apps/web/src/components/chat/composerProviderRegistry.tsx @@ -22,6 +22,7 @@ export type ComposerProviderStateInput = { model: ModelSlug; prompt: string; modelOptions: ProviderModelOptions | null | undefined; + codexBackendId?: string | null | undefined; }; export type ComposerProviderState = { @@ -33,27 +34,29 @@ export type ComposerProviderState = { modelPickerIconClassName?: string; }; +export type RenderTraitsInput = { 
+ threadId: ThreadId; + model: ModelSlug; + onPromptChange: (prompt: string) => void; + codexBackendId?: string | null | undefined; +}; + type ProviderRegistryEntry = { getState: (input: ComposerProviderStateInput) => ComposerProviderState; - renderTraitsMenuContent: (input: { - threadId: ThreadId; - model: ModelSlug; - onPromptChange: (prompt: string) => void; - }) => ReactNode; - renderTraitsPicker: (input: { - threadId: ThreadId; - model: ModelSlug; - onPromptChange: (prompt: string) => void; - }) => ReactNode; + renderTraitsMenuContent: (input: RenderTraitsInput) => ReactNode; + renderTraitsPicker: (input: RenderTraitsInput) => ReactNode; }; const composerProviderRegistry: Record = { codex: { - getState: ({ modelOptions }) => { + getState: ({ model, modelOptions, codexBackendId }) => { const promptEffort = resolveReasoningEffortForProvider("codex", modelOptions?.codex?.reasoningEffort) ?? getDefaultReasoningEffort("codex"); - const normalizedCodexOptions = normalizeCodexModelOptions(modelOptions?.codex); + const normalizedCodexOptions = normalizeCodexModelOptions(modelOptions?.codex, { + model, + backendId: codexBackendId, + }); return { provider: "codex", @@ -63,8 +66,16 @@ const composerProviderRegistry: Record = { : undefined, }; }, - renderTraitsMenuContent: ({ threadId }) => , - renderTraitsPicker: ({ threadId }) => , + renderTraitsMenuContent: ({ threadId, model, codexBackendId }) => ( + + ), + renderTraitsPicker: ({ threadId, model, codexBackendId }) => ( + + ), }, claudeAgent: { getState: ({ model, prompt, modelOptions }) => { @@ -159,11 +170,13 @@ export function renderProviderTraitsMenuContent(input: { threadId: ThreadId; model: ModelSlug; onPromptChange: (prompt: string) => void; + codexBackendId?: string | null | undefined; }): ReactNode { return composerProviderRegistry[input.provider].renderTraitsMenuContent({ threadId: input.threadId, model: input.model, onPromptChange: input.onPromptChange, + codexBackendId: input.codexBackendId ?? 
null, }); } @@ -172,10 +185,12 @@ export function renderProviderTraitsPicker(input: { threadId: ThreadId; model: ModelSlug; onPromptChange: (prompt: string) => void; + codexBackendId?: string | null | undefined; }): ReactNode { return composerProviderRegistry[input.provider].renderTraitsPicker({ threadId: input.threadId, model: input.model, onPromptChange: input.onPromptChange, + codexBackendId: input.codexBackendId ?? null, }); } diff --git a/apps/web/src/promptEnhancement.test.ts b/apps/web/src/promptEnhancement.test.ts index 91b5565ac..6fe838610 100644 --- a/apps/web/src/promptEnhancement.test.ts +++ b/apps/web/src/promptEnhancement.test.ts @@ -1,6 +1,10 @@ import { describe, expect, it } from "vitest"; -import { enhancePrompt } from "./promptEnhancement"; +import { + enhancePrompt, + getPromptEnhancementById, + PROMPT_ENHANCEMENT_IDS, +} from "./promptEnhancement"; describe("enhancePrompt", () => { it("adds visible structure for specificity enhancements", () => { @@ -14,4 +18,23 @@ describe("enhancePrompt", () => { "fix the selected button state.", ); }); + + it("rewrites the prompt around goal, approach, and definition of done", () => { + const result = enhancePrompt("fix the selected button state", "rewrite"); + expect(result).toContain("Goal:"); + expect(result).toContain("- fix the selected button state."); + expect(result).toContain("Approach:"); + expect(result).toContain("Definition of done:"); + }); + + it("returns an empty string when rewriting empty input", () => { + expect(enhancePrompt(" ", "rewrite")).toBe(""); + }); +}); + +describe("PROMPT_ENHANCEMENTS metadata", () => { + it("exposes a Full rewrite enhancement at the top of the list", () => { + expect(PROMPT_ENHANCEMENT_IDS[0]).toBe("rewrite"); + expect(getPromptEnhancementById("rewrite")?.label).toBe("Full rewrite"); + }); }); diff --git a/apps/web/src/promptEnhancement.ts b/apps/web/src/promptEnhancement.ts index 983963b11..45f6a4e9a 100644 --- a/apps/web/src/promptEnhancement.ts +++ 
b/apps/web/src/promptEnhancement.ts @@ -1,4 +1,5 @@ export const PROMPT_ENHANCEMENT_IDS = [ + "rewrite", "specificity", "clarity", "constraints", @@ -17,6 +18,16 @@ export interface PromptEnhancementDefinition { } export const PROMPT_ENHANCEMENTS: readonly PromptEnhancementDefinition[] = [ + { + id: "rewrite", + label: "Full rewrite", + description: "Restructure the prompt around goal, approach, and validation", + guidance: [ + "Lead with a single-sentence goal that captures the user's actual intent.", + "Translate the request into a concrete approach, an explicit definition of done, and a short validation checklist.", + "Preserve the original scope and compatibility expectations — do not invent new requirements.", + ], + }, { id: "specificity", label: "Add specificity", @@ -117,6 +128,21 @@ export function enhancePrompt( } switch (enhancementId) { + case "rewrite": + return [ + "Goal:", + `- ${normalizedPrompt}`, + "", + "Approach:", + "- Identify the most direct way to satisfy this goal without widening scope.", + "- Replace vague phrasing with concrete, observable behavior on the relevant surface.", + "- Preserve compatibility with the existing flow unless the goal explicitly says otherwise.", + "", + "Definition of done:", + "- The user-visible outcome is explicit and predictable.", + "- The most likely edge cases are handled.", + "- Tests are added or updated when they meaningfully exercise the change.", + ].join("\n"); case "specificity": return [ normalizedPrompt, diff --git a/bun.lock b/bun.lock index 3249a5f7f..359aaeec3 100644 --- a/bun.lock +++ b/bun.lock @@ -19,7 +19,7 @@ }, "apps/desktop": { "name": "@okcode/desktop", - "version": "0.26.3", + "version": "0.26.4", "dependencies": { "effect": "catalog:", "electron": "40.6.0", @@ -103,7 +103,7 @@ }, "apps/mobile": { "name": "@okcode/mobile", - "version": "0.26.3", + "version": "0.26.4", "dependencies": { "@capacitor/android": "^8.3.1", "@capacitor/app": "^8.1.0", @@ -123,7 +123,7 @@ }, "apps/server": { 
"name": "okcodes", - "version": "0.26.3", + "version": "0.26.4", "bin": { "okcode": "./dist/index.cjs", }, @@ -156,7 +156,7 @@ }, "apps/web": { "name": "@okcode/web", - "version": "0.26.3", + "version": "0.26.4", "dependencies": { "@base-ui/react": "^1.2.0", "@codemirror/language": "^6.12.3", @@ -219,7 +219,7 @@ }, "packages/contracts": { "name": "@okcode/contracts", - "version": "0.26.3", + "version": "0.26.4", "dependencies": { "effect": "catalog:", }, diff --git a/packages/contracts/src/model.ts b/packages/contracts/src/model.ts index 092773e80..329d25a7c 100644 --- a/packages/contracts/src/model.ts +++ b/packages/contracts/src/model.ts @@ -60,6 +60,8 @@ type ModelOption = { export const MODEL_OPTIONS_BY_PROVIDER = { codex: [ + { slug: "gpt-5.5", name: "GPT-5.5" }, + { slug: "gpt-5.5-mini", name: "GPT-5.5 Mini" }, { slug: "gpt-5.4", name: "GPT-5.4" }, { slug: "gpt-5.4-mini", name: "GPT-5.4 Mini" }, { slug: "gpt-5.3-codex", name: "GPT-5.3 Codex" }, @@ -75,6 +77,8 @@ export const MODEL_OPTIONS_BY_PROVIDER = { ], openclaw: [], copilot: [ + { slug: "gpt-5.5", name: "GPT-5.5" }, + { slug: "gpt-5.5-mini", name: "GPT-5.5 mini" }, { slug: "gpt-5.4", name: "GPT-5.4" }, { slug: "gpt-5.4-mini", name: "GPT-5.4 mini" }, { slug: "gpt-5.3-codex", name: "GPT-5.3-Codex" }, @@ -121,6 +125,9 @@ export const DEFAULT_GIT_TEXT_GENERATION_MODEL = "gpt-5.4-mini" as const; export const MODEL_SLUG_ALIASES_BY_PROVIDER: Record> = { codex: { + "5.5": "gpt-5.5", + "5.5-mini": "gpt-5.5-mini", + "gpt-5.5-mini": "gpt-5.5-mini", "5.4": "gpt-5.4", "5.3": "gpt-5.3-codex", "gpt-5.3": "gpt-5.3-codex", @@ -159,6 +166,10 @@ export const MODEL_SLUG_ALIASES_BY_PROVIDER: Record { @@ -64,6 +65,15 @@ describe("normalizeModelSlug", () => { expect(normalizeModelSlug("claude-haiku-4-5-20251001", "claudeAgent")).toBe("claude-haiku-4-5"); }); + it("resolves GPT-5.5 aliases for codex and copilot", () => { + expect(normalizeModelSlug("5.5", "codex")).toBe("gpt-5.5"); + expect(normalizeModelSlug("5.5-mini", 
"codex")).toBe("gpt-5.5-mini"); + expect(normalizeModelSlug("5.5", "copilot")).toBe("gpt-5.5"); + expect(normalizeModelSlug("gpt-5.5-mini", "copilot")).toBe("gpt-5.5-mini"); + expect(resolveModelSlug("gpt-5.5")).toBe("gpt-5.5"); + expect(resolveModelSlug("gpt-5.5-mini")).toBe("gpt-5.5-mini"); + }); + it("accepts Anthropic-prefixed Claude model slugs", () => { expect(normalizeModelSlug("anthropic/claude-sonnet-4-6", "claudeAgent")).toBe( "claude-sonnet-4-6", @@ -344,6 +354,76 @@ describe("normalizeCodexModelOptions", () => { fastMode: true, }); }); + + it("drops fastMode when the model+backend cannot support priority service tier", () => { + expect( + normalizeCodexModelOptions( + { reasoningEffort: "xhigh", fastMode: true }, + { model: "gpt-5.4", backendId: "ollama" }, + ), + ).toEqual({ reasoningEffort: "xhigh" }); + + expect( + normalizeCodexModelOptions( + { reasoningEffort: "high", fastMode: true }, + { model: "gpt-5.3-codex", backendId: "openai" }, + ), + ).toBeUndefined(); + }); + + it("preserves fastMode for fast-mode capable models on the OpenAI backend", () => { + expect( + normalizeCodexModelOptions( + { reasoningEffort: "xhigh", fastMode: true }, + { model: "gpt-5.5", backendId: "openai" }, + ), + ).toEqual({ reasoningEffort: "xhigh", fastMode: true }); + + expect( + normalizeCodexModelOptions( + { reasoningEffort: "high", fastMode: true }, + { model: "gpt-5.5-mini", backendId: null }, + ), + ).toEqual({ fastMode: true }); + }); +}); + +describe("supportsCodexFastMode", () => { + it("enables fast mode for newer GPT-5.x models on the OpenAI backend", () => { + expect(supportsCodexFastMode("gpt-5.5")).toBe(true); + expect(supportsCodexFastMode("gpt-5.5-mini")).toBe(true); + expect(supportsCodexFastMode("gpt-5.4")).toBe(true); + expect(supportsCodexFastMode("gpt-5.4-mini")).toBe(true); + expect(supportsCodexFastMode("gpt-5.5", "openai")).toBe(true); + expect(supportsCodexFastMode("gpt-5.5", null)).toBe(true); + expect(supportsCodexFastMode("gpt-5.5", 
undefined)).toBe(true); + }); + + it("rejects older models that do not support priority service tier", () => { + expect(supportsCodexFastMode("gpt-5.3-codex")).toBe(false); + expect(supportsCodexFastMode("gpt-5.3-codex-spark")).toBe(false); + expect(supportsCodexFastMode("gpt-5.2")).toBe(false); + expect(supportsCodexFastMode("gpt-5.2-codex")).toBe(false); + }); + + it("rejects fast mode when the codex backend is not OpenAI", () => { + expect(supportsCodexFastMode("gpt-5.5", "ollama")).toBe(false); + expect(supportsCodexFastMode("gpt-5.5", "lmstudio")).toBe(false); + expect(supportsCodexFastMode("gpt-5.4", "azure")).toBe(false); + expect(supportsCodexFastMode("gpt-5.4", "openrouter")).toBe(false); + expect(supportsCodexFastMode("gpt-5.4-mini", "groq")).toBe(false); + }); + + it("rejects fast mode for missing or empty model slugs", () => { + expect(supportsCodexFastMode(undefined)).toBe(false); + expect(supportsCodexFastMode(null)).toBe(false); + expect(supportsCodexFastMode("")).toBe(false); + expect(supportsCodexFastMode(" ")).toBe(false); + }); + + it("treats whitespace-only backend IDs as the implicit OpenAI default", () => { + expect(supportsCodexFastMode("gpt-5.5", " ")).toBe(true); + }); }); describe("normalizeClaudeModelOptions", () => { diff --git a/packages/shared/src/model.ts b/packages/shared/src/model.ts index c7d083a71..f457128be 100644 --- a/packages/shared/src/model.ts +++ b/packages/shared/src/model.ts @@ -33,6 +33,21 @@ const CLAUDE_OPUS_4_6_MODEL = "claude-opus-4-6"; const CLAUDE_SONNET_4_6_MODEL = "claude-sonnet-4-6"; const CLAUDE_HAIKU_4_5_MODEL = "claude-haiku-4-5"; +const CODEX_FAST_MODE_MODELS: ReadonlySet = new Set([ + "gpt-5.5", + "gpt-5.5-mini", + "gpt-5.4", + "gpt-5.4-mini", +]); + +/** + * The Codex `serviceTier: "fast"` (priority service tier) is only available on + * the official OpenAI backend for a curated subset of newer GPT-5.x models. 
+ * Selecting a local backend (Ollama, LM Studio) or any third-party Codex + * backend means the request never hits OpenAI, so fast mode does not apply. + */ +const CODEX_FAST_MODE_BACKEND_ID = "openai"; + export interface SelectableModelOption { slug: string; name: string; @@ -51,6 +66,32 @@ export function supportsClaudeFastMode(model: string | null | undefined): boolea return normalized === CLAUDE_OPUS_4_7_MODEL || normalized === CLAUDE_OPUS_4_6_MODEL; } +/** + * Codex fast mode is only genuinely available when: + * 1. The codex backend is OpenAI's hosted endpoint (priority service tier + * is an OpenAI-platform feature, not something local backends or + * third-party gateways can honor), and + * 2. The selected model is one of the GPT-5.x slugs that supports the + * priority/`serviceTier: "fast"` request. + * + * Pass `null`, `undefined`, or a blank (empty or whitespace-only) string for + * `backendId` to indicate "use the implicit + * OpenAI default backend" — each of those is treated as the OpenAI backend. + */ +export function supportsCodexFastMode( + model: string | null | undefined, + backendId?: string | null | undefined, +): boolean { + const normalized = normalizeModelSlug(model, "codex"); + if (!normalized || !CODEX_FAST_MODE_MODELS.has(normalized)) { + return false; + } + const trimmedBackend = typeof backendId === "string" ? 
backendId.trim() : ""; + if (trimmedBackend.length === 0) { + return true; + } + return trimmedBackend === CODEX_FAST_MODE_BACKEND_ID; +} + export function supportsClaudeAdaptiveReasoning(model: string | null | undefined): boolean { const normalized = normalizeModelSlug(model, "claudeAgent"); return ( @@ -330,12 +371,20 @@ export function resolveClaudeUltrathinkSdkConfig( export function normalizeCodexModelOptions( modelOptions: CodexModelOptions | null | undefined, + context?: { model?: string | null | undefined; backendId?: string | null | undefined }, ): CodexModelOptions | undefined { const defaultReasoningEffort = getDefaultReasoningEffort("codex"); const reasoningEffort = resolveReasoningEffortForProvider("codex", modelOptions?.reasoningEffort) ?? defaultReasoningEffort; - const fastModeEnabled = modelOptions?.fastMode === true; + // When no model/backend context is provided, preserve legacy behavior of + // honoring whatever the caller stored. Once context is supplied, fast mode + // is only retained when the model+backend genuinely support it. + const fastModeRequested = modelOptions?.fastMode === true; + const fastModeEnabled = + fastModeRequested && + (context === undefined || + supportsCodexFastMode(context.model ?? undefined, context.backendId ?? undefined)); const nextOptions: CodexModelOptions = { ...(reasoningEffort !== defaultReasoningEffort ? { reasoningEffort } : {}), ...(fastModeEnabled ? { fastMode: true } : {}),