diff --git a/.changeset/debug-logging-adapters.md b/.changeset/debug-logging-adapters.md new file mode 100644 index 000000000..172e0eec8 --- /dev/null +++ b/.changeset/debug-logging-adapters.md @@ -0,0 +1,13 @@ +--- +'@tanstack/ai-openai': patch +'@tanstack/ai-anthropic': patch +'@tanstack/ai-gemini': patch +'@tanstack/ai-ollama': patch +'@tanstack/ai-openrouter': patch +'@tanstack/ai-grok': patch +'@tanstack/ai-groq': patch +'@tanstack/ai-elevenlabs': patch +'@tanstack/ai-fal': patch +--- + +Wire each adapter's text, summarize, image, speech, transcription, and video paths through the new `InternalLogger` from `@tanstack/ai/adapter-internals`: `logger.request(...)` before each SDK call, `logger.provider(...)` for every chunk received, and `logger.errors(...)` in catch blocks. Migrates all pre-existing ad-hoc `console.*` calls in adapter catch blocks (including the OpenAI and ElevenLabs realtime adapters) onto the structured logger. No adapter factory or config-shape changes. diff --git a/.changeset/debug-logging-ai.md b/.changeset/debug-logging-ai.md new file mode 100644 index 000000000..846d79e37 --- /dev/null +++ b/.changeset/debug-logging-ai.md @@ -0,0 +1,20 @@ +--- +'@tanstack/ai': minor +--- + +**Pluggable debug logging across every activity.** `chat`, `summarize`, `generateImage`, `generateVideo`, `generateSpeech`, and `generateTranscription` now accept a `debug?: DebugOption` that turns on structured per-category logs (`request`, `provider`, `output`, `middleware`, `tools`, `agentLoop`, `config`, `errors`). + +```ts +chat({ adapter, messages, debug: true }) // all categories on +chat({ adapter, messages, debug: false }) // silent (incl. 
errors) +chat({ adapter, messages, debug: { middleware: false } }) // all except middleware +chat({ adapter, messages, debug: { logger: pino } }) // route to a custom logger +``` + +Additions: + +- New `Logger` interface (`debug` / `info` / `warn` / `error`) and default `ConsoleLogger` that routes to matching `console.*` methods and prints nested `meta` via `console.dir(meta, { depth: null, colors: true })` so streamed provider payloads render in full. +- New `DebugCategories` / `DebugConfig` / `DebugOption` public types. +- New internal `@tanstack/ai/adapter-internals` subpath export exposing `InternalLogger` + `resolveDebugOption` so provider adapters can thread logging without leaking internals on the public surface. +- Each log line is prefixed with an emoji + `[tanstack-ai:]` tag so categories are visually distinguishable in dense streams. Errors log unconditionally unless explicitly silenced. +- `TextEngine`, `MiddlewareRunner`, and every activity entry point thread a resolved `InternalLogger` through the pipeline — no globals, concurrent calls stay independent. diff --git a/docs/advanced/debug-logging.md b/docs/advanced/debug-logging.md new file mode 100644 index 000000000..ed7952fcd --- /dev/null +++ b/docs/advanced/debug-logging.md @@ -0,0 +1,143 @@ +--- +title: Debug Logging +id: debug-logging +order: 3 +description: "Turn on structured, category-toggleable debug logging to see every chunk, middleware transform, and tool call inside TanStack AI." +keywords: + - tanstack ai + - debug + - logging + - logger + - pino + - troubleshooting + - chunks + - middleware debugging +--- + +# Debug Logging + +You have a `chat()` that isn't behaving as expected — a missing chunk, a middleware that doesn't seem to fire, a tool call with wrong args. By the end of this guide, you'll have turned on debug logging and will see every chunk, middleware transform, and tool call flowing through your call. 
+ +## Turn it on + +Add `debug: true` to any activity call: + +```typescript +import { chat } from "@tanstack/ai"; +import { openaiText } from "@tanstack/ai-openai"; + +const stream = chat({ + adapter: openaiText("gpt-4o"), + messages: [{ role: "user", content: "Hello" }], + debug: true, +}); +``` + +Every internal event now prints to the console with a `[tanstack-ai:]` prefix: + +``` +[tanstack-ai:request] activity=chat provider=openai model=gpt-4o messages=1 tools=0 stream=true +[tanstack-ai:agentLoop] run started +[tanstack-ai:provider] provider=openai type=response.output_text.delta +[tanstack-ai:output] type=TEXT_MESSAGE_CONTENT +... +``` + +## Narrow what's printed + +Pass a `DebugConfig` object instead of `true`. Every unspecified category defaults to `true`, so toggle by setting specific flags to `false`: + +```typescript +chat({ + adapter: openaiText("gpt-4o"), + messages, + debug: { middleware: false }, // everything except middleware +}); +``` + +If you want to see ONLY a specific set of categories, set the rest to `false` explicitly. 
Errors default to `true` — keep them on unless you really want total silence: + +```typescript +chat({ + adapter: openaiText("gpt-4o"), + messages, + debug: { + provider: true, + output: true, + middleware: false, + tools: false, + agentLoop: false, + config: false, + errors: true, // keep errors on — they're cheap and important + request: false, + }, +}); +``` + +## Pipe into your own logger + +Pass a `Logger` implementation and all debug output flows through it instead of `console`: + +```typescript +import type { Logger } from "@tanstack/ai"; +import pino from "pino"; + +const pinoLogger = pino(); +const logger: Logger = { + debug: (msg, meta) => pinoLogger.debug(meta, msg), + info: (msg, meta) => pinoLogger.info(meta, msg), + warn: (msg, meta) => pinoLogger.warn(meta, msg), + error: (msg, meta) => pinoLogger.error(meta, msg), +}; + +chat({ + adapter: openaiText("gpt-4o"), + messages, + debug: { logger }, // all categories on, piped to pino +}); +``` + +The default logger is exported as `ConsoleLogger` if you want to wrap it: + +```typescript +import { ConsoleLogger } from "@tanstack/ai"; +``` + +## Categories reference + +| Category | Logs | Applies to | +|----------|------|------------| +| `request` | Outgoing call to a provider (model, message count, tool count) | All activities | +| `provider` | Every raw chunk/frame received from a provider SDK | Streaming activities (chat, realtime) | +| `output` | Every chunk or result yielded to the caller | All activities | +| `middleware` | Inputs and outputs around every middleware hook | `chat()` only | +| `tools` | Before/after tool call execution | `chat()` only | +| `agentLoop` | Agent-loop iterations and phase transitions | `chat()` only | +| `config` | Config transforms returned by middleware `onConfig` hooks | `chat()` only | +| `errors` | Every caught error anywhere in the pipeline | All activities | + +## Errors are always logged + +Errors flow through the logger unconditionally — even when you omit `debug`: 
+ +```typescript +chat({ adapter, messages }); // still prints [tanstack-ai:errors] ... on failure +``` + +To fully silence (including errors), set `debug: false` or `debug: { errors: false }`. Errors also always reach the caller via thrown exceptions or `RUN_ERROR` stream chunks — the logger is additive, not the only surface. + +## Non-chat activities + +The same `debug` option works on every activity: + +```typescript +summarize({ adapter, text, debug: true }); +generateImage({ adapter, prompt: "a cat", debug: { logger } }); +generateSpeech({ adapter, text, debug: { request: true } }); +``` + +The chat-only categories (`middleware`, `tools`, `agentLoop`, `config`) simply never fire for these activities because those concepts don't exist in their pipelines. + +## Related + +If you're building middleware and want to see chunks flow through it, `debug: { middleware: true }` is faster than writing a logging middleware. See [Middleware](./middleware) for writing your own middleware, or [Observability](./observability) for the programmatic event client. diff --git a/docs/advanced/extend-adapter.md b/docs/advanced/extend-adapter.md index a1317323d..145432b21 100644 --- a/docs/advanced/extend-adapter.md +++ b/docs/advanced/extend-adapter.md @@ -1,7 +1,7 @@ --- title: Extend Adapter id: extend-adapter -order: 7 +order: 8 description: "Extend TanStack AI adapter factories with custom model IDs and fine-tuned models while keeping full type safety for input modalities and provider options." keywords: - tanstack ai diff --git a/docs/advanced/middleware.md b/docs/advanced/middleware.md index 1e568f3a7..91de53d95 100644 --- a/docs/advanced/middleware.md +++ b/docs/advanced/middleware.md @@ -49,6 +49,9 @@ const stream = chat({ }); ``` +> **Just want to see chunks flowing through your middleware during development?** +> Use `debug: { middleware: true }` on your `chat()` call — no custom middleware required. See [Debug Logging](./debug-logging). 
+ ## Lifecycle Overview Every `chat()` invocation follows a predictable lifecycle. Middleware hooks fire at specific phases: diff --git a/docs/advanced/multimodal-content.md b/docs/advanced/multimodal-content.md index 5aa10b81a..f30301e1b 100644 --- a/docs/advanced/multimodal-content.md +++ b/docs/advanced/multimodal-content.md @@ -1,7 +1,7 @@ --- title: Multimodal Content id: multimodal-content -order: 3 +order: 4 description: "Send images, audio, video, and documents alongside text in TanStack AI messages with typed ContentPart primitives for multimodal models." keywords: - tanstack ai diff --git a/docs/advanced/observability.md b/docs/advanced/observability.md index b838dd973..a9f9b0d54 100644 --- a/docs/advanced/observability.md +++ b/docs/advanced/observability.md @@ -19,6 +19,9 @@ The `@tanstack/ai` package offers you an event client for observability and debu It's a fully type-safe decoupled event-driven system that emits events whenever they are internally triggered and you can subscribe to those events for observability. +> **Looking for quick diagnostic console output instead of a programmatic event stream?** +> See [Debug Logging](./debug-logging) for turning on category-toggleable logging across every adapter and middleware hook. + Because the same event client is used for both the TanStack Devtools system and observability locally it will work by subscribing to the event bus and emitting events to/from the event bus into the listeners by default. 
If you want to subscribe to events in production as well you need to pass in a third argument to the `on` function, diff --git a/docs/advanced/per-model-type-safety.md b/docs/advanced/per-model-type-safety.md index b667ea0a9..27468ac38 100644 --- a/docs/advanced/per-model-type-safety.md +++ b/docs/advanced/per-model-type-safety.md @@ -1,7 +1,7 @@ --- title: Per-Model Type Safety id: per-model-type-safety -order: 4 +order: 5 description: "TanStack AI narrows modelOptions and content types to the specific model you select, enforcing capabilities at compile time." keywords: - tanstack ai diff --git a/docs/advanced/runtime-adapter-switching.md b/docs/advanced/runtime-adapter-switching.md index a23d33f65..ba13debb3 100644 --- a/docs/advanced/runtime-adapter-switching.md +++ b/docs/advanced/runtime-adapter-switching.md @@ -1,7 +1,7 @@ --- title: Runtime Adapter Switching id: runtime-adapter-switching -order: 5 +order: 6 description: "Let users switch between LLM providers at runtime in TanStack AI while keeping full TypeScript type safety for each adapter's model options." keywords: - tanstack ai diff --git a/docs/advanced/tree-shaking.md b/docs/advanced/tree-shaking.md index c0ec0a89e..51b6162e6 100644 --- a/docs/advanced/tree-shaking.md +++ b/docs/advanced/tree-shaking.md @@ -1,7 +1,7 @@ --- title: Tree-Shaking id: tree-shaking -order: 6 +order: 7 description: "TanStack AI's tree-shakeable architecture — import only the activities and adapters you use for minimal bundle size across chat, image, and speech." 
keywords: - tanstack ai diff --git a/docs/config.json b/docs/config.json index 698d18545..5afef2be3 100644 --- a/docs/config.json +++ b/docs/config.json @@ -163,6 +163,10 @@ "label": "Middleware", "to": "advanced/middleware" }, + { + "label": "Debug Logging", + "to": "advanced/debug-logging" + }, { "label": "Observability", "to": "advanced/observability" diff --git a/packages/typescript/ai-anthropic/src/adapters/summarize.ts b/packages/typescript/ai-anthropic/src/adapters/summarize.ts index bc88721b8..cdd9fe66f 100644 --- a/packages/typescript/ai-anthropic/src/adapters/summarize.ts +++ b/packages/typescript/ai-anthropic/src/adapters/summarize.ts @@ -54,36 +54,52 @@ export class AnthropicSummarizeAdapter< } async summarize(options: SummarizationOptions): Promise { + const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) - const response = await this.client.messages.create({ + logger.request(`activity=summarize provider=anthropic`, { + provider: 'anthropic', model: options.model, - messages: [{ role: 'user', content: options.text }], - system: systemPrompt, - max_tokens: options.maxLength || 500, - temperature: 0.3, - stream: false, }) - const content = response.content - .map((c) => (c.type === 'text' ? c.text : '')) - .join('') - - return { - id: response.id, - model: response.model, - summary: content, - usage: { - promptTokens: response.usage.input_tokens, - completionTokens: response.usage.output_tokens, - totalTokens: response.usage.input_tokens + response.usage.output_tokens, - }, + try { + const response = await this.client.messages.create({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + system: systemPrompt, + max_tokens: options.maxLength || 500, + temperature: 0.3, + stream: false, + }) + + const content = response.content + .map((c) => (c.type === 'text' ? 
c.text : '')) + .join('') + + return { + id: response.id, + model: response.model, + summary: content, + usage: { + promptTokens: response.usage.input_tokens, + completionTokens: response.usage.output_tokens, + totalTokens: + response.usage.input_tokens + response.usage.output_tokens, + }, + } + } catch (error) { + logger.errors('anthropic.summarize fatal', { + error, + source: 'anthropic.summarize', + }) + throw error } } async *summarizeStream( options: SummarizationOptions, ): AsyncIterable { + const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) const id = generateId(this.name) const model = options.model @@ -91,50 +107,68 @@ export class AnthropicSummarizeAdapter< let inputTokens = 0 let outputTokens = 0 - const stream = await this.client.messages.create({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - system: systemPrompt, - max_tokens: options.maxLength || 500, - temperature: 0.3, + logger.request(`activity=summarize provider=anthropic`, { + provider: 'anthropic', + model, stream: true, }) - for await (const event of stream) { - if (event.type === 'message_start') { - inputTokens = event.message.usage.input_tokens - } else if (event.type === 'content_block_delta') { - if (event.delta.type === 'text_delta') { - const delta = event.delta.text - accumulatedContent += delta + try { + const stream = await this.client.messages.create({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + system: systemPrompt, + max_tokens: options.maxLength || 500, + temperature: 0.3, + stream: true, + }) + + for await (const event of stream) { + logger.provider(`provider=anthropic type=${event.type}`, { + chunk: event, + }) + + if (event.type === 'message_start') { + inputTokens = event.message.usage.input_tokens + } else if (event.type === 'content_block_delta') { + if (event.delta.type === 'text_delta') { + const delta = event.delta.text + accumulatedContent += delta + yield 
asChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: id, + model, + timestamp: Date.now(), + delta, + content: accumulatedContent, + }) + } + } else if (event.type === 'message_delta') { + outputTokens = event.usage.output_tokens yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: id, + type: 'RUN_FINISHED', + runId: id, model, timestamp: Date.now(), - delta, - content: accumulatedContent, + finishReason: event.delta.stop_reason as + | 'stop' + | 'length' + | 'content_filter' + | null, + usage: { + promptTokens: inputTokens, + completionTokens: outputTokens, + totalTokens: inputTokens + outputTokens, + }, }) } - } else if (event.type === 'message_delta') { - outputTokens = event.usage.output_tokens - yield asChunk({ - type: 'RUN_FINISHED', - runId: id, - model, - timestamp: Date.now(), - finishReason: event.delta.stop_reason as - | 'stop' - | 'length' - | 'content_filter' - | null, - usage: { - promptTokens: inputTokens, - completionTokens: outputTokens, - totalTokens: inputTokens + outputTokens, - }, - }) } + } catch (error) { + logger.errors('anthropic.summarize fatal', { + error, + source: 'anthropic.summarize', + }) + throw error } } diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index c88c1159f..3a040bbc7 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -15,6 +15,7 @@ import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' import type { Base64ImageSource, Base64PDFSource, @@ -123,9 +124,14 @@ export class AnthropicTextAdapter< async *chatStream( options: TextOptions, ): AsyncIterable { + const { logger } = options try { const requestParams = this.mapCommonOptionsToAnthropic(options) + logger.request( + `activity=chat provider=anthropic model=${this.model} 
messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: 'anthropic', model: this.model }, + ) const stream = await this.client.beta.messages.create( { ...requestParams, stream: true }, { @@ -134,11 +140,18 @@ export class AnthropicTextAdapter< }, ) - yield* this.processAnthropicStream(stream, options, () => - generateId(this.name), + yield* this.processAnthropicStream( + stream, + options, + () => generateId(this.name), + logger, ) } catch (error: unknown) { const err = error as Error & { status?: number; code?: string } + logger.errors('anthropic.chatStream fatal', { + error, + source: 'anthropic.chatStream', + }) yield asChunk({ type: 'RUN_ERROR', model: options.model, @@ -163,6 +176,7 @@ export class AnthropicTextAdapter< options: StructuredOutputOptions, ): Promise> { const { chatOptions, outputSchema } = options + const { logger } = chatOptions const requestParams = this.mapCommonOptionsToAnthropic(chatOptions) @@ -180,6 +194,10 @@ export class AnthropicTextAdapter< } try { + logger.request( + `activity=chat provider=anthropic model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 
0} stream=false`, + { provider: 'anthropic', model: this.model }, + ) // Make non-streaming request with tool_choice forced to our structured output tool const response = await this.client.messages.create( { @@ -231,6 +249,10 @@ export class AnthropicTextAdapter< } } catch (error: unknown) { const err = error as Error + logger.errors('anthropic.structuredOutput fatal', { + error, + source: 'anthropic.structuredOutput', + }) throw new Error( `Structured output generation failed: ${err.message || 'Unknown error occurred'}`, ) @@ -532,6 +554,7 @@ export class AnthropicTextAdapter< stream: AsyncIterable, options: TextOptions, genId: () => string, + logger: InternalLogger, ): AsyncIterable { const model = options.model let accumulatedContent = '' @@ -558,6 +581,9 @@ export class AnthropicTextAdapter< try { for await (const event of stream) { + logger.provider(`provider=anthropic type=${event.type}`, { + chunk: event, + }) // Emit RUN_STARTED on first event if (!hasEmittedRunStarted) { hasEmittedRunStarted = true @@ -868,6 +894,10 @@ export class AnthropicTextAdapter< } catch (error: unknown) { const err = error as Error & { status?: number; code?: string } + logger.errors('anthropic.processAnthropicStream fatal', { + error, + source: 'anthropic.processAnthropicStream', + }) yield asChunk({ type: 'RUN_ERROR', runId, diff --git a/packages/typescript/ai-elevenlabs/src/realtime/adapter.ts b/packages/typescript/ai-elevenlabs/src/realtime/adapter.ts index 6b5bd95e9..a347d013c 100644 --- a/packages/typescript/ai-elevenlabs/src/realtime/adapter.ts +++ b/packages/typescript/ai-elevenlabs/src/realtime/adapter.ts @@ -1,4 +1,5 @@ import { Conversation } from '@11labs/client' +import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import type { AnyClientTool, AudioVisualization, @@ -10,6 +11,7 @@ import type { RealtimeStatus, RealtimeToken, } from '@tanstack/ai' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' import type { RealtimeAdapter, 
RealtimeConnection } from '@tanstack/ai-client' import type { ElevenLabsRealtimeOptions } from './types' @@ -35,6 +37,8 @@ import type { ElevenLabsRealtimeOptions } from './types' export function elevenlabsRealtime( options: ElevenLabsRealtimeOptions = {}, ): RealtimeAdapter { + const logger = resolveDebugOption(options.debug) + return { provider: 'elevenlabs', @@ -42,7 +46,10 @@ export function elevenlabsRealtime( token: RealtimeToken, clientToolDefs?: ReadonlyArray, ): Promise { - return createElevenLabsConnection(token, options, clientToolDefs) + logger.request(`activity=realtime provider=elevenlabs`, { + provider: 'elevenlabs', + }) + return createElevenLabsConnection(token, options, logger, clientToolDefs) }, } } @@ -53,6 +60,7 @@ export function elevenlabsRealtime( async function createElevenLabsConnection( token: RealtimeToken, _options: ElevenLabsRealtimeOptions, + logger: InternalLogger, clientToolDefs?: ReadonlyArray, ): Promise { const eventHandlers = new Map>>() @@ -108,22 +116,34 @@ async function createElevenLabsConnection( signedUrl: token.token, onConnect: () => { + logger.provider(`provider=elevenlabs direction=in type=connect`, { + frame: { type: 'connect' }, + }) emit('status_change', { status: 'connected' as RealtimeStatus }) emit('mode_change', { mode: 'listening' }) }, onDisconnect: () => { + logger.provider(`provider=elevenlabs direction=in type=disconnect`, { + frame: { type: 'disconnect' }, + }) emit('status_change', { status: 'idle' as RealtimeStatus }) emit('mode_change', { mode: 'idle' }) }, onModeChange: ({ mode }: { mode: string }) => { + logger.provider(`provider=elevenlabs direction=in type=mode_change`, { + frame: { type: 'mode_change', mode }, + }) const mappedMode: RealtimeMode = mode === 'speaking' ? 
'speaking' : 'listening' emit('mode_change', { mode: mappedMode }) }, onMessage: ({ message, source }: { message: string; source: string }) => { + logger.provider(`provider=elevenlabs direction=in type=message`, { + frame: { type: 'message', source, message }, + }) const role = source === 'user' ? 'user' : 'assistant' if (role === 'user') { @@ -148,6 +168,10 @@ async function createElevenLabsConnection( }, onError: (error: string | Error) => { + logger.errors('elevenlabs.realtime fatal', { + error, + source: 'elevenlabs.realtime', + }) emit('error', { error: new Error( typeof error === 'string' ? error : error.message || 'Unknown error', @@ -162,9 +186,17 @@ async function createElevenLabsConnection( } // Start the conversation session - conversation = await Conversation.startSession( - sessionOptions as Parameters[0], - ) + try { + conversation = await Conversation.startSession( + sessionOptions as Parameters[0], + ) + } catch (error) { + logger.errors('elevenlabs.realtime fatal', { + error, + source: 'elevenlabs.realtime', + }) + throw error + } // Connection implementation const connection: RealtimeConnection = { @@ -189,14 +221,17 @@ async function createElevenLabsConnection( sendText(text: string) { if (!conversation) return + logger.provider(`provider=elevenlabs direction=out type=user_message`, { + frame: { type: 'user_message', text }, + }) conversation.sendUserMessage(text) }, sendImage(_imageData: string, _mimeType: string) { // ElevenLabs does not support direct image input in the conversation API - console.warn( - 'ElevenLabs realtime does not support sending images directly.', - ) + logger.errors('elevenlabs.realtime sendImage not supported', { + source: 'elevenlabs.realtime', + }) }, sendToolResult(_callId: string, _result: string) { @@ -206,9 +241,9 @@ async function createElevenLabsConnection( updateSession(_config: Partial) { // ElevenLabs session config is set at creation time - console.warn( - 'ElevenLabs does not support runtime session updates. 
Configure at connection time.', - ) + logger.errors('elevenlabs.realtime updateSession not supported', { + source: 'elevenlabs.realtime', + }) }, interrupt() { diff --git a/packages/typescript/ai-elevenlabs/src/realtime/types.ts b/packages/typescript/ai-elevenlabs/src/realtime/types.ts index c3f5227f7..12d7714f6 100644 --- a/packages/typescript/ai-elevenlabs/src/realtime/types.ts +++ b/packages/typescript/ai-elevenlabs/src/realtime/types.ts @@ -1,3 +1,5 @@ +import type { DebugOption } from '@tanstack/ai' + /** * Options for the ElevenLabs realtime token adapter */ @@ -23,8 +25,13 @@ export interface ElevenLabsRealtimeTokenOptions { export interface ElevenLabsRealtimeOptions { /** Connection mode (default: auto-detect) */ connectionMode?: 'websocket' | 'webrtc' - /** Enable debug logging */ - debug?: boolean + /** + * Enable debug logging for this adapter. + * + * - `true` enables all categories (`request`, `response`, `provider`, `errors`). + * - A {@link DebugConfig} object selects categories and/or a custom sink. 
+ */ + debug?: DebugOption } /** diff --git a/packages/typescript/ai-fal/src/adapters/image.ts b/packages/typescript/ai-fal/src/adapters/image.ts index d3af94e88..2dd88bbe5 100644 --- a/packages/typescript/ai-fal/src/adapters/image.ts +++ b/packages/typescript/ai-fal/src/adapters/image.ts @@ -56,9 +56,24 @@ export class FalImageAdapter extends BaseImageAdapter< FalModelImageSize >, ): Promise { - const input = this.buildInput(options) - const result = await fal.subscribe(this.model, { input }) - return this.transformResponse(result) + const { logger } = options + + logger.request(`activity=generateImage provider=fal model=${this.model}`, { + provider: 'fal', + model: this.model, + }) + + try { + const input = this.buildInput(options) + const result = await fal.subscribe(this.model, { input }) + return this.transformResponse(result) + } catch (error) { + logger.errors('fal.generateImage fatal', { + error, + source: 'fal.generateImage', + }) + throw error + } } private buildInput( diff --git a/packages/typescript/ai-fal/src/adapters/video.ts b/packages/typescript/ai-fal/src/adapters/video.ts index 48cdc72ec..13060223e 100644 --- a/packages/typescript/ai-fal/src/adapters/video.ts +++ b/packages/typescript/ai-fal/src/adapters/video.ts @@ -80,24 +80,38 @@ export class FalVideoAdapter extends BaseVideoAdapter< FalModelVideoSize >, ): Promise { - const { prompt, size, duration, modelOptions } = options - const sizeParams = mapVideoSizeToFalFormat(size) - - const input = { - ...modelOptions, - ...sizeParams, - prompt, - ...(duration ? 
{ duration } : {}), - } as FalModelInput - - // Submit to queue and get request ID - const { request_id } = await fal.queue.submit(this.model, { - input, - }) + const { prompt, size, duration, modelOptions, logger } = options - return { - jobId: request_id, + logger.request(`activity=generateVideo provider=fal model=${this.model}`, { + provider: 'fal', model: this.model, + }) + + try { + const sizeParams = mapVideoSizeToFalFormat(size) + + const input = { + ...modelOptions, + ...sizeParams, + prompt, + ...(duration ? { duration } : {}), + } as FalModelInput + + // Submit to queue and get request ID + const { request_id } = await fal.queue.submit(this.model, { + input, + }) + + return { + jobId: request_id, + model: this.model, + } + } catch (error) { + logger.errors('fal.createVideoJob fatal', { + error, + source: 'fal.createVideoJob', + }) + throw error } } diff --git a/packages/typescript/ai-gemini/src/adapters/image.ts b/packages/typescript/ai-gemini/src/adapters/image.ts index 2ccf47b58..2a21a8714 100644 --- a/packages/typescript/ai-gemini/src/adapters/image.ts +++ b/packages/typescript/ai-gemini/src/adapters/image.ts @@ -81,27 +81,43 @@ export class GeminiImageAdapter< async generateImages( options: ImageGenerationOptions, ): Promise { - const { model, prompt } = options + const { model, prompt, logger } = options - validatePrompt({ prompt, model }) + logger.request( + `activity=generateImage provider=gemini model=${this.model}`, + { + provider: 'gemini', + model: this.model, + }, + ) - if (this.isGeminiImageModel(model)) { - return this.generateWithGeminiApi(options) - } + try { + validatePrompt({ prompt, model }) - // Imagen models path (generateImages API) - validateImageSize(model, options.size) - validateNumberOfImages(model, options.numberOfImages) + if (this.isGeminiImageModel(model)) { + return await this.generateWithGeminiApi(options) + } - const config = this.buildImagenConfig(options) + // Imagen models path (generateImages API) + 
validateImageSize(model, options.size) + validateNumberOfImages(model, options.numberOfImages) - const response = await this.client.models.generateImages({ - model, - prompt, - config, - }) + const config = this.buildImagenConfig(options) - return this.transformImagenResponse(model, response) + const response = await this.client.models.generateImages({ + model, + prompt, + config, + }) + + return this.transformImagenResponse(model, response) + } catch (error) { + logger.errors('gemini.generateImage fatal', { + error, + source: 'gemini.generateImage', + }) + throw error + } } private isGeminiImageModel(model: string): boolean { diff --git a/packages/typescript/ai-gemini/src/adapters/summarize.ts b/packages/typescript/ai-gemini/src/adapters/summarize.ts index 21d459963..e5b3330b5 100644 --- a/packages/typescript/ai-gemini/src/adapters/summarize.ts +++ b/packages/typescript/ai-gemini/src/adapters/summarize.ts @@ -81,8 +81,14 @@ export class GeminiSummarizeAdapter< } async summarize(options: SummarizationOptions): Promise { + const { logger } = options const model = options.model + logger.request(`activity=summarize provider=gemini`, { + provider: 'gemini', + model, + }) + // Build the system prompt based on format const formatInstructions = this.getFormatInstructions(options.style) const lengthInstructions = options.maxLength @@ -91,40 +97,49 @@ export class GeminiSummarizeAdapter< const systemPrompt = `You are a helpful assistant that summarizes text. 
${formatInstructions}${lengthInstructions}` - const response = await this.client.models.generateContent({ - model, - contents: [ - { - role: 'user', - parts: [ - { text: `Please summarize the following:\n\n${options.text}` }, - ], + try { + const response = await this.client.models.generateContent({ + model, + contents: [ + { + role: 'user', + parts: [ + { text: `Please summarize the following:\n\n${options.text}` }, + ], + }, + ], + config: { + systemInstruction: systemPrompt, }, - ], - config: { - systemInstruction: systemPrompt, - }, - }) + }) - const summary = response.text ?? '' - const inputTokens = response.usageMetadata?.promptTokenCount ?? 0 - const outputTokens = response.usageMetadata?.candidatesTokenCount ?? 0 + const summary = response.text ?? '' + const inputTokens = response.usageMetadata?.promptTokenCount ?? 0 + const outputTokens = response.usageMetadata?.candidatesTokenCount ?? 0 - return { - id: generateId('sum'), - model, - summary, - usage: { - promptTokens: inputTokens, - completionTokens: outputTokens, - totalTokens: inputTokens + outputTokens, - }, + return { + id: generateId('sum'), + model, + summary, + usage: { + promptTokens: inputTokens, + completionTokens: outputTokens, + totalTokens: inputTokens + outputTokens, + }, + } + } catch (error) { + logger.errors('gemini.summarize fatal', { + error, + source: 'gemini.summarize', + }) + throw error } } async *summarizeStream( options: SummarizationOptions, ): AsyncIterable { + const { logger } = options const model = options.model const id = generateId('sum') let accumulatedContent = '' @@ -139,69 +154,85 @@ export class GeminiSummarizeAdapter< const systemPrompt = `You are a helpful assistant that summarizes text. 
${formatInstructions}${lengthInstructions}` - const result = await this.client.models.generateContentStream({ + logger.request(`activity=summarize provider=gemini`, { + provider: 'gemini', model, - contents: [ - { - role: 'user', - parts: [ - { text: `Please summarize the following:\n\n${options.text}` }, - ], - }, - ], - config: { - systemInstruction: systemPrompt, - }, + stream: true, }) - for await (const chunk of result) { - // Track usage metadata - if (chunk.usageMetadata) { - inputTokens = chunk.usageMetadata.promptTokenCount ?? inputTokens - outputTokens = chunk.usageMetadata.candidatesTokenCount ?? outputTokens - } + try { + const result = await this.client.models.generateContentStream({ + model, + contents: [ + { + role: 'user', + parts: [ + { text: `Please summarize the following:\n\n${options.text}` }, + ], + }, + ], + config: { + systemInstruction: systemPrompt, + }, + }) - if (chunk.candidates?.[0]?.content?.parts) { - for (const part of chunk.candidates[0].content.parts) { - if (part.text) { - accumulatedContent += part.text - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: id, - model, - timestamp: Date.now(), - delta: part.text, - content: accumulatedContent, - }) + for await (const chunk of result) { + logger.provider(`provider=gemini`, { chunk }) + // Track usage metadata + if (chunk.usageMetadata) { + inputTokens = chunk.usageMetadata.promptTokenCount ?? inputTokens + outputTokens = + chunk.usageMetadata.candidatesTokenCount ?? 
outputTokens + } + + if (chunk.candidates?.[0]?.content?.parts) { + for (const part of chunk.candidates[0].content.parts) { + if (part.text) { + accumulatedContent += part.text + yield asChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: id, + model, + timestamp: Date.now(), + delta: part.text, + content: accumulatedContent, + }) + } } } - } - // Check for finish reason - const finishReason = chunk.candidates?.[0]?.finishReason - if ( - finishReason === FinishReason.STOP || - finishReason === FinishReason.MAX_TOKENS || - finishReason === FinishReason.SAFETY - ) { - yield asChunk({ - type: 'RUN_FINISHED', - runId: id, - model, - timestamp: Date.now(), - finishReason: - finishReason === FinishReason.STOP - ? 'stop' - : finishReason === FinishReason.MAX_TOKENS - ? 'length' - : 'content_filter', - usage: { - promptTokens: inputTokens, - completionTokens: outputTokens, - totalTokens: inputTokens + outputTokens, - }, - }) + // Check for finish reason + const finishReason = chunk.candidates?.[0]?.finishReason + if ( + finishReason === FinishReason.STOP || + finishReason === FinishReason.MAX_TOKENS || + finishReason === FinishReason.SAFETY + ) { + yield asChunk({ + type: 'RUN_FINISHED', + runId: id, + model, + timestamp: Date.now(), + finishReason: + finishReason === FinishReason.STOP + ? 'stop' + : finishReason === FinishReason.MAX_TOKENS + ? 
'length' + : 'content_filter', + usage: { + promptTokens: inputTokens, + completionTokens: outputTokens, + totalTokens: inputTokens + outputTokens, + }, + }) + } } + } catch (error) { + logger.errors('gemini.summarize fatal', { + error, + source: 'gemini.summarize', + }) + throw error } } diff --git a/packages/typescript/ai-gemini/src/adapters/text.ts b/packages/typescript/ai-gemini/src/adapters/text.ts index 0ed39cebf..10754d46b 100644 --- a/packages/typescript/ai-gemini/src/adapters/text.ts +++ b/packages/typescript/ai-gemini/src/adapters/text.ts @@ -15,6 +15,7 @@ import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' import type { Content, GenerateContentParameters, @@ -106,14 +107,23 @@ export class GeminiTextAdapter< options: TextOptions, ): AsyncIterable { const mappedOptions = this.mapCommonOptionsToGemini(options) + const { logger } = options try { + logger.request( + `activity=chat provider=gemini model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: 'gemini', model: this.model }, + ) const result = await this.client.models.generateContentStream(mappedOptions) - yield* this.processStreamChunks(result, options) + yield* this.processStreamChunks(result, options, logger) } catch (error) { const timestamp = Date.now() + logger.errors('gemini.chatStream fatal', { + error, + source: 'gemini.chatStream', + }) yield asChunk({ type: 'RUN_ERROR', model: options.model, @@ -141,10 +151,15 @@ export class GeminiTextAdapter< options: StructuredOutputOptions, ): Promise> { const { chatOptions, outputSchema } = options + const { logger } = chatOptions const mappedOptions = this.mapCommonOptionsToGemini(chatOptions) try { + logger.request( + `activity=chat provider=gemini model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 
0} stream=false`, + { provider: 'gemini', model: this.model }, + ) // Add structured output configuration const result = await this.client.models.generateContent({ ...mappedOptions, @@ -173,6 +188,10 @@ export class GeminiTextAdapter< rawText, } } catch (error) { + logger.errors('gemini.structuredOutput fatal', { + error, + source: 'gemini.structuredOutput', + }) throw new Error( error instanceof Error ? error.message @@ -201,6 +220,7 @@ export class GeminiTextAdapter< private async *processStreamChunks( result: AsyncGenerator, options: TextOptions, + logger: InternalLogger, ): AsyncIterable { const model = options.model const timestamp = Date.now() @@ -230,6 +250,7 @@ export class GeminiTextAdapter< let hasEmittedStepStarted = false for await (const chunk of result) { + logger.provider(`provider=gemini`, { chunk }) // Emit RUN_STARTED on first chunk if (!hasEmittedRunStarted) { hasEmittedRunStarted = true diff --git a/packages/typescript/ai-gemini/src/adapters/tts.ts b/packages/typescript/ai-gemini/src/adapters/tts.ts index 5850bb156..cd5d5f053 100644 --- a/packages/typescript/ai-gemini/src/adapters/tts.ts +++ b/packages/typescript/ai-gemini/src/adapters/tts.ts @@ -98,63 +98,77 @@ export class GeminiTTSAdapter< async generateSpeech( options: TTSOptions, ): Promise { + const { logger } = options const { model, text, modelOptions } = options + logger.request(`activity=generateSpeech provider=gemini model=${model}`, { + provider: 'gemini', + model, + }) + const voiceConfig = modelOptions?.voiceConfig || { prebuiltVoiceConfig: { voiceName: 'Kore', }, } - const response = await this.client.models.generateContent({ - model, - contents: [ - { - role: 'user', - parts: [{ text }], + try { + const response = await this.client.models.generateContent({ + model, + contents: [ + { + role: 'user', + parts: [{ text }], + }, + ], + config: { + responseModalities: ['AUDIO'], + speechConfig: { + voiceConfig, + ...(modelOptions?.languageCode && { + languageCode: 
modelOptions.languageCode, + }), + }, }, - ], - config: { - responseModalities: ['AUDIO'], - speechConfig: { - voiceConfig, - ...(modelOptions?.languageCode && { - languageCode: modelOptions.languageCode, - }), - }, - }, - ...(modelOptions?.systemInstruction && { - systemInstruction: modelOptions.systemInstruction, - }), - }) - - // Extract audio data from response - const candidate = response.candidates?.[0] - const parts = candidate?.content?.parts - - if (!parts || parts.length === 0) { - throw new Error('No audio output received from Gemini TTS') - } - - // Look for inline data (audio) - const audioPart = parts.find((part: any) => - part.inlineData?.mimeType?.startsWith('audio/'), - ) - - if (!audioPart || !audioPart.inlineData || !audioPart.inlineData.data) { - throw new Error('No audio data in Gemini TTS response') - } - - const audioBase64 = audioPart.inlineData.data - const mimeType = audioPart.inlineData.mimeType || 'audio/wav' - const format = mimeType.split('/')[1] || 'wav' - - return { - id: generateId(this.name), - model, - audio: audioBase64, - format, - contentType: mimeType, + ...(modelOptions?.systemInstruction && { + systemInstruction: modelOptions.systemInstruction, + }), + }) + + // Extract audio data from response + const candidate = response.candidates?.[0] + const parts = candidate?.content?.parts + + if (!parts || parts.length === 0) { + throw new Error('No audio output received from Gemini TTS') + } + + // Look for inline data (audio) + const audioPart = parts.find((part: any) => + part.inlineData?.mimeType?.startsWith('audio/'), + ) + + if (!audioPart || !audioPart.inlineData || !audioPart.inlineData.data) { + throw new Error('No audio data in Gemini TTS response') + } + + const audioBase64 = audioPart.inlineData.data + const mimeType = audioPart.inlineData.mimeType || 'audio/wav' + const format = mimeType.split('/')[1] || 'wav' + + return { + id: generateId(this.name), + model, + audio: audioBase64, + format, + contentType: mimeType, + } 
+ } catch (error) { + logger.errors('gemini.generateSpeech fatal', { + error, + source: 'gemini.generateSpeech', + }) + throw error } } } diff --git a/packages/typescript/ai-gemini/tests/image-adapter.test.ts b/packages/typescript/ai-gemini/tests/image-adapter.test.ts index 9ca3af076..34d997b4f 100644 --- a/packages/typescript/ai-gemini/tests/image-adapter.test.ts +++ b/packages/typescript/ai-gemini/tests/image-adapter.test.ts @@ -1,4 +1,5 @@ import { describe, it, expect, vi } from 'vitest' +import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import { GeminiImageAdapter, createGeminiImage } from '../src/adapters/image' import { parseNativeImageSize, @@ -8,6 +9,8 @@ import { validatePrompt, } from '../src/image/image-provider-options' +const testLogger = resolveDebugOption(false) + describe('Gemini Image Adapter', () => { describe('createGeminiImage', () => { it('creates an adapter with the provided API key', () => { @@ -174,6 +177,7 @@ describe('Gemini Image Adapter', () => { prompt: 'A cat wearing a hat', numberOfImages: 1, size: '1024x1024', + logger: testLogger, }) expect(mockGenerateImages).toHaveBeenCalledWith({ @@ -214,11 +218,13 @@ describe('Gemini Image Adapter', () => { const result1 = await adapter.generateImages({ model: 'imagen-3.0-generate-002', prompt: 'Test prompt', + logger: testLogger, }) const result2 = await adapter.generateImages({ model: 'imagen-3.0-generate-002', prompt: 'Test prompt', + logger: testLogger, }) expect(result1.id).not.toBe(result2.id) @@ -264,6 +270,7 @@ describe('Gemini Image Adapter', () => { model: 'gemini-3.1-flash-image-preview', prompt: 'A futuristic city', size: '16:9_4K', + logger: testLogger, }) expect(mockGenerateContent).toHaveBeenCalledWith({ @@ -320,6 +327,7 @@ describe('Gemini Image Adapter', () => { const result = await adapter.generateImages({ model: 'gemini-3.1-flash-image-preview', prompt: 'A simple sketch', + logger: testLogger, }) expect(mockGenerateContent).toHaveBeenCalledWith({ @@ -363,6 
+371,7 @@ describe('Gemini Image Adapter', () => { const result = await adapter.generateImages({ model: 'gemini-3.1-flash-image-preview', prompt: 'A test prompt', + logger: testLogger, }) expect(result.images).toHaveLength(0) @@ -409,6 +418,7 @@ describe('Gemini Image Adapter', () => { model: 'gemini-3.1-flash-image-preview', prompt: 'A futuristic city', numberOfImages: 3, + logger: testLogger, }) expect(mockGenerateContent).toHaveBeenCalledWith({ @@ -460,6 +470,7 @@ describe('Gemini Image Adapter', () => { model: 'gemini-3.1-flash-image-preview', prompt: 'A simple sketch', numberOfImages: 1, + logger: testLogger, }) expect(mockGenerateContent).toHaveBeenCalledWith({ @@ -505,6 +516,7 @@ describe('Gemini Image Adapter', () => { await adapter.generateImages({ model: 'gemini-3.1-flash-image-preview', prompt: 'A simple sketch', + logger: testLogger, }) expect(mockGenerateContent).toHaveBeenCalledWith({ diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts index 4bdabd355..49cb1780f 100644 --- a/packages/typescript/ai-grok/src/adapters/image.ts +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -56,22 +56,35 @@ export class GrokImageAdapter< async generateImages( options: ImageGenerationOptions, ): Promise { - const { model, prompt, numberOfImages, size } = options + const { model, prompt, numberOfImages, size, logger } = options - // Validate inputs - validatePrompt({ prompt, model }) - validateImageSize(model, size) - validateNumberOfImages(model, numberOfImages) + logger.request(`activity=generateImage provider=grok model=${this.model}`, { + provider: 'grok', + model: this.model, + }) - // Build request based on model type - const request = this.buildRequest(options) + try { + // Validate inputs + validatePrompt({ prompt, model }) + validateImageSize(model, size) + validateNumberOfImages(model, numberOfImages) - const response = await this.client.images.generate({ - ...request, - stream: false, - }) 
+ // Build request based on model type + const request = this.buildRequest(options) - return this.transformResponse(model, response) + const response = await this.client.images.generate({ + ...request, + stream: false, + }) + + return this.transformResponse(model, response) + } catch (error) { + logger.errors('grok.generateImage fatal', { + error, + source: 'grok.generateImage', + }) + throw error + } } private buildRequest( diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts index e9de0b663..eadaaf9e6 100644 --- a/packages/typescript/ai-grok/src/adapters/summarize.ts +++ b/packages/typescript/ai-grok/src/adapters/summarize.ts @@ -47,36 +47,51 @@ export class GrokSummarizeAdapter< } async summarize(options: SummarizationOptions): Promise { + const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) + logger.request(`activity=summarize provider=grok`, { + provider: 'grok', + model: options.model, + }) + // Use the text adapter's streaming and collect the result let summary = '' const id = '' let model = options.model let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } - for await (const chunk of this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - })) { - // AG-UI TEXT_MESSAGE_CONTENT event - if (chunk.type === 'TEXT_MESSAGE_CONTENT') { - if (chunk.content) { - summary = chunk.content - } else { - summary += chunk.delta + try { + for await (const chunk of this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + logger, + })) { + // AG-UI TEXT_MESSAGE_CONTENT event + if (chunk.type === 'TEXT_MESSAGE_CONTENT') { + if (chunk.content) { + summary = chunk.content + } else { + 
summary += chunk.delta + } + model = chunk.model || model } - model = chunk.model || model - } - // AG-UI RUN_FINISHED event - if (chunk.type === 'RUN_FINISHED') { - if (chunk.usage) { - usage = chunk.usage + // AG-UI RUN_FINISHED event + if (chunk.type === 'RUN_FINISHED') { + if (chunk.usage) { + usage = chunk.usage + } } } + } catch (error) { + logger.errors('grok.summarize fatal', { + error, + source: 'grok.summarize', + }) + throw error } return { id, model, summary, usage } @@ -85,16 +100,32 @@ export class GrokSummarizeAdapter< async *summarizeStream( options: SummarizationOptions, ): AsyncIterable { + const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) - // Delegate directly to the text adapter's streaming - yield* this.textAdapter.chatStream({ + logger.request(`activity=summarize provider=grok`, { + provider: 'grok', model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, + stream: true, }) + + try { + // Delegate directly to the text adapter's streaming + yield* this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + logger, + }) + } catch (error) { + logger.errors('grok.summarize fatal', { + error, + source: 'grok.summarize', + }) + throw error + } } private buildSummarizationPrompt(options: SummarizationOptions): string { diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index 9902354f3..13cd3bc49 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -17,6 +17,7 @@ import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' import type OpenAI_SDK 
from 'openai' import type { ContentPart, @@ -75,6 +76,7 @@ export class GrokTextAdapter< ): AsyncIterable { const requestParams = this.mapTextOptionsToGrok(options) const timestamp = Date.now() + const { logger } = options // AG-UI lifecycle tracking (mutable state object for ESLint compatibility) const aguiState = { @@ -86,12 +88,16 @@ export class GrokTextAdapter< } try { + logger.request( + `activity=chat provider=grok model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: 'grok', model: this.model }, + ) const stream = await this.client.chat.completions.create({ ...requestParams, stream: true, }) - yield* this.processGrokStreamChunks(stream, options, aguiState) + yield* this.processGrokStreamChunks(stream, options, aguiState, logger) } catch (error: unknown) { const err = error as Error & { code?: string } @@ -121,10 +127,10 @@ export class GrokTextAdapter< }, }) - console.error('>>> chatStream: Fatal error during response creation <<<') - console.error('>>> Error message:', err.message) - console.error('>>> Error stack:', err.stack) - console.error('>>> Full error:', err) + logger.errors('grok.chatStream fatal', { + error, + source: 'grok.chatStream', + }) } } @@ -145,6 +151,7 @@ export class GrokTextAdapter< ): Promise> { const { chatOptions, outputSchema } = options const requestParams = this.mapTextOptionsToGrok(chatOptions) + const { logger } = chatOptions // Apply Grok-specific transformations for structured output compatibility const jsonSchema = makeGrokStructuredOutputCompatible( @@ -153,6 +160,10 @@ export class GrokTextAdapter< ) try { + logger.request( + `activity=chat provider=grok model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 
0} stream=false`, + { provider: 'grok', model: this.model }, + ) const response = await this.client.chat.completions.create({ ...requestParams, stream: false, @@ -188,9 +199,10 @@ export class GrokTextAdapter< rawText, } } catch (error: unknown) { - const err = error as Error - console.error('>>> structuredOutput: Error during response creation <<<') - console.error('>>> Error message:', err.message) + logger.errors('grok.structuredOutput fatal', { + error, + source: 'grok.structuredOutput', + }) throw error } } @@ -205,6 +217,7 @@ export class GrokTextAdapter< timestamp: number hasEmittedRunStarted: boolean }, + logger: InternalLogger, ): AsyncIterable { let accumulatedContent = '' const timestamp = aguiState.timestamp @@ -223,6 +236,7 @@ export class GrokTextAdapter< try { for await (const chunk of stream) { + logger.provider(`provider=grok`, { chunk }) const choice = chunk.choices[0] if (!choice) continue @@ -392,7 +406,10 @@ export class GrokTextAdapter< } } catch (error: unknown) { const err = error as Error & { code?: string } - console.log('[Grok Adapter] Stream ended with error:', err.message) + logger.errors('grok stream ended with error', { + error, + source: 'grok.processGrokStreamChunks', + }) // Emit AG-UI RUN_ERROR yield asChunk({ diff --git a/packages/typescript/ai-grok/tests/grok-adapter.test.ts b/packages/typescript/ai-grok/tests/grok-adapter.test.ts index 14e3e57c7..f992cfadb 100644 --- a/packages/typescript/ai-grok/tests/grok-adapter.test.ts +++ b/packages/typescript/ai-grok/tests/grok-adapter.test.ts @@ -1,9 +1,13 @@ import { describe, it, expect, vi, afterEach, beforeEach } from 'vitest' +import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import { createGrokText, grokText } from '../src/adapters/text' import { createGrokImage, grokImage } from '../src/adapters/image' import { createGrokSummarize, grokSummarize } from '../src/adapters/summarize' import type { StreamChunk, Tool } from '@tanstack/ai' +// Test helper: a silent 
logger for test chatStream calls. +const testLogger = resolveDebugOption(false) + // Declare mockCreate at module level let mockCreate: ReturnType @@ -195,6 +199,7 @@ describe('Grok AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'grok-3', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -242,6 +247,7 @@ describe('Grok AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'grok-3', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -300,6 +306,7 @@ describe('Grok AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'grok-3', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -391,6 +398,7 @@ describe('Grok AG-UI event emission', () => { model: 'grok-3', messages: [{ role: 'user', content: 'Weather in Berlin?' }], tools: [weatherTool], + logger: testLogger, })) { chunks.push(chunk) } @@ -458,6 +466,7 @@ describe('Grok AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'grok-3', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -506,6 +515,7 @@ describe('Grok AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'grok-3', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -582,6 +592,7 @@ describe('Grok AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'grok-3', messages: [{ role: 'user', content: 'Say hello' }], + logger: testLogger, })) { chunks.push(chunk) } diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts index 7a170b80c..dbac440c2 100644 --- a/packages/typescript/ai-groq/src/adapters/text.ts +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -17,6 +17,7 
@@ import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' import type GROQ_SDK from 'groq-sdk' import type { ChatCompletionCreateParamsStreaming } from 'groq-sdk/resources/chat/completions' import type { @@ -78,6 +79,7 @@ export class GroqTextAdapter< ): AsyncIterable { const requestParams = this.mapTextOptionsToGroq(options) const timestamp = Date.now() + const { logger } = options const aguiState = { runId: options.runId ?? generateId(this.name), @@ -88,12 +90,16 @@ export class GroqTextAdapter< } try { + logger.request( + `activity=chat provider=groq model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: 'groq', model: this.model }, + ) const stream = await this.client.chat.completions.create({ ...requestParams, stream: true, }) - yield* this.processGroqStreamChunks(stream, options, aguiState) + yield* this.processGroqStreamChunks(stream, options, aguiState, logger) } catch (error: unknown) { const err = error as Error & { code?: string } @@ -121,10 +127,10 @@ export class GroqTextAdapter< }, }) - console.error('>>> chatStream: Fatal error during response creation <<<') - console.error('>>> Error message:', err.message) - console.error('>>> Error stack:', err.stack) - console.error('>>> Full error:', err) + logger.errors('groq.chatStream fatal', { + error, + source: 'groq.chatStream', + }) } } @@ -145,6 +151,7 @@ export class GroqTextAdapter< ): Promise> { const { chatOptions, outputSchema } = options const requestParams = this.mapTextOptionsToGroq(chatOptions) + const { logger } = chatOptions const jsonSchema = makeGroqStructuredOutputCompatible( outputSchema, @@ -152,6 +159,10 @@ export class GroqTextAdapter< ) try { + logger.request( + `activity=chat provider=groq model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 
0} stream=false`, + { provider: 'groq', model: this.model }, + ) const response = await this.client.chat.completions.create({ ...requestParams, stream: false, @@ -183,9 +194,10 @@ export class GroqTextAdapter< rawText, } } catch (error: unknown) { - const err = error as Error - console.error('>>> structuredOutput: Error during response creation <<<') - console.error('>>> Error message:', err.message) + logger.errors('groq.structuredOutput fatal', { + error, + source: 'groq.structuredOutput', + }) throw error } } @@ -204,6 +216,7 @@ export class GroqTextAdapter< timestamp: number hasEmittedRunStarted: boolean }, + logger: InternalLogger, ): AsyncIterable { let accumulatedContent = '' const timestamp = aguiState.timestamp @@ -221,6 +234,7 @@ export class GroqTextAdapter< try { for await (const chunk of stream) { + logger.provider(`provider=groq`, { chunk }) const choice = chunk.choices[0] if (!choice) continue @@ -383,7 +397,10 @@ export class GroqTextAdapter< } } catch (error: unknown) { const err = error as Error & { code?: string } - console.log('[Groq Adapter] Stream ended with error:', err.message) + logger.errors('groq stream ended with error', { + error, + source: 'groq.processGroqStreamChunks', + }) yield asChunk({ type: 'RUN_ERROR', diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts index 1562b0623..da421a8b5 100644 --- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -7,9 +7,13 @@ import { beforeEach, type Mock, } from 'vitest' +import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import { createGroqText, groqText } from '../src/adapters/text' import type { StreamChunk, Tool } from '@tanstack/ai' +// Test helper: a silent logger for test chatStream calls. 
+const testLogger = resolveDebugOption(false) + // Declare mockCreate at module level let mockCreate: Mock<(...args: Array) => unknown> @@ -155,6 +159,7 @@ describe('Groq AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'llama-3.3-70b-versatile', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -204,6 +209,7 @@ describe('Groq AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'llama-3.3-70b-versatile', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -264,6 +270,7 @@ describe('Groq AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'llama-3.3-70b-versatile', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -357,6 +364,7 @@ describe('Groq AG-UI event emission', () => { model: 'llama-3.3-70b-versatile', messages: [{ role: 'user', content: 'Weather in Berlin?' 
}], tools: [weatherTool], + logger: testLogger, })) { chunks.push(chunk) } @@ -424,6 +432,7 @@ describe('Groq AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'llama-3.3-70b-versatile', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -474,6 +483,7 @@ describe('Groq AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'llama-3.3-70b-versatile', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -552,6 +562,7 @@ describe('Groq AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'llama-3.3-70b-versatile', messages: [{ role: 'user', content: 'Say hello' }], + logger: testLogger, })) { chunks.push(chunk) } diff --git a/packages/typescript/ai-ollama/src/adapters/summarize.ts b/packages/typescript/ai-ollama/src/adapters/summarize.ts index 3b091543d..0b8407e4b 100644 --- a/packages/typescript/ai-ollama/src/adapters/summarize.ts +++ b/packages/typescript/ai-ollama/src/adapters/summarize.ts @@ -79,82 +79,113 @@ export class OllamaSummarizeAdapter< } async summarize(options: SummarizationOptions): Promise { + const { logger } = options const model = options.model - const prompt = this.buildSummarizationPrompt(options) - - const response = await this.client.generate({ + logger.request(`activity=summarize provider=ollama`, { + provider: 'ollama', model, - prompt, - options: { - temperature: 0.3, - num_predict: options.maxLength ?? 
500, - }, - stream: false, }) - const promptTokens = estimateTokens(prompt) - const completionTokens = estimateTokens(response.response) - - return { - id: generateId('sum'), - model: response.model, - summary: response.response, - usage: { - promptTokens, - completionTokens, - totalTokens: promptTokens + completionTokens, - }, + const prompt = this.buildSummarizationPrompt(options) + + try { + const response = await this.client.generate({ + model, + prompt, + options: { + temperature: 0.3, + num_predict: options.maxLength ?? 500, + }, + stream: false, + }) + + const promptTokens = estimateTokens(prompt) + const completionTokens = estimateTokens(response.response) + + return { + id: generateId('sum'), + model: response.model, + summary: response.response, + usage: { + promptTokens, + completionTokens, + totalTokens: promptTokens + completionTokens, + }, + } + } catch (error) { + logger.errors('ollama.summarize fatal', { + error, + source: 'ollama.summarize', + }) + throw error } } async *summarizeStream( options: SummarizationOptions, ): AsyncIterable { + const { logger } = options const model = options.model const id = generateId('sum') const prompt = this.buildSummarizationPrompt(options) let accumulatedContent = '' - const stream = await this.client.generate({ + logger.request(`activity=summarize provider=ollama`, { + provider: 'ollama', model, - prompt, - options: { - temperature: 0.3, - num_predict: options.maxLength ?? 
500, - }, stream: true, }) - for await (const chunk of stream) { - if (chunk.response) { - accumulatedContent += chunk.response - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: id, - model: chunk.model, - timestamp: Date.now(), - delta: chunk.response, - content: accumulatedContent, - }) - } - - if (chunk.done) { - const promptTokens = estimateTokens(prompt) - const completionTokens = estimateTokens(accumulatedContent) - yield asChunk({ - type: 'RUN_FINISHED', - runId: id, - model: chunk.model, - timestamp: Date.now(), - finishReason: 'stop', - usage: { - promptTokens, - completionTokens, - totalTokens: promptTokens + completionTokens, - }, - }) + try { + const stream = await this.client.generate({ + model, + prompt, + options: { + temperature: 0.3, + num_predict: options.maxLength ?? 500, + }, + stream: true, + }) + + for await (const chunk of stream) { + logger.provider(`provider=ollama`, { chunk }) + + if (chunk.response) { + accumulatedContent += chunk.response + yield asChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: id, + model: chunk.model, + timestamp: Date.now(), + delta: chunk.response, + content: accumulatedContent, + }) + } + + if (chunk.done) { + const promptTokens = estimateTokens(prompt) + const completionTokens = estimateTokens(accumulatedContent) + yield asChunk({ + type: 'RUN_FINISHED', + runId: id, + model: chunk.model, + timestamp: Date.now(), + finishReason: 'stop', + usage: { + promptTokens, + completionTokens, + totalTokens: promptTokens + completionTokens, + }, + }) + } } + } catch (error) { + logger.errors('ollama.summarize fatal', { + error, + source: 'ollama.summarize', + }) + throw error } } diff --git a/packages/typescript/ai-ollama/src/adapters/text.ts b/packages/typescript/ai-ollama/src/adapters/text.ts index 4332c125f..07da8acab 100644 --- a/packages/typescript/ai-ollama/src/adapters/text.ts +++ b/packages/typescript/ai-ollama/src/adapters/text.ts @@ -11,6 +11,7 @@ import type { StructuredOutputOptions, 
StructuredOutputResult, } from '@tanstack/ai/adapters' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' import type { AbortableAsyncIterator, ChatRequest, @@ -146,11 +147,24 @@ export class OllamaTextAdapter extends BaseTextAdapter< async *chatStream(options: TextOptions): AsyncIterable { const mappedOptions = this.mapCommonOptionsToOllama(options) - const response = await this.client.chat({ - ...mappedOptions, - stream: true, - }) - yield* this.processOllamaStreamChunks(response, options) + const { logger } = options + try { + logger.request( + `activity=chat provider=ollama model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: 'ollama', model: this.model }, + ) + const response = await this.client.chat({ + ...mappedOptions, + stream: true, + }) + yield* this.processOllamaStreamChunks(response, options, logger) + } catch (error: unknown) { + logger.errors('ollama.chatStream fatal', { + error, + source: 'ollama.chatStream', + }) + throw error + } } /** @@ -162,10 +176,15 @@ export class OllamaTextAdapter extends BaseTextAdapter< options: StructuredOutputOptions>, ): Promise> { const { chatOptions, outputSchema } = options + const { logger } = chatOptions const mappedOptions = this.mapCommonOptionsToOllama(chatOptions) try { + logger.request( + `activity=chat provider=ollama model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 
0} stream=false`, + { provider: 'ollama', model: this.model }, + ) // Make non-streaming request with JSON format const response = await this.client.chat({ ...mappedOptions, @@ -191,6 +210,10 @@ export class OllamaTextAdapter extends BaseTextAdapter< } } catch (error: unknown) { const err = error as Error + logger.errors('ollama.structuredOutput fatal', { + error, + source: 'ollama.structuredOutput', + }) throw new Error( `Structured output generation failed: ${err.message || 'Unknown error occurred'}`, ) @@ -200,6 +223,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< private async *processOllamaStreamChunks( stream: AbortableAsyncIterator, options: TextOptions, + logger: InternalLogger, ): AsyncIterable { let accumulatedContent = '' const timestamp = Date.now() @@ -218,6 +242,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< let hasEmittedStepStarted = false for await (const chunk of stream) { + logger.provider(`provider=ollama`, { chunk }) // Emit RUN_STARTED on first chunk if (!hasEmittedRunStarted) { hasEmittedRunStarted = true diff --git a/packages/typescript/ai-openai/src/adapters/image.ts b/packages/typescript/ai-openai/src/adapters/image.ts index 585e8a72f..5a0e38ba2 100644 --- a/packages/typescript/ai-openai/src/adapters/image.ts +++ b/packages/typescript/ai-openai/src/adapters/image.ts @@ -60,22 +60,38 @@ export class OpenAIImageAdapter< async generateImages( options: ImageGenerationOptions, ): Promise { - const { model, prompt, numberOfImages, size } = options + const { model, prompt, numberOfImages, size, logger } = options - // Validate inputs - validatePrompt({ prompt, model }) - validateImageSize(model, size) - validateNumberOfImages(model, numberOfImages) + logger.request( + `activity=generateImage provider=openai model=${this.model}`, + { + provider: 'openai', + model: this.model, + }, + ) - // Build request based on model type - const request = this.buildRequest(options) + try { + // Validate inputs + validatePrompt({ 
prompt, model }) + validateImageSize(model, size) + validateNumberOfImages(model, numberOfImages) - const response = await this.client.images.generate({ - ...request, - stream: false, - }) + // Build request based on model type + const request = this.buildRequest(options) - return this.transformResponse(model, response) + const response = await this.client.images.generate({ + ...request, + stream: false, + }) + + return this.transformResponse(model, response) + } catch (error) { + logger.errors('openai.generateImage fatal', { + error, + source: 'openai.generateImage', + }) + throw error + } } private buildRequest( diff --git a/packages/typescript/ai-openai/src/adapters/summarize.ts b/packages/typescript/ai-openai/src/adapters/summarize.ts index 6db5d874e..25fcc17af 100644 --- a/packages/typescript/ai-openai/src/adapters/summarize.ts +++ b/packages/typescript/ai-openai/src/adapters/summarize.ts @@ -44,36 +44,51 @@ export class OpenAISummarizeAdapter< } async summarize(options: SummarizationOptions): Promise { + const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) + logger.request(`activity=summarize provider=openai`, { + provider: 'openai', + model: options.model, + }) + // Use the text adapter's streaming and collect the result let summary = '' const id = '' let model = options.model let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } - for await (const chunk of this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - })) { - // AG-UI TEXT_MESSAGE_CONTENT event - if (chunk.type === 'TEXT_MESSAGE_CONTENT') { - if (chunk.content) { - summary = chunk.content - } else { - summary += chunk.delta + try { + for await (const chunk of this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: 
options.maxLength, + temperature: 0.3, + logger, + })) { + // AG-UI TEXT_MESSAGE_CONTENT event + if (chunk.type === 'TEXT_MESSAGE_CONTENT') { + if (chunk.content) { + summary = chunk.content + } else { + summary += chunk.delta + } + model = chunk.model || model } - model = chunk.model || model - } - // AG-UI RUN_FINISHED event - if (chunk.type === 'RUN_FINISHED') { - if (chunk.usage) { - usage = chunk.usage + // AG-UI RUN_FINISHED event + if (chunk.type === 'RUN_FINISHED') { + if (chunk.usage) { + usage = chunk.usage + } } } + } catch (error) { + logger.errors('openai.summarize fatal', { + error, + source: 'openai.summarize', + }) + throw error } return { id, model, summary, usage } @@ -82,16 +97,32 @@ export class OpenAISummarizeAdapter< async *summarizeStream( options: SummarizationOptions, ): AsyncIterable { + const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) - // Delegate directly to the text adapter's streaming - yield* this.textAdapter.chatStream({ + logger.request(`activity=summarize provider=openai`, { + provider: 'openai', model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, + stream: true, }) + + try { + // Delegate directly to the text adapter's streaming + yield* this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + logger, + }) + } catch (error) { + logger.errors('openai.summarize fatal', { + error, + source: 'openai.summarize', + }) + throw error + } } private buildSummarizationPrompt(options: SummarizationOptions): string { diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts index ad9025fd4..6aa839082 100644 --- a/packages/typescript/ai-openai/src/adapters/text.ts +++ 
b/packages/typescript/ai-openai/src/adapters/text.ts @@ -20,6 +20,7 @@ import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' import type OpenAI_SDK from 'openai' import type { Responses } from 'openai/resources' import type { @@ -115,8 +116,13 @@ export class OpenAITextAdapter< { index: number; name: string; started: boolean } >() const requestArguments = this.mapTextOptionsToOpenAI(options) + const { logger } = options try { + logger.request( + `activity=chat provider=openai model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: 'openai', model: this.model }, + ) const response = await this.client.responses.create( { ...requestArguments, @@ -134,13 +140,13 @@ export class OpenAITextAdapter< toolCallMetadata, options, () => generateId(this.name), + logger, ) } catch (error: unknown) { - const err = error as Error - console.error('>>> chatStream: Fatal error during response creation <<<') - console.error('>>> Error message:', err.message) - console.error('>>> Error stack:', err.stack) - console.error('>>> Full error:', err) + logger.errors('openai.chatStream fatal', { + error, + source: 'openai.chatStream', + }) throw error } } @@ -162,6 +168,7 @@ export class OpenAITextAdapter< ): Promise> { const { chatOptions, outputSchema } = options const requestArguments = this.mapTextOptionsToOpenAI(chatOptions) + const { logger } = chatOptions // Apply OpenAI-specific transformations for structured output compatibility const jsonSchema = makeOpenAIStructuredOutputCompatible( @@ -170,6 +177,10 @@ export class OpenAITextAdapter< ) try { + logger.request( + `activity=chat provider=openai model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 
0} stream=false`, + { provider: 'openai', model: this.model }, + ) const response = await this.client.responses.create( { ...requestArguments, @@ -212,9 +223,10 @@ export class OpenAITextAdapter< rawText, } } catch (error: unknown) { - const err = error as Error - console.error('>>> structuredOutput: Error during response creation <<<') - console.error('>>> Error message:', err.message) + logger.errors('openai.structuredOutput fatal', { + error, + source: 'openai.structuredOutput', + }) throw error } } @@ -248,6 +260,7 @@ export class OpenAITextAdapter< >, options: TextOptions, genId: () => string, + logger: InternalLogger, ): AsyncIterable { let accumulatedContent = '' let accumulatedReasoning = '' @@ -275,6 +288,9 @@ export class OpenAITextAdapter< try { for await (const chunk of stream) { chunkCount++ + logger.provider(`provider=openai type=${chunk.type}`, { + chunk, + }) // Emit RUN_STARTED on first chunk if (!hasEmittedRunStarted) { @@ -787,13 +803,11 @@ export class OpenAITextAdapter< } } catch (error: unknown) { const err = error as Error & { code?: string } - console.log( - '[OpenAI Adapter] Stream ended with error. 
Event type summary:', - { - totalChunks: chunkCount, - error: err.message, - }, - ) + logger.errors('openai stream ended with error', { + error, + source: 'openai.processOpenAIStreamChunks', + totalChunks: chunkCount, + }) yield asChunk({ type: 'RUN_ERROR', runId, diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts index 796bc0b29..10b46a4f9 100644 --- a/packages/typescript/ai-openai/src/adapters/transcription.ts +++ b/packages/typescript/ai-openai/src/adapters/transcription.ts @@ -47,63 +47,79 @@ export class OpenAITranscriptionAdapter< async transcribe( options: TranscriptionOptions, ): Promise { + const { logger } = options const { model, audio, language, prompt, responseFormat, modelOptions } = options - // Convert audio input to File object - const file = this.prepareAudioFile(audio) - - // Build request - const request: OpenAI_SDK.Audio.TranscriptionCreateParams = { - model, - file, - language, - prompt, - response_format: this.mapResponseFormat(responseFormat), - ...modelOptions, - } - - // Call OpenAI API - use verbose_json to get timestamps when available - const useVerbose = - responseFormat === 'verbose_json' || - (!responseFormat && model !== 'whisper-1') + logger.request( + `activity=generateTranscription provider=openai model=${model}`, + { provider: 'openai', model }, + ) - if (useVerbose) { - const response = await this.client.audio.transcriptions.create({ - ...request, - response_format: 'verbose_json', - }) + try { + // Convert audio input to File object + const file = this.prepareAudioFile(audio) - return { - id: generateId(this.name), + // Build request + const request: OpenAI_SDK.Audio.TranscriptionCreateParams = { model, - text: response.text, - language: response.language, - duration: response.duration, - segments: response.segments?.map( - (seg): TranscriptionSegment => ({ - id: seg.id, - start: seg.start, - end: seg.end, - text: seg.text, - confidence: 
seg.avg_logprob ? Math.exp(seg.avg_logprob) : undefined, - }), - ), - words: response.words?.map((w) => ({ - word: w.word, - start: w.start, - end: w.end, - })), + file, + language, + prompt, + response_format: this.mapResponseFormat(responseFormat), + ...modelOptions, } - } else { - const response = await this.client.audio.transcriptions.create(request) - return { - id: generateId(this.name), - model, - text: typeof response === 'string' ? response : response.text, - language, + // Call OpenAI API - use verbose_json to get timestamps when available + const useVerbose = + responseFormat === 'verbose_json' || + (!responseFormat && model !== 'whisper-1') + + if (useVerbose) { + const response = await this.client.audio.transcriptions.create({ + ...request, + response_format: 'verbose_json', + }) + + return { + id: generateId(this.name), + model, + text: response.text, + language: response.language, + duration: response.duration, + segments: response.segments?.map( + (seg): TranscriptionSegment => ({ + id: seg.id, + start: seg.start, + end: seg.end, + text: seg.text, + confidence: seg.avg_logprob + ? Math.exp(seg.avg_logprob) + : undefined, + }), + ), + words: response.words?.map((w) => ({ + word: w.word, + start: w.start, + end: w.end, + })), + } + } else { + const response = await this.client.audio.transcriptions.create(request) + + return { + id: generateId(this.name), + model, + text: typeof response === 'string' ? 
response : response.text, + language, + } } + } catch (error) { + logger.errors('openai.transcribe fatal', { + error, + source: 'openai.transcribe', + }) + throw error } } diff --git a/packages/typescript/ai-openai/src/adapters/tts.ts b/packages/typescript/ai-openai/src/adapters/tts.ts index 2f34e50fa..1ef7741e2 100644 --- a/packages/typescript/ai-openai/src/adapters/tts.ts +++ b/packages/typescript/ai-openai/src/adapters/tts.ts @@ -50,8 +50,14 @@ export class OpenAITTSAdapter< async generateSpeech( options: TTSOptions, ): Promise { + const { logger } = options const { model, text, voice, format, speed, modelOptions } = options + logger.request(`activity=generateSpeech provider=openai model=${model}`, { + provider: 'openai', + model, + }) + // Validate inputs using existing validators const audioOptions = { input: text, @@ -76,22 +82,30 @@ export class OpenAITTSAdapter< ...modelOptions, } - // Call OpenAI API - const response = await this.client.audio.speech.create(request) + try { + // Call OpenAI API + const response = await this.client.audio.speech.create(request) - // Convert response to base64 - const arrayBuffer = await response.arrayBuffer() - const base64 = Buffer.from(arrayBuffer).toString('base64') + // Convert response to base64 + const arrayBuffer = await response.arrayBuffer() + const base64 = Buffer.from(arrayBuffer).toString('base64') - const outputFormat = format || 'mp3' - const contentType = this.getContentType(outputFormat) + const outputFormat = format || 'mp3' + const contentType = this.getContentType(outputFormat) - return { - id: generateId(this.name), - model, - audio: base64, - format: outputFormat, - contentType, + return { + id: generateId(this.name), + model, + audio: base64, + format: outputFormat, + contentType, + } + } catch (error) { + logger.errors('openai.generateSpeech fatal', { + error, + source: 'openai.generateSpeech', + }) + throw error } } diff --git a/packages/typescript/ai-openai/src/adapters/video.ts 
b/packages/typescript/ai-openai/src/adapters/video.ts index 1f882d16d..68366a811 100644 --- a/packages/typescript/ai-openai/src/adapters/video.ts +++ b/packages/typescript/ai-openai/src/adapters/video.ts @@ -80,18 +80,26 @@ export class OpenAIVideoAdapter< async createVideoJob( options: VideoGenerationOptions, ): Promise { - const { model, size, duration, modelOptions } = options + const { model, size, duration, modelOptions, logger } = options - // Validate inputs - validateVideoSize(model, size) - // Duration maps to 'seconds' in the API - const seconds = duration ?? modelOptions?.seconds - validateVideoSeconds(model, seconds) - - // Build request - const request = this.buildRequest(options) + logger.request( + `activity=generateVideo provider=openai model=${this.model}`, + { + provider: 'openai', + model: this.model, + }, + ) try { + // Validate inputs + validateVideoSize(model, size) + // Duration maps to 'seconds' in the API + const seconds = duration ?? modelOptions?.seconds + validateVideoSeconds(model, seconds) + + // Build request + const request = this.buildRequest(options) + // POST /v1/videos // Cast to any because the videos API may not be in SDK types yet const client = this.client @@ -102,6 +110,10 @@ export class OpenAIVideoAdapter< model, } } catch (error: any) { + logger.errors('openai.createVideoJob fatal', { + error, + source: 'openai.createVideoJob', + }) // Fallback for when the videos API is not available if (error?.message?.includes('videos') || error?.code === 'invalid_api') { throw new Error( diff --git a/packages/typescript/ai-openai/src/realtime/adapter.ts b/packages/typescript/ai-openai/src/realtime/adapter.ts index 36d6cea5e..d1858ee4e 100644 --- a/packages/typescript/ai-openai/src/realtime/adapter.ts +++ b/packages/typescript/ai-openai/src/realtime/adapter.ts @@ -1,3 +1,4 @@ +import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import type { AnyClientTool, AudioVisualization, @@ -9,6 +10,7 @@ import type { 
RealtimeStatus, RealtimeToken, } from '@tanstack/ai' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' import type { RealtimeAdapter, RealtimeConnection } from '@tanstack/ai-client' import type { OpenAIRealtimeOptions } from './types' @@ -37,6 +39,7 @@ export function openaiRealtime( options: OpenAIRealtimeOptions = {}, ): RealtimeAdapter { const connectionMode = options.connectionMode ?? 'webrtc' + const logger = resolveDebugOption(options.debug) return { provider: 'openai', @@ -45,10 +48,21 @@ export function openaiRealtime( token: RealtimeToken, _clientTools?: ReadonlyArray, ): Promise { + const model = token.config.model ?? 'gpt-4o-realtime-preview' + logger.request(`activity=realtime provider=openai model=${model}`, { + provider: 'openai', + model, + }) + if (connectionMode === 'webrtc') { - return createWebRTCConnection(token) + return createWebRTCConnection(token, logger) } - throw new Error('WebSocket connection mode not yet implemented') + const error = new Error('WebSocket connection mode not yet implemented') + logger.errors('openai.realtime fatal', { + error, + source: 'openai.realtime', + }) + throw error }, } } @@ -58,6 +72,7 @@ export function openaiRealtime( */ async function createWebRTCConnection( token: RealtimeToken, + logger: InternalLogger, ): Promise { const model = token.config.model ?? 'gpt-4o-realtime-preview' const eventHandlers = new Map>>() @@ -116,13 +131,24 @@ async function createWebRTCConnection( dataChannel.onmessage = (event) => { try { const message = JSON.parse(event.data) + logger.provider( + `provider=openai direction=in type=${(message as { type?: string }).type ?? 
''}`, + { frame: message }, + ) handleServerEvent(message) } catch (e) { - console.error('Failed to parse realtime event:', e) + logger.errors('openai.realtime fatal', { + error: e, + source: 'openai.realtime', + }) } } dataChannel.onerror = (error) => { + logger.errors('openai.realtime fatal', { + error, + source: 'openai.realtime', + }) emit('error', { error: new Error(`Data channel error: ${error}`) }) } @@ -170,9 +196,14 @@ async function createWebRTCConnection( if (!sdpResponse.ok) { const errorText = await sdpResponse.text() - throw new Error( + const error = new Error( `Failed to establish WebRTC connection: ${sdpResponse.status} - ${errorText}`, ) + logger.errors('openai.realtime fatal', { + error, + source: 'openai.realtime', + }) + throw error } const answerSdp = await sdpResponse.text() @@ -276,9 +307,12 @@ async function createWebRTCConnection( const name = event.name as string const args = event.arguments as string if (!callId) { - console.warn( - '[openaiRealtime] function_call_arguments.done missing call_id/item_id', - event, + logger.errors( + 'openai.realtime function_call_arguments.done missing ids', + { + event, + source: 'openai.realtime', + }, ) break } @@ -361,7 +395,10 @@ async function createWebRTCConnection( audioElement.autoplay = true // Some browsers require this for autoplay audioElement.play().catch((e) => { - console.warn('Audio autoplay failed:', e) + logger.errors('openai.realtime audio autoplay failed', { + error: e, + source: 'openai.realtime', + }) }) // Set up AudioContext for visualization only (not playback) @@ -412,6 +449,10 @@ async function createWebRTCConnection( // Send event to server (queues if data channel not yet open) function sendEvent(event: Record) { if (dataChannel?.readyState === 'open') { + logger.provider( + `provider=openai direction=out type=${(event.type as string | undefined) ?? 
''}`, + { frame: event }, + ) dataChannel.send(JSON.stringify(event)) } else { pendingEvents.push(event) @@ -421,6 +462,10 @@ async function createWebRTCConnection( // Flush any queued events (called when data channel opens) function flushPendingEvents() { for (const event of pendingEvents) { + logger.provider( + `provider=openai direction=out type=${(event.type as string | undefined) ?? ''}`, + { frame: event }, + ) dataChannel!.send(JSON.stringify(event)) } pendingEvents.length = 0 diff --git a/packages/typescript/ai-openai/src/realtime/types.ts b/packages/typescript/ai-openai/src/realtime/types.ts index f4d36d9cc..be0bf856c 100644 --- a/packages/typescript/ai-openai/src/realtime/types.ts +++ b/packages/typescript/ai-openai/src/realtime/types.ts @@ -1,4 +1,4 @@ -import type { VADConfig } from '@tanstack/ai' +import type { DebugOption, VADConfig } from '@tanstack/ai' /** * OpenAI realtime voice options @@ -64,6 +64,13 @@ export interface OpenAIRealtimeTokenOptions { export interface OpenAIRealtimeOptions { /** Connection mode (default: 'webrtc' in browser) */ connectionMode?: 'webrtc' | 'websocket' + /** + * Enable debug logging for this adapter. + * + * - `true` enables all categories (`request`, `response`, `provider`, `errors`). + * - A {@link DebugConfig} object selects categories and/or a custom sink. 
+ */ + debug?: DebugOption } /** diff --git a/packages/typescript/ai-openai/tests/image-adapter.test.ts b/packages/typescript/ai-openai/tests/image-adapter.test.ts index 49d3353c0..429306bcd 100644 --- a/packages/typescript/ai-openai/tests/image-adapter.test.ts +++ b/packages/typescript/ai-openai/tests/image-adapter.test.ts @@ -1,4 +1,5 @@ import { describe, it, expect, beforeEach, vi } from 'vitest' +import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import { OpenAIImageAdapter, createOpenaiImage } from '../src/adapters/image' import { validateImageSize, @@ -6,6 +7,8 @@ import { validatePrompt, } from '../src/image/image-provider-options' +const testLogger = resolveDebugOption(false) + describe('OpenAI Image Adapter', () => { describe('createOpenaiImage', () => { it('creates an adapter with the provided API key', () => { @@ -162,6 +165,7 @@ describe('OpenAI Image Adapter', () => { prompt: 'A cat wearing a hat', numberOfImages: 1, size: '1024x1024', + logger: testLogger, }) expect(mockGenerate).toHaveBeenCalledWith({ @@ -202,11 +206,13 @@ describe('OpenAI Image Adapter', () => { const result1 = await adapter.generateImages({ model: 'dall-e-3', prompt: 'Test prompt', + logger: testLogger, }) const result2 = await adapter.generateImages({ model: 'dall-e-3', prompt: 'Test prompt', + logger: testLogger, }) expect(result1.id).not.toBe(result2.id) diff --git a/packages/typescript/ai-openrouter/src/adapters/image.ts b/packages/typescript/ai-openrouter/src/adapters/image.ts index 20373101c..4a2792142 100644 --- a/packages/typescript/ai-openrouter/src/adapters/image.ts +++ b/packages/typescript/ai-openrouter/src/adapters/image.ts @@ -65,10 +65,19 @@ export class OpenRouterImageAdapter< async generateImages( options: ImageGenerationOptions, ): Promise { - const { model, prompt, numberOfImages, size, modelOptions } = options + const { model, prompt, numberOfImages, size, modelOptions, logger } = + options // Use provided aspect_ratio or derive from size const 
aspectRatio = size ? SIZE_TO_ASPECT_RATIO[size] : undefined + logger.request( + `activity=generateImage provider=openrouter model=${this.model}`, + { + provider: 'openrouter', + model: this.model, + }, + ) + try { const response = await this.client.chat.send({ chatRequest: { @@ -110,6 +119,10 @@ export class OpenRouterImageAdapter< return this.transformResponse(model, response) } catch (error) { + logger.errors('openrouter.generateImage fatal', { + error, + source: 'openrouter.generateImage', + }) const message = (error as Error).message || 'Unknown error' throw new Error(`Image generation failed: ${message}`) } diff --git a/packages/typescript/ai-openrouter/src/adapters/summarize.ts b/packages/typescript/ai-openrouter/src/adapters/summarize.ts index 7494a8e56..aa7513522 100644 --- a/packages/typescript/ai-openrouter/src/adapters/summarize.ts +++ b/packages/typescript/ai-openrouter/src/adapters/summarize.ts @@ -56,39 +56,54 @@ export class OpenRouterSummarizeAdapter< } async summarize(options: SummarizationOptions): Promise { + const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) + logger.request(`activity=summarize provider=openrouter`, { + provider: 'openrouter', + model: options.model, + }) + let summary = '' const id = '' let model = options.model let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } - for await (const chunk of this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: this.maxTokens ?? 
options.maxLength, - temperature: this.temperature, - })) { - // AG-UI TEXT_MESSAGE_CONTENT event - if (chunk.type === 'TEXT_MESSAGE_CONTENT') { - if (chunk.content) { - summary = chunk.content - } else { - summary += chunk.delta + try { + for await (const chunk of this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: this.maxTokens ?? options.maxLength, + temperature: this.temperature, + logger, + })) { + // AG-UI TEXT_MESSAGE_CONTENT event + if (chunk.type === 'TEXT_MESSAGE_CONTENT') { + if (chunk.content) { + summary = chunk.content + } else { + summary += chunk.delta + } + model = chunk.model || model } - model = chunk.model || model - } - // AG-UI RUN_FINISHED event - if (chunk.type === 'RUN_FINISHED') { - if (chunk.usage) { - usage = chunk.usage + // AG-UI RUN_FINISHED event + if (chunk.type === 'RUN_FINISHED') { + if (chunk.usage) { + usage = chunk.usage + } + } + // AG-UI RUN_ERROR event + if (chunk.type === 'RUN_ERROR') { + throw new Error(`Error during summarization: ${chunk.error?.message}`) } } - // AG-UI RUN_ERROR event - if (chunk.type === 'RUN_ERROR') { - throw new Error(`Error during summarization: ${chunk.error?.message}`) - } + } catch (error) { + logger.errors('openrouter.summarize fatal', { + error, + source: 'openrouter.summarize', + }) + throw error } return { id, model, summary, usage } @@ -97,15 +112,31 @@ export class OpenRouterSummarizeAdapter< async *summarizeStream( options: SummarizationOptions, ): AsyncIterable { + const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) - yield* this.textAdapter.chatStream({ + logger.request(`activity=summarize provider=openrouter`, { + provider: 'openrouter', model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: this.maxTokens ?? 
options.maxLength, - temperature: this.temperature, + stream: true, }) + + try { + yield* this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: this.maxTokens ?? options.maxLength, + temperature: this.temperature, + logger, + }) + } catch (error) { + logger.errors('openrouter.summarize fatal', { + error, + source: 'openrouter.summarize', + }) + throw error + } } private buildSummarizationPrompt(options: SummarizationOptions): string { diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 15d0768f3..52b3dc354 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -110,6 +110,7 @@ export class OpenRouterTextAdapter< let accumulatedContent = '' let responseId: string | null = null let currentModel = options.model + const { logger } = options // AG-UI lifecycle tracking const aguiState: AGUIState = { runId: options.runId ?? this.generateId(), @@ -129,12 +130,17 @@ export class OpenRouterTextAdapter< try { const requestParams = this.mapTextOptionsToSDK(options) + logger.request( + `activity=chat provider=openrouter model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 
0} stream=true`, + { provider: 'openrouter', model: this.model }, + ) const stream = await this.client.chat.send( { chatRequest: { ...requestParams, stream: true } }, { signal: options.request?.signal }, ) for await (const chunk of stream) { + logger.provider(`provider=openrouter`, { chunk }) if (chunk.id) responseId = chunk.id if (chunk.model) currentModel = chunk.model @@ -201,6 +207,10 @@ export class OpenRouterTextAdapter< }) } } catch (error) { + logger.errors('openrouter.chatStream fatal', { + error, + source: 'openrouter.chatStream', + }) // Emit RUN_STARTED if not yet emitted (error on first call) if (!aguiState.hasEmittedRunStarted) { aguiState.hasEmittedRunStarted = true @@ -248,6 +258,7 @@ export class OpenRouterTextAdapter< options: StructuredOutputOptions>, ): Promise> { const { chatOptions, outputSchema } = options + const { logger } = chatOptions const requestParams = this.mapTextOptionsToSDK(chatOptions) @@ -260,6 +271,10 @@ export class OpenRouterTextAdapter< }) try { + logger.request( + `activity=chat provider=openrouter model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 
0} stream=false`, + { provider: 'openrouter', model: this.model }, + ) const result = await this.client.chat.send( { chatRequest: { @@ -285,6 +300,10 @@ export class OpenRouterTextAdapter< const parsed = JSON.parse(rawText) return { data: parsed, rawText } } catch (error: unknown) { + logger.errors('openrouter.structuredOutput fatal', { + error, + source: 'openrouter.structuredOutput', + }) if (error instanceof RequestAbortedError) { throw new Error('Structured output generation aborted') } diff --git a/packages/typescript/ai-openrouter/tests/image-adapter.test.ts b/packages/typescript/ai-openrouter/tests/image-adapter.test.ts index 904073e4e..0686cff2a 100644 --- a/packages/typescript/ai-openrouter/tests/image-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/image-adapter.test.ts @@ -1,6 +1,9 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' +import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import { createOpenRouterImage } from '../src/adapters/image' +const testLogger = resolveDebugOption(false) + // Declare mockSend at module level let mockSend: any @@ -58,6 +61,7 @@ describe('OpenRouter Image Adapter', () => { const result = await adapter.generateImages({ model: 'google/gemini-2.5-flash-image', prompt: 'A futuristic city at sunset', + logger: testLogger, }) expect(mockSend).toHaveBeenCalledTimes(1) @@ -94,6 +98,7 @@ describe('OpenRouter Image Adapter', () => { model: 'google/gemini-2.5-flash-image', prompt: 'A cute robot mascot', numberOfImages: 2, + logger: testLogger, }) const callArgs = mockSend.mock.calls[0]![0].chatRequest @@ -121,6 +126,7 @@ describe('OpenRouter Image Adapter', () => { const result = await adapter.generateImages({ model: 'google/gemini-2.5-flash-image', prompt: 'A simple test image', + logger: testLogger, }) expect(result.images).toHaveLength(1) @@ -141,6 +147,7 @@ describe('OpenRouter Image Adapter', () => { model: 'google/gemini-2.5-flash-image', prompt: 'A wide landscape', size: 
'1344x768', // 16:9 + logger: testLogger, }) const callArgs = mockSend.mock.calls[0]![0].chatRequest @@ -162,6 +169,7 @@ describe('OpenRouter Image Adapter', () => { model: 'google/gemini-2.5-flash-image', prompt: 'A square image', size: '1024x1024', + logger: testLogger, }) const callArgs = mockSend.mock.calls[0]![0].chatRequest @@ -179,6 +187,7 @@ describe('OpenRouter Image Adapter', () => { adapter.generateImages({ model: 'invalid/model', prompt: 'Test prompt', + logger: testLogger, }), ).rejects.toThrow('Image generation failed: Model not found') }) @@ -196,6 +205,7 @@ describe('OpenRouter Image Adapter', () => { adapter.generateImages({ model: 'google/gemini-2.5-flash-image', prompt: 'Inappropriate content', + logger: testLogger, }), ).rejects.toThrow('Image generation failed: Content policy violation') }) @@ -215,6 +225,7 @@ describe('OpenRouter Image Adapter', () => { modelOptions: { image_size: '4K', }, + logger: testLogger, }) const callArgs = mockSend.mock.calls[0]![0].chatRequest diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts index 25dae0daf..d67d66205 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts @@ -1,9 +1,13 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' import { chat } from '@tanstack/ai' +import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import { ChatRequest$outboundSchema } from '@openrouter/sdk/models' import { createOpenRouterText } from '../src/adapters/text' import type { OpenRouterTextModelOptions } from '../src/adapters/text' import type { StreamChunk, Tool } from '@tanstack/ai' + +// Test helper: a silent logger for test chatStream calls. 
+const testLogger = resolveDebugOption(false) // Declare mockSend at module level let mockSend: any @@ -304,6 +308,7 @@ describe('OpenRouter adapter option mapping', () => { model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'What is the weather in Berlin?' }], tools: [weatherTool], + logger: testLogger, })) { chunks.push(chunk) } @@ -378,6 +383,7 @@ describe('OpenRouter adapter option mapping', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -434,6 +440,7 @@ describe('OpenRouter AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -481,6 +488,7 @@ describe('OpenRouter AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -539,6 +547,7 @@ describe('OpenRouter AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -630,6 +639,7 @@ describe('OpenRouter AG-UI event emission', () => { model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Weather in Berlin?' 
}], tools: [weatherTool], + logger: testLogger, })) { chunks.push(chunk) } @@ -669,6 +679,7 @@ describe('OpenRouter AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -721,6 +732,7 @@ describe('OpenRouter AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -768,6 +780,7 @@ describe('OpenRouter AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -832,6 +845,7 @@ describe('OpenRouter AG-UI event emission', () => { for await (const chunk of adapter.chatStream({ model: 'openai/o1-preview', messages: [{ role: 'user', content: 'What is the meaning of life?' 
}], + logger: testLogger, })) { chunks.push(chunk) } @@ -887,6 +901,7 @@ describe('OpenRouter structured output', () => { chatOptions: { model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Give me a person' }], + logger: testLogger, }, outputSchema, }) @@ -954,6 +969,7 @@ describe('OpenRouter structured output', () => { chatOptions: { model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Generate' }], + logger: testLogger, }, outputSchema, }) @@ -1052,6 +1068,7 @@ describe('OpenRouter structured output', () => { chatOptions: { model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'List items' }], + logger: testLogger, }, outputSchema: { type: 'object' }, }) @@ -1078,6 +1095,7 @@ describe('OpenRouter structured output', () => { chatOptions: { model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Give me data' }], + logger: testLogger, }, outputSchema: { type: 'object' }, }), @@ -1094,6 +1112,7 @@ describe('OpenRouter structured output', () => { chatOptions: { model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Give me data' }], + logger: testLogger, }, outputSchema: { type: 'object' }, }), @@ -1119,6 +1138,7 @@ describe('OpenRouter structured output', () => { chatOptions: { model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Give me data' }], + logger: testLogger, }, outputSchema: { type: 'object' }, }), @@ -1318,6 +1338,7 @@ describe('OpenRouter duplicate event prevention', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -1353,6 +1374,7 @@ describe('OpenRouter duplicate event prevention', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -1391,6 +1413,7 @@ describe('OpenRouter duplicate event prevention', () => { for await 
(const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hi' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -1433,6 +1456,7 @@ describe('OpenRouter duplicate event prevention', () => { for await (const chunk of adapter.chatStream({ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -1493,6 +1517,7 @@ describe('OpenRouter STEP event consistency', () => { for await (const chunk of adapter.chatStream({ model: 'openai/o1-preview', messages: [{ role: 'user', content: 'What is the meaning of life?' }], + logger: testLogger, })) { chunks.push(chunk) } @@ -1583,6 +1608,7 @@ describe('OpenRouter STEP event consistency', () => { for await (const chunk of adapter.chatStream({ model: 'openai/o1-preview', messages: [{ role: 'user', content: 'What is the meaning of life?' }], + logger: testLogger, })) { chunks.push(chunk) } diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index 1eea2b8b7..9fa60fb33 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -24,6 +24,10 @@ "./middlewares": { "types": "./dist/esm/middlewares/index.d.ts", "import": "./dist/esm/middlewares/index.js" + }, + "./adapter-internals": { + "types": "./dist/esm/adapter-internals.d.ts", + "import": "./dist/esm/adapter-internals.js" } }, "sideEffects": false, diff --git a/packages/typescript/ai/src/activities/chat/adapter.ts b/packages/typescript/ai/src/activities/chat/adapter.ts index ac7e96e33..41a19ee8a 100644 --- a/packages/typescript/ai/src/activities/chat/adapter.ts +++ b/packages/typescript/ai/src/activities/chat/adapter.ts @@ -18,7 +18,12 @@ export interface TextAdapterConfig { } /** - * Options for structured output generation + * Options for structured output generation. + * + * The internal logger is threaded through `chatOptions.logger` (inherited from + * `TextOptions`). 
Adapter implementations must call `logger.request()` before + * SDK calls, `logger.provider()` for each chunk received, and `logger.errors()` + * in catch blocks. */ export interface StructuredOutputOptions { /** Text options for the request */ diff --git a/packages/typescript/ai/src/activities/chat/index.ts b/packages/typescript/ai/src/activities/chat/index.ts index c87856630..0c96ff8c5 100644 --- a/packages/typescript/ai/src/activities/chat/index.ts +++ b/packages/typescript/ai/src/activities/chat/index.ts @@ -8,6 +8,7 @@ import { devtoolsMiddleware } from '@tanstack/ai-event-client' import { stripToSpecMiddleware } from '../../strip-to-spec-middleware' import { streamToText } from '../../stream-to-response.js' +import { resolveDebugOption } from '../../logger/resolve' import { LazyToolManager } from './tools/lazy-tool-manager' import { MiddlewareAbortError, @@ -51,6 +52,8 @@ import type { ChatMiddlewareContext, ChatMiddlewarePhase, } from './middleware/types' +import type { InternalLogger } from '../../logger/internal-logger' +import type { DebugOption } from '../../logger/types' // =========================== // Activity Kind @@ -168,6 +171,13 @@ export interface TextActivityOptions< * Can be used to pass request-scoped data (e.g., user ID, request context). */ context?: unknown + /** + * Enable debug logging. Pass `true` to enable all categories with the default + * console logger, `false` to silence everything, or a `DebugConfig` object for + * granular control and/or a custom `Logger`. Defaults to `undefined`, which + * means only the `errors` category is active. 
+ */ + debug?: DebugOption } // =========================== @@ -280,7 +290,13 @@ class TextEngine< private middlewareAbortController?: AbortController private terminalHookCalled = false - constructor(config: TextEngineConfig) { + private readonly logger: InternalLogger + + constructor( + config: TextEngineConfig, + logger: InternalLogger, + ) { + this.logger = logger this.adapter = config.adapter this.params = config.params this.systemPrompts = config.params.systemPrompts || [] @@ -328,7 +344,7 @@ class TextEngine< ...(config.middleware || []), stripToSpecMiddleware(), ] - this.middlewareRunner = new MiddlewareRunner(allMiddleware) + this.middlewareRunner = new MiddlewareRunner(allMiddleware, logger) this.middlewareAbortController = new AbortController() this.middlewareCtx = { requestId: this.requestId, @@ -380,6 +396,9 @@ class TextEngine< async *run(): AsyncGenerator { this.beforeRun() + this.logger.agentLoop('run started', { + conversationId: this.middlewareCtx.conversationId, + }) try { // Run initial onConfig (phase = init) @@ -404,6 +423,10 @@ class TextEngine< return } + this.logger.agentLoop(`iteration=${this.middlewareCtx.iteration}`, { + iteration: this.middlewareCtx.iteration, + }) + await this.beginCycle() if (this.cyclePhase === 'processText') { @@ -425,6 +448,10 @@ class TextEngine< this.endCycle() } while (this.shouldContinue()) + this.logger.agentLoop('run finished', { + finishReason: this.lastFinishReason, + }) + // Call terminal onFinish hook (skip when waiting for client — stream is paused, not finished) if (!this.terminalHookCalled && this.toolPhase !== 'wait') { this.terminalHookCalled = true @@ -447,6 +474,10 @@ class TextEngine< }) } else { // Genuine error — call onError + this.logger.errors('chat run failed', { + error, + conversationId: this.middlewareCtx.conversationId, + }) await this.middlewareRunner.runOnError(this.middlewareCtx, { error, duration: Date.now() - this.streamStartTime, @@ -542,6 +573,18 @@ class TextEngine< 
this.middlewareCtx.phase = 'modelStream' + const providerName = + (this.adapter as { provider?: string }).provider ?? this.adapter.name + this.logger.request( + `activity=chat provider=${providerName} model=${this.params.model} messages=${this.messages.length} tools=${this.tools.length} stream=true`, + { + provider: providerName, + model: this.params.model, + messageCount: this.messages.length, + toolCount: this.tools.length, + }, + ) + for await (const chunk of this.adapter.chatStream({ model: this.params.model, messages: this.messages, @@ -553,6 +596,7 @@ class TextEngine< request: this.effectiveRequest, modelOptions, systemPrompts: this.systemPrompts, + logger: this.logger, threadId: this.threadId, runId: this.runIdOverride, })) { @@ -572,6 +616,7 @@ class TextEngine< chunk, ) for (const outputChunk of outputChunks) { + this.logger.output(`type=${outputChunk.type}`, { chunk: outputChunk }) yield outputChunk this.middlewareCtx.chunkIndex++ } @@ -728,6 +773,10 @@ class TextEngine< (eventName, data) => this.createCustomEventChunk(eventName, data), { onBeforeToolCall: async (toolCall, tool, args) => { + this.logger.tools(`phase=before name=${toolCall.function.name}`, { + name: toolCall.function.name, + args, + }) const hookCtx = { toolCall, tool, @@ -741,6 +790,10 @@ class TextEngine< ) }, onAfterToolCall: async (info) => { + this.logger.tools(`phase=after name=${info.toolName}`, { + name: info.toolName, + result: info.result, + }) await this.middlewareRunner.runOnAfterToolCall( this.middlewareCtx, info, @@ -881,6 +934,10 @@ class TextEngine< (eventName, data) => this.createCustomEventChunk(eventName, data), { onBeforeToolCall: async (toolCall, tool, args) => { + this.logger.tools(`phase=before name=${toolCall.function.name}`, { + name: toolCall.function.name, + args, + }) const hookCtx = { toolCall, tool, @@ -894,6 +951,10 @@ class TextEngine< ) }, onAfterToolCall: async (info) => { + this.logger.tools(`phase=after name=${info.toolName}`, { + name: info.toolName, + 
result: info.result, + }) await this.middlewareRunner.runOnAfterToolCall( this.middlewareCtx, info, @@ -1478,18 +1539,22 @@ export function chat< async function* runStreamingText( options: TextActivityOptions, ): AsyncIterable { - const { adapter, middleware, context, ...textOptions } = options + const { adapter, middleware, context, debug, ...textOptions } = options const model = adapter.model + const logger = resolveDebugOption(debug) - const engine = new TextEngine({ - adapter, - params: { ...textOptions, model } as TextOptions< - Record, - Record - >, - middleware, - context, - }) + const engine = new TextEngine( + { + adapter, + params: { ...textOptions, model, logger } as TextOptions< + Record, + Record + >, + middleware, + context, + }, + logger, + ) for await (const chunk of engine.run()) { yield chunk @@ -1520,23 +1585,28 @@ function runNonStreamingText( async function runAgenticStructuredOutput( options: TextActivityOptions, ): Promise> { - const { adapter, outputSchema, middleware, context, ...textOptions } = options + const { adapter, outputSchema, middleware, context, debug, ...textOptions } = + options const model = adapter.model + const logger = resolveDebugOption(debug) if (!outputSchema) { throw new Error('outputSchema is required for structured output') } // Create the engine and run the agentic loop - const engine = new TextEngine({ - adapter, - params: { ...textOptions, model } as TextOptions< - Record, - Record - >, - middleware, - context, - }) + const engine = new TextEngine( + { + adapter, + params: { ...textOptions, model, logger } as TextOptions< + Record, + Record + >, + middleware, + context, + }, + logger, + ) // Consume the stream to run the agentic loop for await (const _chunk of engine.run()) { @@ -1560,6 +1630,17 @@ async function runAgenticStructuredOutput( throw new Error('Failed to convert output schema to JSON Schema') } + const providerName = + (adapter as { provider?: string }).provider ?? 
adapter.name + logger.request( + `activity=chat-structured provider=${providerName} model=${model} messages=${finalMessages.length}`, + { + provider: providerName, + model, + messageCount: finalMessages.length, + }, + ) + // Call the adapter's structured output method with the conversation context // The adapter receives JSON Schema and can apply vendor-specific patches const result = await adapter.structuredOutput({ @@ -1567,6 +1648,7 @@ async function runAgenticStructuredOutput( ...structuredTextOptions, model, messages: finalMessages, + logger, }, outputSchema: jsonSchema, }) diff --git a/packages/typescript/ai/src/activities/chat/middleware/compose.ts b/packages/typescript/ai/src/activities/chat/middleware/compose.ts index 8e46e0fb9..b3e4cf6cc 100644 --- a/packages/typescript/ai/src/activities/chat/middleware/compose.ts +++ b/packages/typescript/ai/src/activities/chat/middleware/compose.ts @@ -1,5 +1,6 @@ import { aiEventClient } from '@tanstack/ai-event-client' import type { StreamChunk } from '../../../types' +import type { InternalLogger } from '../../../logger/internal-logger' import type { AbortInfo, AfterToolCallInfo, @@ -36,9 +37,14 @@ function instrumentCtx(ctx: ChatMiddlewareContext) { */ export class MiddlewareRunner { private readonly middlewares: ReadonlyArray + private readonly logger: InternalLogger - constructor(middlewares: ReadonlyArray) { + constructor( + middlewares: ReadonlyArray, + logger: InternalLogger, + ) { this.middlewares = middlewares + this.logger = logger } get hasMiddleware(): boolean { @@ -63,6 +69,15 @@ export class MiddlewareRunner { const hasTransform = result !== undefined && result !== null if (hasTransform) { current = { ...current, ...result } + if (!skip) { + this.logger.config( + `middleware=${mw.name ?? 'unnamed'} keys=${Object.keys(result as object).join(',')}`, + { + middleware: mw.name ?? 
'unnamed', + changes: result, + }, + ) + } } if (!skip) { const base = instrumentCtx(ctx) @@ -98,6 +113,10 @@ export class MiddlewareRunner { const start = Date.now() await mw.onStart(ctx) if (!skip) { + this.logger.middleware( + `hook=onStart middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 'unnamed', hook: 'onStart' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -134,10 +153,24 @@ export class MiddlewareRunner { for (const c of chunks) { // Cast: @ag-ui/core Zod passthrough types prevent direct `.type` access const chunkType = (c as StreamChunk & { type: string }).type + if (!skip) { + this.logger.middleware( + `hook=onChunk middleware=${mw.name ?? 'unnamed'} in=${chunkType}`, + { middleware: mw.name ?? 'unnamed', hook: 'onChunk', in: c }, + ) + } const result = await mw.onChunk(ctx, c) if (result === null) { // Drop this chunk if (!skip) { + this.logger.middleware( + `hook=onChunk middleware=${mw.name ?? 'unnamed'} in=${chunkType} out=`, + { + middleware: mw.name ?? 'unnamed', + hook: 'onChunk', + dropped: true, + }, + ) aiEventClient.emit('middleware:chunk:transformed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -154,6 +187,15 @@ export class MiddlewareRunner { // Expand nextChunks.push(...result) if (!skip) { + this.logger.middleware( + `hook=onChunk middleware=${mw.name ?? 'unnamed'} in=${chunkType} out=[${result.map((r: StreamChunk) => (r as StreamChunk & { type: string }).type).join(',')}]`, + { + middleware: mw.name ?? 'unnamed', + hook: 'onChunk', + in: c, + out: result, + }, + ) aiEventClient.emit('middleware:chunk:transformed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -166,6 +208,15 @@ export class MiddlewareRunner { // Replace nextChunks.push(result) if (!skip) { + this.logger.middleware( + `hook=onChunk middleware=${mw.name ?? 
'unnamed'} in=${chunkType} out=${(result as StreamChunk & { type: string }).type}`, + { + middleware: mw.name ?? 'unnamed', + hook: 'onChunk', + in: c, + out: result, + }, + ) aiEventClient.emit('middleware:chunk:transformed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -197,6 +248,10 @@ export class MiddlewareRunner { const decision = await mw.onBeforeToolCall(ctx, hookCtx) const hasTransform = decision !== undefined && decision !== null if (!skip) { + this.logger.middleware( + `hook=onBeforeToolCall middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 'unnamed', hook: 'onBeforeToolCall' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -227,6 +282,10 @@ export class MiddlewareRunner { const start = Date.now() await mw.onAfterToolCall(ctx, info) if (!skip) { + this.logger.middleware( + `hook=onAfterToolCall middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 'unnamed', hook: 'onAfterToolCall' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -253,6 +312,10 @@ export class MiddlewareRunner { const start = Date.now() await mw.onUsage(ctx, usage) if (!skip) { + this.logger.middleware( + `hook=onUsage middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 'unnamed', hook: 'onUsage' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -279,6 +342,10 @@ export class MiddlewareRunner { const start = Date.now() await mw.onFinish(ctx, info) if (!skip) { + this.logger.middleware( + `hook=onFinish middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 
'unnamed', hook: 'onFinish' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -302,6 +369,10 @@ export class MiddlewareRunner { const start = Date.now() await mw.onAbort(ctx, info) if (!skip) { + this.logger.middleware( + `hook=onAbort middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 'unnamed', hook: 'onAbort' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -325,6 +396,10 @@ export class MiddlewareRunner { const start = Date.now() await mw.onError(ctx, info) if (!skip) { + this.logger.middleware( + `hook=onError middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 'unnamed', hook: 'onError' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -352,6 +427,10 @@ export class MiddlewareRunner { const start = Date.now() await mw.onIteration(ctx, info) if (!skip) { + this.logger.middleware( + `hook=onIteration middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 'unnamed', hook: 'onIteration' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', @@ -379,6 +458,10 @@ export class MiddlewareRunner { const start = Date.now() await mw.onToolPhaseComplete(ctx, info) if (!skip) { + this.logger.middleware( + `hook=onToolPhaseComplete middleware=${mw.name ?? 'unnamed'}`, + { middleware: mw.name ?? 
'unnamed', hook: 'onToolPhaseComplete' }, + ) aiEventClient.emit('middleware:hook:executed', { ...instrumentCtx(ctx), middlewareName: mw.name || 'unnamed', diff --git a/packages/typescript/ai/src/activities/generateImage/index.ts b/packages/typescript/ai/src/activities/generateImage/index.ts index ed7c7c3c1..7dd4333e4 100644 --- a/packages/typescript/ai/src/activities/generateImage/index.ts +++ b/packages/typescript/ai/src/activities/generateImage/index.ts @@ -7,6 +7,9 @@ import { aiEventClient } from '@tanstack/ai-event-client' import { streamGenerationResult } from '../stream-generation-result.js' +import { resolveDebugOption } from '../../logger/resolve' +import type { InternalLogger } from '../../logger/internal-logger' +import type { DebugOption } from '../../logger/types' import type { ImageAdapter } from './adapter' import type { ImageGenerationResult, StreamChunk } from '../../types' @@ -83,6 +86,12 @@ export type ImageActivityOptions< * @default false */ stream?: TStream + /** + * Enable debug logging. Pass `true` to enable all categories, `false` to + * silence everything including errors, or a `DebugConfig` object for granular + * control and/or a custom `Logger`. + */ + debug?: DebugOption } & ({} extends ImageProviderOptionsForModel ? 
{ /** Provider-specific options for image generation */ modelOptions?: ImageProviderOptionsForModel< @@ -188,10 +197,11 @@ async function runGenerateImage< >( options: ImageActivityOptions, ): Promise { - const { adapter, stream: _stream, ...rest } = options + const { adapter, stream: _stream, debug: _debug, ...rest } = options const model = adapter.model const requestId = createId('image') const startTime = Date.now() + const logger: InternalLogger = resolveDebugOption(options.debug) aiEventClient.emit('image:request:started', { requestId, @@ -204,7 +214,13 @@ async function runGenerateImage< timestamp: startTime, }) - return adapter.generateImages({ ...rest, model }).then((result) => { + logger.request(`activity=generateImage provider=${adapter.name}`, { + provider: adapter.name, + model, + }) + + try { + const result = await adapter.generateImages({ ...rest, model, logger }) const duration = Date.now() - startTime aiEventClient.emit('image:request:completed', { @@ -230,8 +246,18 @@ async function runGenerateImage< }) } + logger.output(`activity=generateImage count=${result.images.length}`, { + count: result.images.length, + }) + return result - }) + } catch (error) { + logger.errors('generateImage activity failed', { + error, + source: 'generateImage', + }) + throw error + } } // =========================== diff --git a/packages/typescript/ai/src/activities/generateSpeech/index.ts b/packages/typescript/ai/src/activities/generateSpeech/index.ts index 99c25fa29..dac3c2ba4 100644 --- a/packages/typescript/ai/src/activities/generateSpeech/index.ts +++ b/packages/typescript/ai/src/activities/generateSpeech/index.ts @@ -7,6 +7,9 @@ import { aiEventClient } from '@tanstack/ai-event-client' import { streamGenerationResult } from '../stream-generation-result.js' +import { resolveDebugOption } from '../../logger/resolve' +import type { InternalLogger } from '../../logger/internal-logger' +import type { DebugOption } from '../../logger/types' import type { TTSAdapter } 
from './adapter' import type { StreamChunk, TTSResult } from '../../types' @@ -64,6 +67,12 @@ export interface TTSActivityOptions< * @default false */ stream?: TStream + /** + * Enable debug logging. Pass `true` to enable all categories, `false` to + * silence everything including errors, or a `DebugConfig` object for granular + * control and/or a custom `Logger`. + */ + debug?: DebugOption } // =========================== @@ -134,10 +143,15 @@ export function generateSpeech< async function runGenerateSpeech>( options: TTSActivityOptions, ): Promise { - const { adapter, stream: _stream, ...rest } = options + const { adapter, stream: _stream, debug: _debug, ...rest } = options const model = adapter.model const requestId = createId('speech') const startTime = Date.now() + const logger: InternalLogger = resolveDebugOption(options.debug) + const providerName = + (adapter as { name?: string; provider?: string }).provider ?? + (adapter as { name?: string }).name ?? + 'unknown' aiEventClient.emit('speech:request:started', { requestId, @@ -151,7 +165,14 @@ async function runGenerateSpeech>( timestamp: startTime, }) - return adapter.generateSpeech({ ...rest, model }).then((result) => { + logger.request(`activity=generateSpeech provider=${providerName}`, { + provider: providerName, + model, + }) + + try { + const result = await adapter.generateSpeech({ ...rest, model, logger }) + const duration = Date.now() - startTime aiEventClient.emit('speech:request:completed', { @@ -167,8 +188,19 @@ async function runGenerateSpeech>( timestamp: Date.now(), }) + logger.output(`activity=generateSpeech bytes=${result.audio.length}`, { + bytes: result.audio.length, + contentType: result.contentType, + }) + return result - }) + } catch (error) { + logger.errors('generateSpeech activity failed', { + error, + source: 'generateSpeech', + }) + throw error + } } // =========================== diff --git a/packages/typescript/ai/src/activities/generateTranscription/index.ts 
b/packages/typescript/ai/src/activities/generateTranscription/index.ts index a2a726903..793fdcb27 100644 --- a/packages/typescript/ai/src/activities/generateTranscription/index.ts +++ b/packages/typescript/ai/src/activities/generateTranscription/index.ts @@ -7,6 +7,9 @@ import { aiEventClient } from '@tanstack/ai-event-client' import { streamGenerationResult } from '../stream-generation-result.js' +import { resolveDebugOption } from '../../logger/resolve' +import type { InternalLogger } from '../../logger/internal-logger' +import type { DebugOption } from '../../logger/types' import type { TranscriptionAdapter } from './adapter' import type { StreamChunk, TranscriptionResult } from '../../types' @@ -64,6 +67,12 @@ export interface TranscriptionActivityOptions< * @default false */ stream?: TStream + /** + * Enable debug logging. Pass `true` to enable all categories, `false` to + * silence everything including errors, or a `DebugConfig` object for granular + * control and/or a custom `Logger`. + */ + debug?: DebugOption } // =========================== @@ -156,10 +165,15 @@ async function runGenerateTranscription< >( options: TranscriptionActivityOptions, ): Promise { - const { adapter, stream: _stream, ...rest } = options + const { adapter, stream: _stream, debug: _debug, ...rest } = options const model = adapter.model const requestId = createId('transcription') const startTime = Date.now() + const logger: InternalLogger = resolveDebugOption(options.debug) + const providerName = + (adapter as { name?: string; provider?: string }).provider ?? + (adapter as { name?: string }).name ?? 
+ 'unknown' aiEventClient.emit('transcription:request:started', { requestId, @@ -172,21 +186,39 @@ async function runGenerateTranscription< timestamp: startTime, }) - const result = await adapter.transcribe({ ...rest, model }) - const duration = Date.now() - startTime - - aiEventClient.emit('transcription:request:completed', { - requestId, - provider: adapter.name, + logger.request(`activity=generateTranscription provider=${providerName}`, { + provider: providerName, model, - text: result.text, - language: result.language, - duration, - modelOptions: rest.modelOptions as Record | undefined, - timestamp: Date.now(), }) - return result + try { + const result = await adapter.transcribe({ ...rest, model, logger }) + const duration = Date.now() - startTime + + aiEventClient.emit('transcription:request:completed', { + requestId, + provider: adapter.name, + model, + text: result.text, + language: result.language, + duration, + modelOptions: rest.modelOptions as Record | undefined, + timestamp: Date.now(), + }) + + logger.output( + `activity=generateTranscription length=${result.text.length}`, + { hasText: !!result.text }, + ) + + return result + } catch (error) { + logger.errors('generateTranscription activity failed', { + error, + source: 'generateTranscription', + }) + throw error + } } // =========================== diff --git a/packages/typescript/ai/src/activities/generateVideo/index.ts b/packages/typescript/ai/src/activities/generateVideo/index.ts index b8048fabb..61b27ea54 100644 --- a/packages/typescript/ai/src/activities/generateVideo/index.ts +++ b/packages/typescript/ai/src/activities/generateVideo/index.ts @@ -8,6 +8,9 @@ */ import { aiEventClient } from '@tanstack/ai-event-client' +import { resolveDebugOption } from '../../logger/resolve' +import type { InternalLogger } from '../../logger/internal-logger' +import type { DebugOption } from '../../logger/types' import type { VideoAdapter } from './adapter' import type { StreamChunk, @@ -100,6 +103,12 @@ export 
type VideoCreateOptions< maxDuration?: number /** Custom run ID (stream mode only) */ runId?: string + /** + * Enable debug logging. Pass `true` to enable all categories, `false` to + * silence everything including errors, or a `DebugConfig` object for granular + * control and/or a custom `Logger`. + */ + debug?: DebugOption } & ({} extends VideoProviderOptions ? { /** Provider-specific options for video generation */ modelOptions?: VideoProviderOptions @@ -241,14 +250,38 @@ async function runCreateVideoJob< >(options: VideoCreateOptions): Promise { const { adapter, prompt, size, duration, modelOptions } = options const model = adapter.model - - return adapter.createVideoJob({ + const logger: InternalLogger = resolveDebugOption(options.debug) + const providerName = + (adapter as { name?: string; provider?: string }).provider ?? + (adapter as { name?: string }).name ?? + 'unknown' + + logger.request(`activity=generateVideo provider=${providerName}`, { + provider: providerName, model, - prompt, - size, - duration, - modelOptions, }) + + try { + const result = await adapter.createVideoJob({ + model, + prompt, + size, + duration, + modelOptions, + logger, + }) + logger.output(`activity=generateVideo jobId=${result.jobId}`, { + jobId: result.jobId, + model: result.model, + }) + return result + } catch (error) { + logger.errors('generateVideo activity failed', { + error, + source: 'generateVideo', + }) + throw error + } } function sleep(ms: number): Promise { @@ -267,6 +300,11 @@ async function* runStreamingVideoGeneration< const runId = options.runId ?? createId('run') const pollingInterval = options.pollingInterval ?? 2000 const maxDuration = options.maxDuration ?? 600_000 + const logger: InternalLogger = resolveDebugOption(options.debug) + const providerName = + (adapter as { name?: string; provider?: string }).provider ?? + (adapter as { name?: string }).name ?? 
+ 'unknown' const threadId = createId('thread') @@ -277,6 +315,14 @@ async function* runStreamingVideoGeneration< timestamp: Date.now(), } as StreamChunk + logger.request( + `activity=generateVideo provider=${providerName} stream=true`, + { + provider: providerName, + model, + }, + ) + try { // Create the video generation job const jobResult = await adapter.createVideoJob({ @@ -285,6 +331,7 @@ async function* runStreamingVideoGeneration< size, duration, modelOptions, + logger, }) yield { @@ -316,6 +363,14 @@ async function* runStreamingVideoGeneration< if (statusResult.status === 'completed') { const urlResult = await adapter.getVideoUrl(jobResult.jobId) + logger.output( + `activity=generateVideo jobId=${jobResult.jobId} status=completed`, + { + jobId: jobResult.jobId, + url: urlResult.url, + }, + ) + yield { type: 'CUSTOM', name: 'generation:result', @@ -345,6 +400,10 @@ async function* runStreamingVideoGeneration< throw new Error('Video generation timed out') } catch (error: any) { + logger.errors('generateVideo activity failed', { + error, + source: 'generateVideo', + }) yield { type: 'RUN_ERROR', runId, diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts index 7102d502e..e73bd7532 100644 --- a/packages/typescript/ai/src/activities/summarize/index.ts +++ b/packages/typescript/ai/src/activities/summarize/index.ts @@ -7,6 +7,9 @@ import { aiEventClient } from '@tanstack/ai-event-client' import { streamGenerationResult } from '../stream-generation-result.js' +import { resolveDebugOption } from '../../logger/resolve' +import type { InternalLogger } from '../../logger/internal-logger' +import type { DebugOption } from '../../logger/types' import type { SummarizeAdapter } from './adapter' import type { StreamChunk, @@ -66,6 +69,12 @@ export interface SummarizeActivityOptions< * @default false */ stream?: TStream + /** + * Enable debug logging. 
Pass `true` to enable all categories, `false` to + * silence everything including errors, or a `DebugConfig` object for granular + * control and/or a custom `Logger`. + */ + debug?: DebugOption } // =========================== @@ -180,6 +189,7 @@ async function runSummarize( const requestId = createId('summarize') const inputLength = text.length const startTime = Date.now() + const logger: InternalLogger = resolveDebugOption(options.debug) aiEventClient.emit('summarize:request:started', { requestId, @@ -189,30 +199,50 @@ async function runSummarize( timestamp: startTime, }) + logger.request(`activity=summarize provider=${adapter.name}`, { + provider: adapter.name, + model, + inputLength, + }) + const summarizeOptions: SummarizationOptions = { model, text, maxLength, style, focus, + logger, } - const result = await adapter.summarize(summarizeOptions) + try { + const result = await adapter.summarize(summarizeOptions) - const duration = Date.now() - startTime - const outputLength = result.summary.length + const duration = Date.now() - startTime + const outputLength = result.summary.length - aiEventClient.emit('summarize:request:completed', { - requestId, - provider: adapter.name, - model, - inputLength, - outputLength, - duration, - timestamp: Date.now(), - }) + aiEventClient.emit('summarize:request:completed', { + requestId, + provider: adapter.name, + model, + inputLength, + outputLength, + duration, + timestamp: Date.now(), + }) + + logger.output(`activity=summarize length=${outputLength}`, { + hasSummary: !!result.summary, + outputLength, + }) - return result + return result + } catch (error) { + logger.errors('summarize activity failed', { + error, + source: 'summarize', + }) + throw error + } } /** @@ -225,6 +255,13 @@ async function* runStreamingSummarize( ): AsyncIterable { const { adapter, text, maxLength, style, focus } = options const model = adapter.model + const logger: InternalLogger = resolveDebugOption(options.debug) + + 
logger.request(`activity=summarize provider=${adapter.name}`, { + provider: adapter.name, + model, + stream: true, + }) const summarizeOptions: SummarizationOptions = { model, @@ -232,16 +269,25 @@ async function* runStreamingSummarize( maxLength, style, focus, + logger, } - // Use real streaming if the adapter supports it - if (adapter.summarizeStream) { - yield* adapter.summarizeStream(summarizeOptions) - return - } + try { + // Use real streaming if the adapter supports it + if (adapter.summarizeStream) { + yield* adapter.summarizeStream(summarizeOptions) + return + } - // Fall back to non-streaming — wrap result with streamGenerationResult - yield* streamGenerationResult(() => adapter.summarize(summarizeOptions)) + // Fall back to non-streaming — wrap result with streamGenerationResult + yield* streamGenerationResult(() => adapter.summarize(summarizeOptions)) + } catch (error) { + logger.errors('summarize activity failed', { + error, + source: 'summarize', + }) + throw error + } } // =========================== diff --git a/packages/typescript/ai/src/adapter-internals.ts b/packages/typescript/ai/src/adapter-internals.ts new file mode 100644 index 000000000..467b0027e --- /dev/null +++ b/packages/typescript/ai/src/adapter-internals.ts @@ -0,0 +1,7 @@ +// NOTE: This module is exposed ONLY via the `@tanstack/ai/adapter-internals` +// subpath export. It gives provider adapter packages access to the internal +// logger plumbing without leaking those symbols to end users. 
+ +export type { ResolvedCategories } from './logger/internal-logger' +export { InternalLogger } from './logger/internal-logger' +export { resolveDebugOption } from './logger/resolve' diff --git a/packages/typescript/ai/src/index.ts b/packages/typescript/ai/src/index.ts index 34e1b5922..e154d1f8e 100644 --- a/packages/typescript/ai/src/index.ts +++ b/packages/typescript/ai/src/index.ts @@ -164,3 +164,12 @@ export type { // Adapter extension utilities export { createModel, extendAdapter } from './extend-adapter' export type { ExtendedModelDef } from './extend-adapter' + +// Logger +export type { + Logger, + DebugCategories, + DebugConfig, + DebugOption, +} from './logger/types' +export { ConsoleLogger } from './logger/console-logger' diff --git a/packages/typescript/ai/src/logger/console-logger.ts b/packages/typescript/ai/src/logger/console-logger.ts new file mode 100644 index 000000000..0be666576 --- /dev/null +++ b/packages/typescript/ai/src/logger/console-logger.ts @@ -0,0 +1,49 @@ +import type { Logger } from './types' + +/** + * Default `Logger` implementation that routes each level to the matching + * `console` method: + * + * - `debug` → `console.debug` + * - `info` → `console.info` + * - `warn` → `console.warn` + * - `error` → `console.error` + * + * When a `meta` object is supplied, the message is logged first and the meta + * object is then printed via `console.dir(meta, { depth: null, colors: true })` + * so deeply nested structures (e.g. provider chunk payloads with `usage`, + * `output`, `reasoning`, `tools`) render in full instead of truncating to + * `[Object]` / `[Array]`. On Node this produces a depth-unlimited inspect + * dump; browsers present the object as an interactive tree (extra options + * are ignored). + * + * This is the logger used when `debug` is enabled on any activity and no + * custom `logger` is supplied via `debug: { logger }`. 
+ */ +const DIR_OPTIONS = { depth: null, colors: true } as const + +export class ConsoleLogger implements Logger { + /** Log a debug-level message; forwards to `console.debug`. */ + debug(message: string, meta?: Record): void { + console.debug(message) + if (meta !== undefined) console.dir(meta, DIR_OPTIONS) + } + + /** Log an info-level message; forwards to `console.info`. */ + info(message: string, meta?: Record): void { + console.info(message) + if (meta !== undefined) console.dir(meta, DIR_OPTIONS) + } + + /** Log a warning-level message; forwards to `console.warn`. */ + warn(message: string, meta?: Record): void { + console.warn(message) + if (meta !== undefined) console.dir(meta, DIR_OPTIONS) + } + + /** Log an error-level message; forwards to `console.error`. */ + error(message: string, meta?: Record): void { + console.error(message) + if (meta !== undefined) console.dir(meta, DIR_OPTIONS) + } +} diff --git a/packages/typescript/ai/src/logger/internal-logger.ts b/packages/typescript/ai/src/logger/internal-logger.ts new file mode 100644 index 000000000..456e7c71a --- /dev/null +++ b/packages/typescript/ai/src/logger/internal-logger.ts @@ -0,0 +1,102 @@ +import type { DebugCategories, Logger } from './types' + +/** + * Fully-resolved categories map. Every flag is a definite boolean (never + * undefined), produced by `resolveDebugOption` from a `DebugOption`. + */ +export type ResolvedCategories = Required + +/** + * Package-internal logger wrapper used by every activity and adapter in + * `@tanstack/ai`. Wraps a user-supplied (or default `ConsoleLogger`) `Logger` + * plus a fully-resolved per-category map. Each category has a dedicated + * method that no-ops when its flag is `false`, or prepends a + * `[tanstack-ai:] ` prefix and calls the underlying logger's + * `error` (for the `errors` category) or `debug` (for everything else). + * + * Not exported from the package root. 
Adapter packages consume it via the
+ * `@tanstack/ai/adapter-internals` subpath export.
+ */
+/**
+ * Emoji marker per category — bracketing the `[tanstack-ai:<category>]` tag on
+ * both sides makes it trivial to visually pick out a category when scanning
+ * dense streaming logs.
+ */
+const CATEGORY_EMOJI: Record<keyof ResolvedCategories, string> = {
+  request: '📤',
+  provider: '📥',
+  output: '📨',
+  middleware: '🧩',
+  tools: '🔧',
+  agentLoop: '🔁',
+  config: '⚙️',
+  errors: '❌',
+}
+
+export class InternalLogger {
+  constructor(
+    private readonly logger: Logger,
+    private readonly categories: ResolvedCategories,
+  ) {}
+
+  /** Whether a category is enabled. Cheap, safe to call on hot paths. */
+  isEnabled(category: keyof ResolvedCategories): boolean {
+    return this.categories[category]
+  }
+
+  private emit(
+    level: 'debug' | 'error',
+    category: keyof ResolvedCategories,
+    message: string,
+    meta?: Record<string, unknown>,
+  ): void {
+    if (!this.categories[category]) return
+    const emoji = CATEGORY_EMOJI[category]
+    const prefixed = `${emoji} [tanstack-ai:${category}] ${emoji} ${message}`
+    if (level === 'error') this.logger.error(prefixed, meta)
+    else this.logger.debug(prefixed, meta)
+  }
+
+  /** Log a raw chunk/frame received from a provider SDK. */
+  provider(message: string, meta?: Record<string, unknown>): void {
+    this.emit('debug', 'provider', message, meta)
+  }
+
+  /** Log a chunk/result yielded to the consumer after middleware. */
+  output(message: string, meta?: Record<string, unknown>): void {
+    this.emit('debug', 'output', message, meta)
+  }
+
+  /** Log inputs/outputs around a middleware hook invocation. Chat-only. */
+  middleware(message: string, meta?: Record<string, unknown>): void {
+    this.emit('debug', 'middleware', message, meta)
+  }
+
+  /** Log before/after a tool-call execution. Chat-only. */
+  tools(message: string, meta?: Record<string, unknown>): void {
+    this.emit('debug', 'tools', message, meta)
+  }
+
+  /** Log an agent-loop iteration marker or phase transition. Chat-only.
*/
+  agentLoop(message: string, meta?: Record<string, unknown>): void {
+    this.emit('debug', 'agentLoop', message, meta)
+  }
+
+  /** Log a config transform returned by a middleware `onConfig` hook. Chat-only. */
+  config(message: string, meta?: Record<string, unknown>): void {
+    this.emit('debug', 'config', message, meta)
+  }
+
+  /**
+   * Log a caught error. Defaults to on even when `debug` is unspecified.
+   * Uses the underlying logger's `error` level.
+   */
+  errors(message: string, meta?: Record<string, unknown>): void {
+    this.emit('error', 'errors', message, meta)
+  }
+
+  /** Log outgoing request metadata before an adapter SDK call. */
+  request(message: string, meta?: Record<string, unknown>): void {
+    this.emit('debug', 'request', message, meta)
+  }
+}
diff --git a/packages/typescript/ai/src/logger/resolve.ts b/packages/typescript/ai/src/logger/resolve.ts
new file mode 100644
index 000000000..688240d48
--- /dev/null
+++ b/packages/typescript/ai/src/logger/resolve.ts
@@ -0,0 +1,72 @@
+import { ConsoleLogger } from './console-logger'
+import { InternalLogger } from './internal-logger'
+import type { ResolvedCategories } from './internal-logger'
+import type { DebugCategories, DebugConfig, DebugOption, Logger } from './types'
+
+const ALL_OFF: ResolvedCategories = {
+  provider: false,
+  output: false,
+  middleware: false,
+  tools: false,
+  agentLoop: false,
+  config: false,
+  errors: false,
+  request: false,
+}
+
+const ALL_ON: ResolvedCategories = {
+  provider: true,
+  output: true,
+  middleware: true,
+  tools: true,
+  agentLoop: true,
+  config: true,
+  errors: true,
+  request: true,
+}
+
+const errorsOnlyCategories = (): ResolvedCategories => ({
+  ...ALL_OFF,
+  errors: true,
+})
+
+const resolveCategoriesFromPartial = (
+  partial: DebugCategories,
+): ResolvedCategories => ({
+  provider: partial.provider ?? true,
+  output: partial.output ?? true,
+  middleware: partial.middleware ?? true,
+  tools: partial.tools ?? true,
+  agentLoop: partial.agentLoop ?? true,
+  config: partial.config ?? true,
+  errors: partial.errors ?? 
true, + request: partial.request ?? true, +}) + +/** + * Normalize a `DebugOption` into an `InternalLogger` ready to be threaded + * through the library's activities and adapters. See the `DebugOption` + * resolution table in the spec for the complete rules. + * + * - `undefined`: only the `errors` category is enabled; default `ConsoleLogger`. + * - `true`: all categories enabled; default `ConsoleLogger`. + * - `false`: all categories disabled (including `errors`); default `ConsoleLogger`. + * - `DebugConfig`: each unspecified category defaults to `true`; an optional + * `logger` replaces the default `ConsoleLogger`. + */ +export function resolveDebugOption( + debug: DebugOption | undefined, +): InternalLogger { + if (debug === undefined) { + return new InternalLogger(new ConsoleLogger(), errorsOnlyCategories()) + } + if (debug === true) { + return new InternalLogger(new ConsoleLogger(), ALL_ON) + } + if (debug === false) { + return new InternalLogger(new ConsoleLogger(), ALL_OFF) + } + const { logger, ...cats }: DebugConfig = debug + const userLogger: Logger = logger ?? new ConsoleLogger() + return new InternalLogger(userLogger, resolveCategoriesFromPartial(cats)) +} diff --git a/packages/typescript/ai/src/logger/types.ts b/packages/typescript/ai/src/logger/types.ts new file mode 100644 index 000000000..a44157b4e --- /dev/null +++ b/packages/typescript/ai/src/logger/types.ts @@ -0,0 +1,78 @@ +/** + * Pluggable logger interface consumed by every `@tanstack/ai` activity when `debug` is enabled. Supply a custom implementation via `debug: { logger }` on `chat()`, `summarize()`, `generateImage()`, etc. The four methods correspond to log levels: use `debug` for chunk-level diagnostic output, `info`/`warn` for notable events, `error` for caught exceptions. + */ +export interface Logger { + /** + * Called for chunk-level diagnostic output (raw provider chunks, per-chunk output, agent-loop iteration markers). 
+   * @param meta Structured data forwarded to the underlying logger. Loggers like pino will preserve this as a structured record; console-based loggers pass it as the second argument to `console.<level>`.
+   */
+  debug: (message: string, meta?: Record<string, unknown>) => void
+  /**
+   * Called for notable informational events (outgoing requests, tool invocations, middleware transitions).
+   * @param meta Structured data forwarded to the underlying logger. Loggers like pino will preserve this as a structured record; console-based loggers pass it as the second argument to `console.<level>`.
+   */
+  info: (message: string, meta?: Record<string, unknown>) => void
+  /**
+   * Called for notable warnings that don't halt execution (deprecations, recoverable anomalies).
+   * @param meta Structured data forwarded to the underlying logger. Loggers like pino will preserve this as a structured record; console-based loggers pass it as the second argument to `console.<level>`.
+   */
+  warn: (message: string, meta?: Record<string, unknown>) => void
+  /**
+   * Called for caught exceptions throughout the pipeline.
+   * @param meta Structured data forwarded to the underlying logger. Loggers like pino will preserve this as a structured record; console-based loggers pass it as the second argument to `console.<level>`.
+   */
+  error: (message: string, meta?: Record<string, unknown>) => void
+}
+
+/**
+ * Per-category toggles for debug logging. Each flag enables or disables one class of log message. Unspecified flags default to `true` when `DebugConfig` is partially specified; `undefined` on the `debug` option defaults all flags to `false` except `errors`.
+ */
+export interface DebugCategories {
+  /**
+   * Raw chunks/frames received from a provider SDK (OpenAI, Anthropic, Gemini, Ollama, Grok, Groq, OpenRouter, fal, ElevenLabs). Emitted inside every streaming adapter's chunk loop.
+   */
+  provider?: boolean
+  /**
+   * Chunks/results yielded to the consumer after all middleware. For streaming activities this fires per chunk; for non-streaming activities it fires once per result.
+ */ + output?: boolean + /** + * Inputs and outputs around each middleware hook invocation. Chat-only. + */ + middleware?: boolean + /** + * Before/after tool-call execution in the chat agent loop. Chat-only. + */ + tools?: boolean + /** + * Iteration markers and phase transitions in the chat agent loop. Chat-only. + */ + agentLoop?: boolean + /** + * Config transforms returned by middleware `onConfig` hooks. Chat-only. + */ + config?: boolean + /** + * Caught errors throughout the pipeline. Unlike other categories, defaults to `true` even when `debug` is unspecified. Explicitly set `errors: false` or `debug: false` to silence. + */ + errors?: boolean + /** + * Outgoing call metadata (provider, model, message/tool counts) emitted before each adapter SDK call. + */ + request?: boolean +} + +/** + * Granular debug configuration combining per-category toggles with an optional custom logger. Any unspecified category flag defaults to `true`. + */ +export interface DebugConfig extends DebugCategories { + /** + * Custom `Logger` implementation. When omitted, a default `ConsoleLogger` routes output to `console.debug`/`info`/`warn`/`error`. + */ + logger?: Logger +} + +/** + * The shape accepted by the `debug` option on every `@tanstack/ai` activity. Pass `true` to enable all categories with the default console logger; `false` to silence everything including errors; an object for granular control. 
+ */ +export type DebugOption = boolean | DebugConfig diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index a17793f13..18bcf8223 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -1,4 +1,5 @@ import type { StandardJSONSchemaV1 } from '@standard-schema/spec' +import type { InternalLogger } from './logger/internal-logger' import type { BaseEvent as AGUIBaseEvent, CustomEvent as AGUICustomEvent, @@ -738,6 +739,14 @@ export interface TextOptions< * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortController */ abortController?: AbortController + + /** + * Internal logger threaded from the chat entry point. Adapter implementations + * must call `logger.request()` before SDK calls, `logger.provider()` for each + * chunk received, and `logger.errors()` in catch blocks. + */ + logger: InternalLogger + /** * Thread ID for AG-UI protocol run correlation. * When provided, this will be used in RunStartedEvent and RunFinishedEvent. @@ -1163,6 +1172,11 @@ export interface SummarizationOptions { maxLength?: number style?: 'bullet-points' | 'paragraph' | 'concise' focus?: Array + /** + * Internal logger threaded from the summarize() entry point. Adapters must + * call logger.request() before the SDK call and logger.errors() in catch blocks. + */ + logger: InternalLogger } export interface SummarizationResult { @@ -1198,6 +1212,11 @@ export interface ImageGenerationOptions< size?: TSize /** Model-specific options for image generation */ modelOptions?: TProviderOptions + /** + * Internal logger threaded from the generateImage() entry point. Adapters must + * call logger.request() before the SDK call and logger.errors() in catch blocks. 
+ */ + logger: InternalLogger } /** @@ -1254,6 +1273,11 @@ export interface VideoGenerationOptions< duration?: number /** Model-specific options for video generation */ modelOptions?: TProviderOptions + /** + * Internal logger threaded from the generateVideo() entry point. Adapters must + * call logger.request() before the SDK call and logger.errors() in catch blocks. + */ + logger: InternalLogger } /** @@ -1319,6 +1343,12 @@ export interface TTSOptions { speed?: number /** Model-specific options for TTS generation */ modelOptions?: TProviderOptions + /** + * Internal logger threaded from the generateSpeech() entry point. Adapters + * must call logger.request() before the SDK call and logger.errors() in + * catch blocks. + */ + logger: InternalLogger } /** @@ -1362,6 +1392,12 @@ export interface TranscriptionOptions< responseFormat?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt' /** Model-specific options for transcription */ modelOptions?: TProviderOptions + /** + * Internal logger threaded from the generateTranscription() entry point. + * Adapters must call logger.request() before the SDK call and logger.errors() + * in catch blocks. 
+ */ + logger: InternalLogger } /** diff --git a/packages/typescript/ai/tests/debug-logging-activities.test.ts b/packages/typescript/ai/tests/debug-logging-activities.test.ts new file mode 100644 index 000000000..9a4a56b29 --- /dev/null +++ b/packages/typescript/ai/tests/debug-logging-activities.test.ts @@ -0,0 +1,239 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { + generateImage, + generateSpeech, + generateTranscription, + generateVideo, + summarize, +} from '../src/index' +import type { Logger } from '../src/logger/types' + +// ============================================================================ +// Helpers +// ============================================================================ + +const makeSpyLogger = () => ({ + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), +}) + +const logPrefixes = ( + calls: ReadonlyArray>, +): Array => + calls.map((call) => { + const m = call[0] + return typeof m === 'string' ? m : String(m) + }) + +// ============================================================================ +// Tests +// ============================================================================ + +describe('debug logging — non-chat activities', () => { + let logger: ReturnType + beforeEach(() => { + logger = makeSpyLogger() + }) + + it('summarize emits request and output categories', async () => { + const adapter = { + kind: 'summarize' as const, + name: 'mock', + model: 'mock-model', + summarize: vi.fn(async () => ({ summary: 'done' })), + } + + await summarize({ + adapter: adapter as any, + text: 'long text to summarize', + debug: { logger: logger as unknown as Logger }, + }) + + const msgs = logPrefixes(logger.debug.mock.calls) + expect(msgs.some((m) => m.includes('[tanstack-ai:request]'))).toBe(true) + expect(msgs.some((m) => m.includes('[tanstack-ai:output]'))).toBe(true) + }) + + it('generateImage emits request and output categories', async () => { + const adapter = { + kind: 'image' as const, + 
name: 'mock', + model: 'mock-image-model', + generateImages: vi.fn(async () => ({ + images: [{ url: 'https://example.com/image.png' }], + })), + } + + await generateImage({ + adapter: adapter as any, + prompt: 'a sunset over the ocean', + debug: { logger: logger as unknown as Logger }, + }) + + const msgs = logPrefixes(logger.debug.mock.calls) + expect(msgs.some((m) => m.includes('[tanstack-ai:request]'))).toBe(true) + expect(msgs.some((m) => m.includes('[tanstack-ai:output]'))).toBe(true) + }) + + it('generateSpeech emits request and output categories', async () => { + const adapter = { + kind: 'tts' as const, + name: 'mock', + model: 'mock-tts-model', + generateSpeech: vi.fn(async () => ({ + audio: 'base64-audio', + format: 'mp3', + contentType: 'audio/mpeg', + })), + } + + await generateSpeech({ + adapter: adapter as any, + text: 'hello world', + debug: { logger: logger as unknown as Logger }, + }) + + const msgs = logPrefixes(logger.debug.mock.calls) + expect(msgs.some((m) => m.includes('[tanstack-ai:request]'))).toBe(true) + expect(msgs.some((m) => m.includes('[tanstack-ai:output]'))).toBe(true) + }) + + it('generateTranscription emits request and output categories', async () => { + const adapter = { + kind: 'transcription' as const, + name: 'mock', + model: 'mock-transcription-model', + transcribe: vi.fn(async () => ({ + text: 'transcribed text', + language: 'en', + })), + } + + await generateTranscription({ + adapter: adapter as any, + audio: 'base64-audio-data', + debug: { logger: logger as unknown as Logger }, + }) + + const msgs = logPrefixes(logger.debug.mock.calls) + expect(msgs.some((m) => m.includes('[tanstack-ai:request]'))).toBe(true) + expect(msgs.some((m) => m.includes('[tanstack-ai:output]'))).toBe(true) + }) + + it('generateVideo emits request and output categories', async () => { + const adapter = { + kind: 'video' as const, + name: 'mock', + model: 'mock-video-model', + createVideoJob: vi.fn(async () => ({ + jobId: 'job-123', + model: 
'mock-video-model', + })), + getVideoStatus: vi.fn(async () => ({ status: 'completed' as const })), + getVideoUrl: vi.fn(async () => ({ url: 'https://example.com/v.mp4' })), + } + + await generateVideo({ + adapter: adapter as any, + prompt: 'a cat walking across a table', + debug: { logger: logger as unknown as Logger }, + }) + + const msgs = logPrefixes(logger.debug.mock.calls) + expect(msgs.some((m) => m.includes('[tanstack-ai:request]'))).toBe(true) + expect(msgs.some((m) => m.includes('[tanstack-ai:output]'))).toBe(true) + }) + + it('chat-only categories never fire for non-chat activities', async () => { + const adapter = { + kind: 'summarize' as const, + name: 'mock', + model: 'mock-model', + summarize: vi.fn(async () => ({ summary: 'x' })), + } + + await summarize({ + adapter: adapter as any, + text: 'x', + debug: { logger: logger as unknown as Logger }, + }) + + const msgs = logPrefixes(logger.debug.mock.calls) + for (const cat of ['middleware', 'tools', 'agentLoop', 'config']) { + expect(msgs.some((m) => m.includes(`[tanstack-ai:${cat}]`))).toBe(false) + } + }) + + it('adapter errors reach the errors category even when debug is unspecified', async () => { + const errSpy = vi.spyOn(console, 'error').mockImplementation(() => {}) + const adapter = { + kind: 'summarize' as const, + name: 'mock', + model: 'mock-model', + summarize: vi.fn(async () => { + throw new Error('boom') + }), + } + + await expect( + summarize({ adapter: adapter as any, text: 'x' }), + ).rejects.toThrow('boom') + + const msgs = logPrefixes(errSpy.mock.calls) + expect(msgs.some((m) => m.includes('[tanstack-ai:errors]'))).toBe(true) + errSpy.mockRestore() + }) + + it('adapter errors in generateImage route to errors category via custom logger', async () => { + const adapter = { + kind: 'image' as const, + name: 'mock', + model: 'mock-image-model', + generateImages: vi.fn(async () => { + throw new Error('image boom') + }), + } + + await expect( + generateImage({ + adapter: adapter as any, + 
prompt: 'x', + debug: { logger: logger as unknown as Logger }, + }), + ).rejects.toThrow('image boom') + + expect(logger.error).toHaveBeenCalled() + const msgs = logPrefixes(logger.error.mock.calls) + expect(msgs.some((m) => m.includes('[tanstack-ai:errors]'))).toBe(true) + }) + + it('debug: false on non-chat activity silences errors too', async () => { + const adapter = { + kind: 'summarize' as const, + name: 'mock', + model: 'mock-model', + summarize: vi.fn(async () => { + throw new Error('silent boom') + }), + } + + await expect( + summarize({ + adapter: adapter as any, + text: 'x', + debug: { + logger: logger as unknown as Logger, + errors: false, + provider: false, + output: false, + request: false, + }, + }), + ).rejects.toThrow('silent boom') + + expect(logger.debug).not.toHaveBeenCalled() + expect(logger.error).not.toHaveBeenCalled() + }) +}) diff --git a/packages/typescript/ai/tests/debug-logging-chat.test.ts b/packages/typescript/ai/tests/debug-logging-chat.test.ts new file mode 100644 index 000000000..bfe90366c --- /dev/null +++ b/packages/typescript/ai/tests/debug-logging-chat.test.ts @@ -0,0 +1,274 @@ +import { describe, expect, it, vi } from 'vitest' +import { chat } from '../src/activities/chat/index' +import type { Logger } from '../src/logger/types' +import type { StreamChunk } from '../src/types' +import { collectChunks, createMockAdapter, ev } from './test-utils' + +// ============================================================================ +// Helpers +// ============================================================================ + +const makeSpyLogger = () => ({ + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), +}) + +/** + * Create a mock adapter whose chatStream throws once iteration begins. + * Exercises the error path through the chat pipeline. 
+ */ +function createFailingMockAdapter( + message = 'mock adapter failure', +): ReturnType { + return createMockAdapter({ + chatStreamFn: () => + (async function* (): AsyncIterable { + throw new Error(message) + })(), + }) +} + +const logPrefixes = ( + calls: ReadonlyArray>, +): Array => + calls.map((call) => { + const m = call[0] + return typeof m === 'string' ? m : String(m) + }) + +// ============================================================================ +// Tests +// ============================================================================ + +describe('debug logging — chat integration', () => { + it('debug: true emits logs across request, agentLoop, output, and more', async () => { + const logger = makeSpyLogger() + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('hi'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'hello' }], + debug: { logger: logger as unknown as Logger }, + }) + await collectChunks(stream as AsyncIterable) + + const debugMsgs = logPrefixes(logger.debug.mock.calls) + expect(debugMsgs.some((m) => m.includes('[tanstack-ai:request]'))).toBe( + true, + ) + expect(debugMsgs.some((m) => m.includes('[tanstack-ai:output]'))).toBe(true) + expect(debugMsgs.some((m) => m.includes('[tanstack-ai:agentLoop]'))).toBe( + true, + ) + }) + + it('debug: { middleware: false } silences middleware logs only', async () => { + const logger = makeSpyLogger() + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('hi'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'hello' }], + middleware: [ + { + name: 'passthrough', + onChunk: (_ctx, c) => c, + }, + ], + debug: { logger: logger as unknown as Logger, middleware: false }, + }) + await collectChunks(stream as 
AsyncIterable) + + const debugMsgs = logPrefixes(logger.debug.mock.calls) + expect(debugMsgs.some((m) => m.includes('[tanstack-ai:middleware]'))).toBe( + false, + ) + expect(debugMsgs.some((m) => m.includes('[tanstack-ai:output]'))).toBe(true) + }) + + it('granular flags: middleware logs fire when middleware: true', async () => { + const logger = makeSpyLogger() + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('hi'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'hello' }], + middleware: [ + { + name: 'passthrough', + onChunk: (_ctx, c) => c, + }, + ], + debug: { logger: logger as unknown as Logger }, + }) + await collectChunks(stream as AsyncIterable) + + const debugMsgs = logPrefixes(logger.debug.mock.calls) + expect(debugMsgs.some((m) => m.includes('[tanstack-ai:middleware]'))).toBe( + true, + ) + }) + + it('debug: false produces zero logs, even when adapter throws', async () => { + const logger = makeSpyLogger() + const { adapter } = createFailingMockAdapter() + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'hello' }], + debug: { + logger: logger as unknown as Logger, + errors: false, + provider: false, + output: false, + middleware: false, + tools: false, + agentLoop: false, + config: false, + request: false, + }, + }) + try { + await collectChunks(stream as AsyncIterable) + } catch { + // expected — adapter throws + } + + expect(logger.debug).not.toHaveBeenCalled() + expect(logger.error).not.toHaveBeenCalled() + expect(logger.info).not.toHaveBeenCalled() + expect(logger.warn).not.toHaveBeenCalled() + }) + + it('omitted debug — errors still log via default ConsoleLogger', async () => { + const errSpy = vi.spyOn(console, 'error').mockImplementation(() => {}) + const debugSpy = vi.spyOn(console, 'debug').mockImplementation(() => {}) + const { adapter } = createFailingMockAdapter() + + 
const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'hello' }], + }) + try { + await collectChunks(stream as AsyncIterable) + } catch { + // expected — adapter throws + } + + const msgs = logPrefixes(errSpy.mock.calls) + expect(msgs.some((m) => m.includes('[tanstack-ai:errors]'))).toBe(true) + + // No debug-level category should have leaked when `debug` is unspecified. + const debugMsgs = logPrefixes(debugSpy.mock.calls) + expect(debugMsgs.some((m) => m.includes('[tanstack-ai:'))).toBe(false) + + errSpy.mockRestore() + debugSpy.mockRestore() + }) + + it('custom logger receives calls with meta', async () => { + const logger = makeSpyLogger() + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('hi'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'hello' }], + debug: { logger: logger as unknown as Logger }, + }) + await collectChunks(stream as AsyncIterable) + + expect(logger.debug).toHaveBeenCalled() + for (const [, meta] of logger.debug.mock.calls) { + if (meta !== undefined) { + expect(typeof meta).toBe('object') + expect(meta).not.toBeNull() + } + } + }) + + // Task 43 — error paths route to the errors category. 
+ it('adapter throws — custom logger receives error call tagged [tanstack-ai:errors]', async () => { + const logger = makeSpyLogger() + const { adapter } = createFailingMockAdapter('boom') + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'hello' }], + debug: { logger: logger as unknown as Logger }, + }) + try { + await collectChunks(stream as AsyncIterable) + } catch { + // expected — adapter throws + } + + expect(logger.error).toHaveBeenCalled() + const prefixes = logPrefixes(logger.error.mock.calls) + expect(prefixes.some((m) => m.includes('[tanstack-ai:errors]'))).toBe(true) + }) + + it('debug: true with failing adapter still routes error to errors category', async () => { + const errSpy = vi.spyOn(console, 'error').mockImplementation(() => {}) + const debugSpy = vi.spyOn(console, 'debug').mockImplementation(() => {}) + const { adapter } = createFailingMockAdapter('boom-default') + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'hello' }], + debug: true, + }) + try { + await collectChunks(stream as AsyncIterable) + } catch { + // expected — adapter throws + } + + const errMsgs = logPrefixes(errSpy.mock.calls) + expect(errMsgs.some((m) => m.includes('[tanstack-ai:errors]'))).toBe(true) + + errSpy.mockRestore() + debugSpy.mockRestore() + }) +}) diff --git a/packages/typescript/ai/tests/logger/console-logger.test.ts b/packages/typescript/ai/tests/logger/console-logger.test.ts new file mode 100644 index 000000000..b5e88c92c --- /dev/null +++ b/packages/typescript/ai/tests/logger/console-logger.test.ts @@ -0,0 +1,64 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { ConsoleLogger } from '../../src/logger/console-logger' + +describe('ConsoleLogger', () => { + const debugSpy = vi.spyOn(console, 'debug').mockImplementation(() => {}) + const infoSpy = vi.spyOn(console, 'info').mockImplementation(() => {}) + const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}) + const errorSpy = 
vi.spyOn(console, 'error').mockImplementation(() => {}) + const dirSpy = vi.spyOn(console, 'dir').mockImplementation(() => {}) + + afterEach(() => { + debugSpy.mockClear() + infoSpy.mockClear() + warnSpy.mockClear() + errorSpy.mockClear() + dirSpy.mockClear() + }) + + it('routes debug to console.debug', () => { + new ConsoleLogger().debug('hello') + expect(debugSpy).toHaveBeenCalledWith('hello') + expect(dirSpy).not.toHaveBeenCalled() + }) + + it('routes info to console.info', () => { + new ConsoleLogger().info('hello') + expect(infoSpy).toHaveBeenCalledWith('hello') + expect(dirSpy).not.toHaveBeenCalled() + }) + + it('routes warn to console.warn', () => { + new ConsoleLogger().warn('hello') + expect(warnSpy).toHaveBeenCalledWith('hello') + expect(dirSpy).not.toHaveBeenCalled() + }) + + it('routes error to console.error', () => { + new ConsoleLogger().error('oops') + expect(errorSpy).toHaveBeenCalledWith('oops') + expect(dirSpy).not.toHaveBeenCalled() + }) + + it('prints meta via console.dir with depth: null when provided', () => { + const meta = { key: 1 } + new ConsoleLogger().debug('msg', meta) + expect(debugSpy).toHaveBeenCalledWith('msg') + expect(dirSpy).toHaveBeenCalledWith(meta, { depth: null, colors: true }) + }) + + it('omits console.dir when meta is not provided', () => { + new ConsoleLogger().info('msg') + expect(infoSpy).toHaveBeenCalledWith('msg') + expect(infoSpy.mock.calls[0]?.length).toBe(1) + expect(dirSpy).not.toHaveBeenCalled() + }) + + it('implements the Logger interface', () => { + const logger: import('../../src/logger/types').Logger = new ConsoleLogger() + expect(typeof logger.debug).toBe('function') + expect(typeof logger.info).toBe('function') + expect(typeof logger.warn).toBe('function') + expect(typeof logger.error).toBe('function') + }) +}) diff --git a/packages/typescript/ai/tests/logger/internal-logger.test.ts b/packages/typescript/ai/tests/logger/internal-logger.test.ts new file mode 100644 index 000000000..56d6c69b0 --- /dev/null 
+++ b/packages/typescript/ai/tests/logger/internal-logger.test.ts @@ -0,0 +1,111 @@ +import { describe, expect, it } from 'vitest' +import { InternalLogger } from '../../src/logger/internal-logger' +import type { Logger } from '../../src/logger/types' + +type SpyCall = [keyof Logger, string, Record?] + +const makeSpyLogger = () => { + const calls: Array = [] + const logger: Logger = { + debug: (m, meta) => { + calls.push(['debug', m, meta]) + }, + info: (m, meta) => { + calls.push(['info', m, meta]) + }, + warn: (m, meta) => { + calls.push(['warn', m, meta]) + }, + error: (m, meta) => { + calls.push(['error', m, meta]) + }, + } + return { logger, calls } +} + +const allOn = { + provider: true, + output: true, + middleware: true, + tools: true, + agentLoop: true, + config: true, + errors: true, + request: true, +} + +const allOff = { + provider: false, + output: false, + middleware: false, + tools: false, + agentLoop: false, + config: false, + errors: false, + request: false, +} + +describe('InternalLogger', () => { + it('prepends emoji + [tanstack-ai:] + emoji prefix on each category method', () => { + const { logger, calls } = makeSpyLogger() + const il = new InternalLogger(logger, allOn) + il.provider('received', { type: 'text' }) + il.output('yielded') + il.middleware('before') + il.tools('called') + il.agentLoop('iter 1') + il.config('transform') + il.errors('boom', { err: new Error('x') }) + il.request('started') + expect(calls).toEqual([ + ['debug', '📥 [tanstack-ai:provider] 📥 received', { type: 'text' }], + ['debug', '📨 [tanstack-ai:output] 📨 yielded', undefined], + ['debug', '🧩 [tanstack-ai:middleware] 🧩 before', undefined], + ['debug', '🔧 [tanstack-ai:tools] 🔧 called', undefined], + ['debug', '🔁 [tanstack-ai:agentLoop] 🔁 iter 1', undefined], + ['debug', '⚙️ [tanstack-ai:config] ⚙️ transform', undefined], + ['error', '❌ [tanstack-ai:errors] ❌ boom', { err: expect.any(Error) }], + ['debug', '📤 [tanstack-ai:request] 📤 started', undefined], + ]) + }) + + 
it('no-ops when category is disabled', () => { + const { logger, calls } = makeSpyLogger() + const il = new InternalLogger(logger, { ...allOn, middleware: false }) + il.middleware('x') + il.provider('y') + expect(calls).toEqual([ + ['debug', '📥 [tanstack-ai:provider] 📥 y', undefined], + ]) + }) + + it('all categories off produces zero calls', () => { + const { logger, calls } = makeSpyLogger() + const il = new InternalLogger(logger, allOff) + il.provider('x') + il.output('x') + il.middleware('x') + il.tools('x') + il.agentLoop('x') + il.config('x') + il.errors('x') + il.request('x') + expect(calls).toEqual([]) + }) + + it('errors uses logger.error; everything else uses logger.debug', () => { + const { logger, calls } = makeSpyLogger() + const il = new InternalLogger(logger, allOn) + il.errors('e') + il.provider('p') + expect(calls[0]?.[0]).toBe('error') + expect(calls[1]?.[0]).toBe('debug') + }) + + it('exposes isEnabled(category) helper for hot-path guards', () => { + const { logger } = makeSpyLogger() + const il = new InternalLogger(logger, { ...allOn, provider: false }) + expect(il.isEnabled('provider')).toBe(false) + expect(il.isEnabled('output')).toBe(true) + }) +}) diff --git a/packages/typescript/ai/tests/logger/resolve.test.ts b/packages/typescript/ai/tests/logger/resolve.test.ts new file mode 100644 index 000000000..f8960620c --- /dev/null +++ b/packages/typescript/ai/tests/logger/resolve.test.ts @@ -0,0 +1,106 @@ +import { describe, expect, it, vi } from 'vitest' +import { ConsoleLogger } from '../../src/logger/console-logger' +import { InternalLogger } from '../../src/logger/internal-logger' +import { resolveDebugOption } from '../../src/logger/resolve' +import type { Logger } from '../../src/logger/types' + +const makeCustomLogger = (): Logger => ({ + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), +}) + +const ALL_CATEGORIES = [ + 'provider', + 'output', + 'middleware', + 'tools', + 'agentLoop', + 'config', + 'errors', + 'request', 
+] as const + +describe('resolveDebugOption', () => { + it('returns an InternalLogger for every input shape', () => { + expect(resolveDebugOption(undefined)).toBeInstanceOf(InternalLogger) + expect(resolveDebugOption(true)).toBeInstanceOf(InternalLogger) + expect(resolveDebugOption(false)).toBeInstanceOf(InternalLogger) + expect(resolveDebugOption({})).toBeInstanceOf(InternalLogger) + expect(resolveDebugOption({ logger: makeCustomLogger() })).toBeInstanceOf( + InternalLogger, + ) + }) + + it('undefined → errors=true, others=false', () => { + const il = resolveDebugOption(undefined) + expect(il.isEnabled('errors')).toBe(true) + for (const cat of ALL_CATEGORIES.filter((c) => c !== 'errors')) { + expect(il.isEnabled(cat)).toBe(false) + } + }) + + it('true → all categories on', () => { + const il = resolveDebugOption(true) + for (const cat of ALL_CATEGORIES) expect(il.isEnabled(cat)).toBe(true) + }) + + it('false → all categories off (incl. errors)', () => { + const il = resolveDebugOption(false) + for (const cat of ALL_CATEGORIES) expect(il.isEnabled(cat)).toBe(false) + }) + + it('empty object → all categories on with default ConsoleLogger', () => { + const il = resolveDebugOption({}) + for (const cat of ALL_CATEGORIES) expect(il.isEnabled(cat)).toBe(true) + }) + + it('partial object: unspecified categories default to true, explicit false is respected', () => { + const il = resolveDebugOption({ middleware: false }) + expect(il.isEnabled('middleware')).toBe(false) + expect(il.isEnabled('provider')).toBe(true) + expect(il.isEnabled('output')).toBe(true) + expect(il.isEnabled('errors')).toBe(true) + }) + + it('explicit errors=false is respected', () => { + const il = resolveDebugOption({ errors: false }) + expect(il.isEnabled('errors')).toBe(false) + expect(il.isEnabled('provider')).toBe(true) + }) + + it('{ logger } → uses supplied logger and all categories on', () => { + const custom = makeCustomLogger() + const il = resolveDebugOption({ logger: custom }) + 
il.provider('x') + expect(custom.debug).toHaveBeenCalledWith( + '📥 [tanstack-ai:provider] 📥 x', + undefined, + ) + for (const cat of ALL_CATEGORIES) expect(il.isEnabled(cat)).toBe(true) + }) + + it('{ logger, tools: false } → custom logger, tools off, others on', () => { + const custom = makeCustomLogger() + const il = resolveDebugOption({ logger: custom, tools: false }) + expect(il.isEnabled('tools')).toBe(false) + expect(il.isEnabled('provider')).toBe(true) + il.provider('p') + expect(custom.debug).toHaveBeenCalledWith( + '📥 [tanstack-ai:provider] 📥 p', + undefined, + ) + }) + + it('default instance routes through a ConsoleLogger', () => { + const spy = vi.spyOn(console, 'debug').mockImplementation(() => {}) + resolveDebugOption(true).provider('hi') + expect(spy).toHaveBeenCalledWith('📥 [tanstack-ai:provider] 📥 hi') + spy.mockRestore() + }) +}) + +// Keep ConsoleLogger import used: ensure the default is indeed a ConsoleLogger +// (covered indirectly by the last test). +void ConsoleLogger diff --git a/packages/typescript/ai/tests/logger/types.test.ts b/packages/typescript/ai/tests/logger/types.test.ts new file mode 100644 index 000000000..6af840385 --- /dev/null +++ b/packages/typescript/ai/tests/logger/types.test.ts @@ -0,0 +1,66 @@ +import { describe, expectTypeOf, it } from 'vitest' +import type { + DebugCategories, + DebugConfig, + DebugOption, + Logger, +} from '../../src/logger/types' + +describe('logger types', () => { + it('Logger has debug/info/warn/error methods accepting (message, meta?)', () => { + expectTypeOf().parameters.toEqualTypeOf< + [string, Record?] + >() + expectTypeOf().parameters.toEqualTypeOf< + [string, Record?] + >() + expectTypeOf().parameters.toEqualTypeOf< + [string, Record?] + >() + expectTypeOf().parameters.toEqualTypeOf< + [string, Record?] 
+ >() + }) + + it('Logger methods all return void', () => { + expectTypeOf().returns.toEqualTypeOf() + expectTypeOf().returns.toEqualTypeOf() + expectTypeOf().returns.toEqualTypeOf() + expectTypeOf().returns.toEqualTypeOf() + }) + + it('DebugCategories has all eight optional boolean flags and allows empty object', () => { + expectTypeOf().toEqualTypeOf<{ + provider?: boolean + output?: boolean + middleware?: boolean + tools?: boolean + agentLoop?: boolean + config?: boolean + errors?: boolean + request?: boolean + }>() + const empty: DebugCategories = {} + void empty + }) + + it('DebugConfig extends DebugCategories and adds optional logger; empty object is valid', () => { + expectTypeOf().toMatchTypeOf() + const withLogger: DebugConfig = { + logger: { debug() {}, info() {}, warn() {}, error() {} }, + } + const empty: DebugConfig = {} + void withLogger + void empty + }) + + it('DebugOption equals boolean | DebugConfig', () => { + expectTypeOf().toEqualTypeOf() + const a: DebugOption = true + const b: DebugOption = false + const c: DebugOption = { middleware: false } + void a + void b + void c + }) +}) diff --git a/packages/typescript/ai/tests/stream-generation.test.ts b/packages/typescript/ai/tests/stream-generation.test.ts index 8cb554cf4..155b10d65 100644 --- a/packages/typescript/ai/tests/stream-generation.test.ts +++ b/packages/typescript/ai/tests/stream-generation.test.ts @@ -290,6 +290,7 @@ describe('generateVideo({ stream: true })', () => { size: '1280x720', duration: 5, modelOptions: undefined, + logger: expect.any(Object), }) }) diff --git a/packages/typescript/ai/vite.config.ts b/packages/typescript/ai/vite.config.ts index 2a28101da..cb2d342e3 100644 --- a/packages/typescript/ai/vite.config.ts +++ b/packages/typescript/ai/vite.config.ts @@ -33,6 +33,7 @@ export default mergeConfig( './src/index.ts', './src/activities/index.ts', './src/middlewares/index.ts', + './src/adapter-internals.ts', ], srcDir: './src', cjs: false,