import crypto from "node:crypto" ;
import fs from "node:fs" ;
import { resolveAgentModelFallbacksOverride } from "../../agents/agent-scope.js" ;
import { runCliAgent } from "../../agents/cli-runner.js" ;
import { getCliSessionId } from "../../agents/cli-session.js" ;
import { runWithModelFallback } from "../../agents/model-fallback.js" ;
import { isCliProvider } from "../../agents/model-selection.js" ;
import { runEmbeddedPiAgent } from "../../agents/pi-embedded.js" ;
import {
isCompactionFailureError ,
isContextOverflowError ,
isLikelyContextOverflowError ,
sanitizeUserFacingText ,
} from "../../agents/pi-embedded-helpers.js" ;
import {
resolveAgentIdFromSessionKey ,
resolveSessionTranscriptPath ,
type SessionEntry ,
updateSessionStore ,
} from "../../config/sessions.js" ;
import { logVerbose } from "../../globals.js" ;
import { emitAgentEvent , registerAgentRunContext } from "../../infra/agent-events.js" ;
import { defaultRuntime } from "../../runtime.js" ;
import {
isMarkdownCapableMessageChannel ,
resolveMessageChannel ,
} from "../../utils/message-channel.js" ;
import { stripHeartbeatToken } from "../heartbeat.js" ;
import type { TemplateContext } from "../templating.js" ;
import type { VerboseLevel } from "../thinking.js" ;
import { isSilentReplyText , SILENT_REPLY_TOKEN } from "../tokens.js" ;
import type { GetReplyOptions , ReplyPayload } from "../types.js" ;
import { buildThreadingToolContext , resolveEnforceFinalTag } from "./agent-runner-utils.js" ;
import { createBlockReplyPayloadKey , type BlockReplyPipeline } from "./block-reply-pipeline.js" ;
import type { FollowupRun } from "./queue.js" ;
import { parseReplyDirectives } from "./reply-directives.js" ;
import { applyReplyTagsToPayload , isRenderablePayload } from "./reply-payloads.js" ;
import type { TypingSignaler } from "./typing-mode.js" ;
export type AgentRunLoopResult =
| {
kind : "success" ;
runResult : Awaited < ReturnType < typeof runEmbeddedPiAgent > > ;
fallbackProvider? : string ;
fallbackModel? : string ;
didLogHeartbeatStrip : boolean ;
autoCompactionCompleted : boolean ;
2026-01-15 20:55:52 -08:00
/** Payload keys sent directly (not via pipeline) during tool flush. */
directlySentBlockKeys? : Set < string > ;
2026-01-14 09:11:16 +00:00
}
| { kind : "final" ; payload : ReplyPayload } ;
export async function runAgentTurnWithFallback ( params : {
commandBody : string ;
followupRun : FollowupRun ;
sessionCtx : TemplateContext ;
opts? : GetReplyOptions ;
typingSignals : TypingSignaler ;
blockReplyPipeline : BlockReplyPipeline | null ;
blockStreamingEnabled : boolean ;
blockReplyChunking ? : {
minChars : number ;
maxChars : number ;
breakPreference : "paragraph" | "newline" | "sentence" ;
} ;
resolvedBlockStreamingBreak : "text_end" | "message_end" ;
applyReplyToMode : ( payload : ReplyPayload ) = > ReplyPayload ;
shouldEmitToolResult : ( ) = > boolean ;
2026-01-17 05:33:27 +00:00
shouldEmitToolOutput : ( ) = > boolean ;
2026-01-14 09:11:16 +00:00
pendingToolTasks : Set < Promise < void > > ;
resetSessionAfterCompactionFailure : ( reason : string ) = > Promise < boolean > ;
2026-01-16 09:03:54 +00:00
resetSessionAfterRoleOrderingConflict : ( reason : string ) = > Promise < boolean > ;
2026-01-14 09:11:16 +00:00
isHeartbeat : boolean ;
sessionKey? : string ;
getActiveSessionEntry : ( ) = > SessionEntry | undefined ;
activeSessionStore? : Record < string , SessionEntry > ;
storePath? : string ;
resolvedVerboseLevel : VerboseLevel ;
} ) : Promise < AgentRunLoopResult > {
let didLogHeartbeatStrip = false ;
let autoCompactionCompleted = false ;
2026-01-15 20:55:52 -08:00
// Track payloads sent directly (not via pipeline) during tool flush to avoid duplicates.
const directlySentBlockKeys = new Set < string > ( ) ;
2026-01-14 09:11:16 +00:00
2026-01-23 22:51:37 +00:00
const runId = params . opts ? . runId ? ? crypto . randomUUID ( ) ;
params . opts ? . onAgentRunStart ? . ( runId ) ;
2026-01-14 09:11:16 +00:00
if ( params . sessionKey ) {
registerAgentRunContext ( runId , {
sessionKey : params.sessionKey ,
verboseLevel : params.resolvedVerboseLevel ,
} ) ;
}
let runResult : Awaited < ReturnType < typeof runEmbeddedPiAgent > > ;
let fallbackProvider = params . followupRun . run . provider ;
let fallbackModel = params . followupRun . run . model ;
let didResetAfterCompactionFailure = false ;
while ( true ) {
try {
const allowPartialStream = ! (
2026-01-19 00:34:16 +00:00
params . followupRun . run . reasoningLevel === "stream" && params . opts ? . onReasoningStream
2026-01-14 09:11:16 +00:00
) ;
2026-01-19 00:34:16 +00:00
const normalizeStreamingText = ( payload : ReplyPayload ) : { text? : string ; skip : boolean } = > {
2026-01-14 09:11:16 +00:00
if ( ! allowPartialStream ) return { skip : true } ;
let text = payload . text ;
if ( ! params . isHeartbeat && text ? . includes ( "HEARTBEAT_OK" ) ) {
const stripped = stripHeartbeatToken ( text , {
mode : "message" ,
} ) ;
if ( stripped . didStrip && ! didLogHeartbeatStrip ) {
didLogHeartbeatStrip = true ;
logVerbose ( "Stripped stray HEARTBEAT_OK token from reply" ) ;
}
if ( stripped . shouldSkip && ( payload . mediaUrls ? . length ? ? 0 ) === 0 ) {
return { skip : true } ;
}
text = stripped . text ;
}
if ( isSilentReplyText ( text , SILENT_REPLY_TOKEN ) ) {
return { skip : true } ;
}
2026-01-16 03:00:40 +00:00
if ( ! text ) return { skip : true } ;
const sanitized = sanitizeUserFacingText ( text ) ;
if ( ! sanitized . trim ( ) ) return { skip : true } ;
return { text : sanitized , skip : false } ;
2026-01-14 09:11:16 +00:00
} ;
2026-01-19 00:34:16 +00:00
const handlePartialForTyping = async ( payload : ReplyPayload ) : Promise < string | undefined > = > {
2026-01-14 09:11:16 +00:00
const { text , skip } = normalizeStreamingText ( payload ) ;
if ( skip || ! text ) return undefined ;
await params . typingSignals . signalTextDelta ( text ) ;
return text ;
} ;
const blockReplyPipeline = params . blockReplyPipeline ;
const onToolResult = params . opts ? . onToolResult ;
const fallbackResult = await runWithModelFallback ( {
cfg : params.followupRun.run.config ,
provider : params.followupRun.run.provider ,
model : params.followupRun.run.model ,
fallbacksOverride : resolveAgentModelFallbacksOverride (
params . followupRun . run . config ,
resolveAgentIdFromSessionKey ( params . followupRun . run . sessionKey ) ,
) ,
run : ( provider , model ) = > {
feat: add dynamic template variables to messages.responsePrefix (#923)
Adds support for template variables in `messages.responsePrefix` that
resolve dynamically at runtime with the actual model used (including
after fallback).
Supported variables (case-insensitive):
- {model} - short model name (e.g., "claude-opus-4-5", "gpt-4o")
- {modelFull} - full model identifier (e.g., "anthropic/claude-opus-4-5")
- {provider} - provider name (e.g., "anthropic", "openai")
- {thinkingLevel} or {think} - thinking level ("high", "low", "off")
- {identity.name} or {identityName} - agent identity name
Example: "[{model} | think:{thinkingLevel}]" → "[claude-opus-4-5 | think:high]"
Variables show the actual model used after fallback, not the intended
model. Unresolved variables remain as literal text.
Implementation:
- New module: src/auto-reply/reply/response-prefix-template.ts
- Template interpolation in normalize-reply.ts via context provider
- onModelSelected callback in agent-runner-execution.ts
- Updated all 6 provider message handlers (web, signal, discord,
telegram, slack, imessage)
- 27 unit tests covering all variables and edge cases
- Documentation in docs/gateway/configuration.md and JSDoc
Fixes #923
2026-01-14 23:05:08 -05:00
// Notify that model selection is complete (including after fallback).
// This allows responsePrefix template interpolation with the actual model.
params . opts ? . onModelSelected ? . ( {
provider ,
model ,
thinkLevel : params.followupRun.run.thinkLevel ,
} ) ;
2026-01-14 09:11:16 +00:00
if ( isCliProvider ( provider , params . followupRun . run . config ) ) {
const startedAt = Date . now ( ) ;
emitAgentEvent ( {
runId ,
stream : "lifecycle" ,
data : {
phase : "start" ,
startedAt ,
} ,
} ) ;
2026-01-19 00:34:16 +00:00
const cliSessionId = getCliSessionId ( params . getActiveSessionEntry ( ) , provider ) ;
2026-01-14 09:11:16 +00:00
return runCliAgent ( {
sessionId : params.followupRun.run.sessionId ,
sessionKey : params.sessionKey ,
sessionFile : params.followupRun.run.sessionFile ,
workspaceDir : params.followupRun.run.workspaceDir ,
config : params.followupRun.run.config ,
prompt : params.commandBody ,
provider ,
model ,
thinkLevel : params.followupRun.run.thinkLevel ,
timeoutMs : params.followupRun.run.timeoutMs ,
runId ,
extraSystemPrompt : params.followupRun.run.extraSystemPrompt ,
ownerNumbers : params.followupRun.run.ownerNumbers ,
cliSessionId ,
2026-01-23 22:51:37 +00:00
images : params.opts?.images ,
2026-01-14 09:11:16 +00:00
} )
. then ( ( result ) = > {
emitAgentEvent ( {
runId ,
stream : "lifecycle" ,
data : {
phase : "end" ,
startedAt ,
endedAt : Date.now ( ) ,
} ,
} ) ;
return result ;
} )
. catch ( ( err ) = > {
emitAgentEvent ( {
runId ,
stream : "lifecycle" ,
data : {
phase : "error" ,
startedAt ,
endedAt : Date.now ( ) ,
error : err instanceof Error ? err.message : String ( err ) ,
} ,
} ) ;
throw err ;
} ) ;
}
2026-01-18 08:22:50 +00:00
const authProfileId =
provider === params . followupRun . run . provider
? params . followupRun . run . authProfileId
: undefined ;
2026-01-14 09:11:16 +00:00
return runEmbeddedPiAgent ( {
sessionId : params.followupRun.run.sessionId ,
sessionKey : params.sessionKey ,
2026-01-19 00:34:16 +00:00
messageProvider : params.sessionCtx.Provider?.trim ( ) . toLowerCase ( ) || undefined ,
2026-01-14 09:11:16 +00:00
agentAccountId : params.sessionCtx.AccountId ,
2026-01-20 17:22:07 +00:00
messageTo : params.sessionCtx.OriginatingTo ? ? params . sessionCtx . To ,
messageThreadId : params.sessionCtx.MessageThreadId ? ? undefined ,
2026-01-14 09:11:16 +00:00
// Provider threading context for tool auto-injection
. . . buildThreadingToolContext ( {
sessionCtx : params.sessionCtx ,
config : params.followupRun.run.config ,
hasRepliedRef : params.opts?.hasRepliedRef ,
} ) ,
sessionFile : params.followupRun.run.sessionFile ,
workspaceDir : params.followupRun.run.workspaceDir ,
agentDir : params.followupRun.run.agentDir ,
config : params.followupRun.run.config ,
skillsSnapshot : params.followupRun.run.skillsSnapshot ,
prompt : params.commandBody ,
extraSystemPrompt : params.followupRun.run.extraSystemPrompt ,
ownerNumbers : params.followupRun.run.ownerNumbers ,
2026-01-19 00:34:16 +00:00
enforceFinalTag : resolveEnforceFinalTag ( params . followupRun . run , provider ) ,
2026-01-14 09:11:16 +00:00
provider ,
model ,
2026-01-18 08:22:50 +00:00
authProfileId ,
authProfileIdSource : authProfileId
? params . followupRun . run . authProfileIdSource
: undefined ,
2026-01-14 09:11:16 +00:00
thinkLevel : params.followupRun.run.thinkLevel ,
verboseLevel : params.followupRun.run.verboseLevel ,
reasoningLevel : params.followupRun.run.reasoningLevel ,
2026-01-18 06:11:38 +00:00
execOverrides : params.followupRun.run.execOverrides ,
2026-01-17 10:17:57 +00:00
toolResultFormat : ( ( ) = > {
const channel = resolveMessageChannel (
params . sessionCtx . Surface ,
params . sessionCtx . Provider ,
) ;
if ( ! channel ) return "markdown" ;
2026-01-19 00:34:16 +00:00
return isMarkdownCapableMessageChannel ( channel ) ? "markdown" : "plain" ;
2026-01-17 10:17:57 +00:00
} ) ( ) ,
2026-01-14 09:11:16 +00:00
bashElevated : params.followupRun.run.bashElevated ,
timeoutMs : params.followupRun.run.timeoutMs ,
runId ,
2026-01-23 22:51:37 +00:00
images : params.opts?.images ,
abortSignal : params.opts?.abortSignal ,
2026-01-14 09:11:16 +00:00
blockReplyBreak : params.resolvedBlockStreamingBreak ,
blockReplyChunking : params.blockReplyChunking ,
onPartialReply : allowPartialStream
? async ( payload ) = > {
const textForTyping = await handlePartialForTyping ( payload ) ;
2026-01-19 00:34:16 +00:00
if ( ! params . opts ? . onPartialReply || textForTyping === undefined ) return ;
2026-01-14 09:11:16 +00:00
await params . opts . onPartialReply ( {
text : textForTyping ,
mediaUrls : payload.mediaUrls ,
} ) ;
}
: undefined ,
onAssistantMessageStart : async ( ) = > {
await params . typingSignals . signalMessageStart ( ) ;
} ,
onReasoningStream :
2026-01-19 00:34:16 +00:00
params . typingSignals . shouldStartOnReasoning || params . opts ? . onReasoningStream
2026-01-14 09:11:16 +00:00
? async ( payload ) = > {
await params . typingSignals . signalReasoningDelta ( ) ;
await params . opts ? . onReasoningStream ? . ( {
text : payload.text ,
mediaUrls : payload.mediaUrls ,
} ) ;
}
: undefined ,
2026-01-15 20:55:52 -08:00
onAgentEvent : async ( evt ) = > {
// Trigger typing when tools start executing.
// Must await to ensure typing indicator starts before tool summaries are emitted.
2026-01-14 09:11:16 +00:00
if ( evt . stream === "tool" ) {
2026-01-19 00:34:16 +00:00
const phase = typeof evt . data . phase === "string" ? evt . data . phase : "" ;
2026-01-14 09:11:16 +00:00
if ( phase === "start" || phase === "update" ) {
2026-01-15 20:55:52 -08:00
await params . typingSignals . signalToolStart ( ) ;
2026-01-14 09:11:16 +00:00
}
}
// Track auto-compaction completion
if ( evt . stream === "compaction" ) {
2026-01-19 00:34:16 +00:00
const phase = typeof evt . data . phase === "string" ? evt . data . phase : "" ;
2026-01-14 09:11:16 +00:00
const willRetry = Boolean ( evt . data . willRetry ) ;
if ( phase === "end" && ! willRetry ) {
autoCompactionCompleted = true ;
}
}
} ,
2026-01-15 20:55:52 -08:00
// Always pass onBlockReply so flushBlockReplyBuffer works before tool execution,
// even when regular block streaming is disabled. The handler sends directly
// via opts.onBlockReply when the pipeline isn't available.
onBlockReply : params.opts?.onBlockReply
? async ( payload ) = > {
const { text , skip } = normalizeStreamingText ( payload ) ;
const hasPayloadMedia = ( payload . mediaUrls ? . length ? ? 0 ) > 0 ;
if ( skip && ! hasPayloadMedia ) return ;
2026-01-21 00:39:39 -08:00
const currentMessageId =
params . sessionCtx . MessageSidFull ? ? params . sessionCtx . MessageSid ;
2026-01-15 20:55:52 -08:00
const taggedPayload = applyReplyTagsToPayload (
{
text ,
mediaUrls : payload.mediaUrls ,
mediaUrl : payload.mediaUrls?. [ 0 ] ,
2026-01-19 23:40:22 -08:00
replyToId : payload.replyToId ,
replyToTag : payload.replyToTag ,
replyToCurrent : payload.replyToCurrent ,
2026-01-15 20:55:52 -08:00
} ,
2026-01-21 00:39:39 -08:00
currentMessageId ,
2026-01-15 20:55:52 -08:00
) ;
// Let through payloads with audioAsVoice flag even if empty (need to track it)
2026-01-19 00:34:16 +00:00
if ( ! isRenderablePayload ( taggedPayload ) && ! payload . audioAsVoice ) return ;
const parsed = parseReplyDirectives ( taggedPayload . text ? ? "" , {
2026-01-21 00:39:39 -08:00
currentMessageId ,
2026-01-19 00:34:16 +00:00
silentToken : SILENT_REPLY_TOKEN ,
} ) ;
2026-01-15 20:55:52 -08:00
const cleaned = parsed . text || undefined ;
const hasRenderableMedia =
2026-01-19 00:34:16 +00:00
Boolean ( taggedPayload . mediaUrl ) || ( taggedPayload . mediaUrls ? . length ? ? 0 ) > 0 ;
2026-01-15 20:55:52 -08:00
// Skip empty payloads unless they have audioAsVoice flag (need to track it)
if (
! cleaned &&
! hasRenderableMedia &&
! payload . audioAsVoice &&
! parsed . audioAsVoice
)
return ;
if ( parsed . isSilent && ! hasRenderableMedia ) return ;
2026-01-14 09:11:16 +00:00
2026-01-15 20:55:52 -08:00
const blockPayload : ReplyPayload = params . applyReplyToMode ( {
. . . taggedPayload ,
text : cleaned ,
2026-01-19 00:34:16 +00:00
audioAsVoice : Boolean ( parsed . audioAsVoice || payload . audioAsVoice ) ,
2026-01-15 20:55:52 -08:00
replyToId : taggedPayload.replyToId ? ? parsed . replyToId ,
replyToTag : taggedPayload.replyToTag || parsed . replyToTag ,
2026-01-19 00:34:16 +00:00
replyToCurrent : taggedPayload.replyToCurrent || parsed . replyToCurrent ,
2026-01-15 20:55:52 -08:00
} ) ;
2026-01-14 09:11:16 +00:00
2026-01-15 20:55:52 -08:00
void params . typingSignals
. signalTextDelta ( cleaned ? ? taggedPayload . text )
. catch ( ( err ) = > {
2026-01-19 00:34:16 +00:00
logVerbose ( ` block reply typing signal failed: ${ String ( err ) } ` ) ;
2026-01-15 20:55:52 -08:00
} ) ;
2026-01-14 09:11:16 +00:00
2026-01-15 20:55:52 -08:00
// Use pipeline if available (block streaming enabled), otherwise send directly
2026-01-19 00:34:16 +00:00
if ( params . blockStreamingEnabled && params . blockReplyPipeline ) {
2026-01-15 20:55:52 -08:00
params . blockReplyPipeline . enqueue ( blockPayload ) ;
} else {
// Send directly when flushing before tool execution (no streaming).
// Track sent key to avoid duplicate in final payloads.
2026-01-19 00:34:16 +00:00
directlySentBlockKeys . add ( createBlockReplyPayloadKey ( blockPayload ) ) ;
2026-01-15 20:55:52 -08:00
await params . opts ? . onBlockReply ? . ( blockPayload ) ;
2026-01-14 09:11:16 +00:00
}
2026-01-15 20:55:52 -08:00
}
: undefined ,
2026-01-14 09:11:16 +00:00
onBlockReplyFlush :
params . blockStreamingEnabled && blockReplyPipeline
? async ( ) = > {
await blockReplyPipeline . flush ( { force : true } ) ;
}
: undefined ,
shouldEmitToolResult : params.shouldEmitToolResult ,
2026-01-17 05:33:27 +00:00
shouldEmitToolOutput : params.shouldEmitToolOutput ,
2026-01-14 09:11:16 +00:00
onToolResult : onToolResult
? ( payload ) = > {
// `subscribeEmbeddedPiSession` may invoke tool callbacks without awaiting them.
// If a tool callback starts typing after the run finalized, we can end up with
// a typing loop that never sees a matching markRunComplete(). Track and drain.
const task = ( async ( ) = > {
const { text , skip } = normalizeStreamingText ( payload ) ;
if ( skip ) return ;
await params . typingSignals . signalTextDelta ( text ) ;
await onToolResult ( {
text ,
mediaUrls : payload.mediaUrls ,
} ) ;
} ) ( )
. catch ( ( err ) = > {
logVerbose ( ` tool result delivery failed: ${ String ( err ) } ` ) ;
} )
. finally ( ( ) = > {
params . pendingToolTasks . delete ( task ) ;
} ) ;
params . pendingToolTasks . add ( task ) ;
}
: undefined ,
} ) ;
} ,
} ) ;
runResult = fallbackResult . result ;
fallbackProvider = fallbackResult . provider ;
fallbackModel = fallbackResult . model ;
// Some embedded runs surface context overflow as an error payload instead of throwing.
// Treat those as a session-level failure and auto-recover by starting a fresh session.
const embeddedError = runResult . meta ? . error ;
if (
embeddedError &&
isContextOverflowError ( embeddedError . message ) &&
! didResetAfterCompactionFailure &&
( await params . resetSessionAfterCompactionFailure ( embeddedError . message ) )
) {
didResetAfterCompactionFailure = true ;
2026-01-18 18:16:20 +00:00
return {
kind : "final" ,
payload : {
2026-01-18 19:37:15 +00:00
text : "⚠️ Context limit exceeded. I've reset our conversation to start fresh - please try again.\n\nTo prevent this, increase your compaction buffer by setting `agents.defaults.compaction.reserveTokensFloor` to 4000 or higher in your config." ,
2026-01-18 18:16:20 +00:00
} ,
} ;
2026-01-14 09:11:16 +00:00
}
2026-01-16 09:03:54 +00:00
if ( embeddedError ? . kind === "role_ordering" ) {
2026-01-19 00:34:16 +00:00
const didReset = await params . resetSessionAfterRoleOrderingConflict ( embeddedError . message ) ;
2026-01-16 09:03:54 +00:00
if ( didReset ) {
return {
kind : "final" ,
payload : {
text : "⚠️ Message ordering conflict. I've reset the conversation - please try again." ,
} ,
} ;
}
}
2026-01-14 09:11:16 +00:00
break ;
} catch ( err ) {
const message = err instanceof Error ? err.message : String ( err ) ;
2026-01-20 10:06:47 +00:00
const isContextOverflow = isLikelyContextOverflowError ( message ) ;
2026-01-14 09:11:16 +00:00
const isCompactionFailure = isCompactionFailureError ( message ) ;
2026-01-19 00:34:16 +00:00
const isSessionCorruption = /function call turn comes immediately after/i . test ( message ) ;
const isRoleOrderingError = /incorrect role information|roles must alternate/i . test ( message ) ;
2026-01-14 09:11:16 +00:00
if (
isCompactionFailure &&
! didResetAfterCompactionFailure &&
( await params . resetSessionAfterCompactionFailure ( message ) )
) {
didResetAfterCompactionFailure = true ;
2026-01-18 18:16:20 +00:00
return {
kind : "final" ,
payload : {
2026-01-18 19:37:15 +00:00
text : "⚠️ Context limit exceeded during compaction. I've reset our conversation to start fresh - please try again.\n\nTo prevent this, increase your compaction buffer by setting `agents.defaults.compaction.reserveTokensFloor` to 4000 or higher in your config." ,
2026-01-18 18:16:20 +00:00
} ,
} ;
2026-01-14 09:11:16 +00:00
}
2026-01-16 09:03:54 +00:00
if ( isRoleOrderingError ) {
2026-01-19 00:34:16 +00:00
const didReset = await params . resetSessionAfterRoleOrderingConflict ( message ) ;
2026-01-16 09:03:54 +00:00
if ( didReset ) {
return {
kind : "final" ,
payload : {
text : "⚠️ Message ordering conflict. I've reset the conversation - please try again." ,
} ,
} ;
}
}
2026-01-14 09:11:16 +00:00
// Auto-recover from Gemini session corruption by resetting the session
if (
isSessionCorruption &&
params . sessionKey &&
params . activeSessionStore &&
params . storePath
) {
2026-01-15 23:06:42 +00:00
const sessionKey = params . sessionKey ;
2026-01-14 09:11:16 +00:00
const corruptedSessionId = params . getActiveSessionEntry ( ) ? . sessionId ;
defaultRuntime . error (
` Session history corrupted (Gemini function call ordering). Resetting session: ${ params . sessionKey } ` ,
) ;
try {
// Delete transcript file if it exists
if ( corruptedSessionId ) {
2026-01-19 00:34:16 +00:00
const transcriptPath = resolveSessionTranscriptPath ( corruptedSessionId ) ;
2026-01-14 09:11:16 +00:00
try {
fs . unlinkSync ( transcriptPath ) ;
} catch {
// Ignore if file doesn't exist
}
}
2026-01-15 23:09:47 +00:00
// Keep the in-memory snapshot consistent with the on-disk store reset.
delete params . activeSessionStore [ sessionKey ] ;
2026-01-15 23:06:42 +00:00
// Remove session entry from store using a fresh, locked snapshot.
await updateSessionStore ( params . storePath , ( store ) = > {
delete store [ sessionKey ] ;
} ) ;
2026-01-14 09:11:16 +00:00
} catch ( cleanupErr ) {
defaultRuntime . error (
` Failed to reset corrupted session ${ params . sessionKey } : ${ String ( cleanupErr ) } ` ,
) ;
}
return {
kind : "final" ,
payload : {
text : "⚠️ Session history was corrupted. I've reset the conversation - please try again!" ,
} ,
} ;
}
defaultRuntime . error ( ` Embedded agent failed before reply: ${ message } ` ) ;
return {
kind : "final" ,
payload : {
text : isContextOverflow
? "⚠️ Context overflow — prompt too large for this model. Try a shorter message or a larger-context model."
2026-01-16 03:00:40 +00:00
: isRoleOrderingError
? "⚠️ Message ordering conflict - please try again. If this persists, use /new to start a fresh session."
2026-01-23 22:33:12 +00:00
: ` ⚠️ Agent failed before reply: ${ message } \ nCheck gateway logs for details. ` ,
2026-01-14 09:11:16 +00:00
} ,
} ;
}
}
return {
kind : "success" ,
runResult ,
fallbackProvider ,
fallbackModel ,
didLogHeartbeatStrip ,
autoCompactionCompleted ,
2026-01-19 00:34:16 +00:00
directlySentBlockKeys : directlySentBlockKeys.size > 0 ? directlySentBlockKeys : undefined ,
2026-01-14 09:11:16 +00:00
} ;
}