2026-01-26 22:05:31 -05:00
|
|
|
import crypto from "node:crypto";
|
|
|
|
|
import fs from "node:fs/promises";
|
|
|
|
|
import os from "node:os";
|
|
|
|
|
import path from "node:path";
|
2026-01-09 19:59:45 +01:00
|
|
|
import { describe, expect, it, vi } from "vitest";
|
2026-01-30 03:15:10 +01:00
|
|
|
import type { OpenClawConfig } from "../config/config.js";
|
2026-03-07 22:50:27 +00:00
|
|
|
import { resetLogger, setLoggerOverride } from "../logging/logger.js";
|
2026-01-26 22:05:31 -05:00
|
|
|
import type { AuthProfileStore } from "./auth-profiles.js";
|
|
|
|
|
import { saveAuthProfileStore } from "./auth-profiles.js";
|
|
|
|
|
import { AUTH_STORE_VERSION } from "./auth-profiles/constants.js";
|
2026-02-23 04:55:43 +00:00
|
|
|
import { isAnthropicBillingError } from "./live-auth-keys.js";
|
2026-02-25 03:46:34 +00:00
|
|
|
import { runWithImageModelFallback, runWithModelFallback } from "./model-fallback.js";
|
2026-02-22 20:01:43 +00:00
|
|
|
import { makeModelFallbackCfg } from "./test-helpers/model-fallback-config-fixture.js";
|
2026-01-09 19:59:45 +01:00
|
|
|
|
2026-02-22 20:01:43 +00:00
|
|
|
// Shorthand alias: tests below build configs via the shared fixture factory.
const makeCfg = makeModelFallbackCfg;
|
2026-01-09 19:59:45 +01:00
|
|
|
|
2026-02-18 05:04:51 +00:00
|
|
|
function makeFallbacksOnlyCfg(): OpenClawConfig {
|
|
|
|
|
return {
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
fallbacks: ["openai/gpt-5.2"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
} as OpenClawConfig;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
function makeProviderFallbackCfg(provider: string): OpenClawConfig {
|
|
|
|
|
return makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: `${provider}/m1`,
|
|
|
|
|
fallbacks: ["fallback/ok-model"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async function withTempAuthStore<T>(
|
|
|
|
|
store: AuthProfileStore,
|
|
|
|
|
run: (tempDir: string) => Promise<T>,
|
|
|
|
|
): Promise<T> {
|
|
|
|
|
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-auth-"));
|
|
|
|
|
saveAuthProfileStore(store, tempDir);
|
|
|
|
|
try {
|
|
|
|
|
return await run(tempDir);
|
|
|
|
|
} finally {
|
|
|
|
|
await fs.rm(tempDir, { recursive: true, force: true });
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async function runWithStoredAuth(params: {
|
|
|
|
|
cfg: OpenClawConfig;
|
|
|
|
|
store: AuthProfileStore;
|
|
|
|
|
provider: string;
|
|
|
|
|
run: (provider: string, model: string) => Promise<string>;
|
|
|
|
|
}) {
|
|
|
|
|
return withTempAuthStore(params.store, async (tempDir) =>
|
|
|
|
|
runWithModelFallback({
|
|
|
|
|
cfg: params.cfg,
|
|
|
|
|
provider: params.provider,
|
|
|
|
|
model: "m1",
|
|
|
|
|
agentDir: tempDir,
|
|
|
|
|
run: params.run,
|
|
|
|
|
}),
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-16 14:52:09 +00:00
|
|
|
async function expectFallsBackToHaiku(params: {
|
|
|
|
|
provider: string;
|
|
|
|
|
model: string;
|
|
|
|
|
firstError: Error;
|
|
|
|
|
}) {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi.fn().mockRejectedValueOnce(params.firstError).mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: params.provider,
|
|
|
|
|
model: params.model,
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(run.mock.calls[1]?.[0]).toBe("anthropic");
|
|
|
|
|
expect(run.mock.calls[1]?.[1]).toBe("claude-haiku-3-5");
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-22 20:01:43 +00:00
|
|
|
function createOverrideFailureRun(params: {
|
|
|
|
|
overrideProvider: string;
|
|
|
|
|
overrideModel: string;
|
|
|
|
|
fallbackProvider: string;
|
|
|
|
|
fallbackModel: string;
|
|
|
|
|
firstError: Error;
|
|
|
|
|
}) {
|
|
|
|
|
return vi.fn().mockImplementation(async (provider, model) => {
|
|
|
|
|
if (provider === params.overrideProvider && model === params.overrideModel) {
|
|
|
|
|
throw params.firstError;
|
|
|
|
|
}
|
|
|
|
|
if (provider === params.fallbackProvider && model === params.fallbackModel) {
|
|
|
|
|
return "ok";
|
|
|
|
|
}
|
|
|
|
|
throw new Error(`unexpected fallback candidate: ${provider}/${model}`);
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 05:43:30 +00:00
|
|
|
function makeSingleProviderStore(params: {
|
|
|
|
|
provider: string;
|
|
|
|
|
usageStat: NonNullable<AuthProfileStore["usageStats"]>[string];
|
|
|
|
|
}): AuthProfileStore {
|
|
|
|
|
const profileId = `${params.provider}:default`;
|
|
|
|
|
return {
|
|
|
|
|
version: AUTH_STORE_VERSION,
|
|
|
|
|
profiles: {
|
|
|
|
|
[profileId]: {
|
|
|
|
|
type: "api_key",
|
|
|
|
|
provider: params.provider,
|
|
|
|
|
key: "test-key",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
usageStats: {
|
|
|
|
|
[profileId]: params.usageStat,
|
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
function createFallbackOnlyRun() {
|
|
|
|
|
return vi.fn().mockImplementation(async (providerId, modelId) => {
|
|
|
|
|
if (providerId === "fallback") {
|
|
|
|
|
return "ok";
|
|
|
|
|
}
|
|
|
|
|
throw new Error(`unexpected provider: ${providerId}/${modelId}`);
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async function expectSkippedUnavailableProvider(params: {
|
|
|
|
|
providerPrefix: string;
|
|
|
|
|
usageStat: NonNullable<AuthProfileStore["usageStats"]>[string];
|
|
|
|
|
expectedReason: string;
|
|
|
|
|
}) {
|
|
|
|
|
const provider = `${params.providerPrefix}-${crypto.randomUUID()}`;
|
|
|
|
|
const cfg = makeProviderFallbackCfg(provider);
|
2026-02-25 19:35:40 -06:00
|
|
|
const primaryStore = makeSingleProviderStore({
|
2026-02-23 05:43:30 +00:00
|
|
|
provider,
|
|
|
|
|
usageStat: params.usageStat,
|
|
|
|
|
});
|
2026-02-25 19:35:40 -06:00
|
|
|
// Include fallback provider profile so the fallback is attempted (not skipped as no-profile).
|
|
|
|
|
const store: AuthProfileStore = {
|
|
|
|
|
...primaryStore,
|
|
|
|
|
profiles: {
|
|
|
|
|
...primaryStore.profiles,
|
|
|
|
|
"fallback:default": {
|
|
|
|
|
type: "api_key",
|
|
|
|
|
provider: "fallback",
|
|
|
|
|
key: "test-key",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
};
|
2026-02-23 05:43:30 +00:00
|
|
|
const run = createFallbackOnlyRun();
|
|
|
|
|
|
|
|
|
|
const result = await runWithStoredAuth({
|
|
|
|
|
cfg,
|
|
|
|
|
store,
|
|
|
|
|
provider,
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run.mock.calls).toEqual([["fallback", "ok-model"]]);
|
|
|
|
|
expect(result.attempts[0]?.reason).toBe(params.expectedReason);
|
|
|
|
|
}
|
|
|
|
|
|
2026-03-06 00:42:59 +03:00
|
|
|
// Canonical provider-error fixtures used by the failover classification tests.

// OpenAI 429 example shape: https://help.openai.com/en/articles/5955604-how-can-i-solve-429-too-many-requests-errors
const OPENAI_RATE_LIMIT_MESSAGE =
  "Rate limit reached for gpt-4.1-mini in organization org_test on requests per min. Limit: 3.000000 / min. Current: 3.000000 / min.";

// Anthropic overloaded_error example shape: https://docs.anthropic.com/en/api/errors
const ANTHROPIC_OVERLOADED_PAYLOAD =
  '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"},"request_id":"req_test"}';

// Issue-backed Anthropic/OpenAI-compatible insufficient_quota payload under HTTP 400:
// https://github.com/openclaw/openclaw/issues/23440
const INSUFFICIENT_QUOTA_PAYLOAD =
  '{"type":"error","error":{"type":"insufficient_quota","message":"Your account has insufficient quota balance to run this request."}}';

// Internal OpenClaw compatibility marker, not a provider API contract.
const MODEL_COOLDOWN_MESSAGE = "model_cooldown: All credentials for model gpt-5 are cooling down";

// SDK/transport compatibility marker, not a provider API contract.
const CONNECTION_ERROR_MESSAGE = "Connection error.";
|
|
|
|
|
|
2026-01-09 19:59:45 +01:00
|
|
|
describe("runWithModelFallback", () => {
|
2026-03-01 21:45:12 -06:00
|
|
|
it("keeps openai gpt-5.3 codex on the openai provider before running", async () => {
|
Agents: add nested subagent orchestration controls and reduce subagent token waste (#14447)
* Agents: add subagent orchestration controls
* Agents: add subagent orchestration controls (WIP uncommitted changes)
* feat(subagents): add depth-based spawn gating for sub-sub-agents
* feat(subagents): tool policy, registry, and announce chain for nested agents
* feat(subagents): system prompt, docs, changelog for nested sub-agents
* fix(subagents): prevent model fallback override, show model during active runs, and block context overflow fallback
Bug 1: When a session has an explicit model override (e.g., gpt/openai-codex),
the fallback candidate logic in resolveFallbackCandidates silently appended the
global primary model (opus) as a backstop. On reinjection/steer with a transient
error, the session could fall back to opus which has a smaller context window
and crash. Fix: when storedModelOverride is set, pass fallbacksOverride ?? []
instead of undefined, preventing the implicit primary backstop.
Bug 2: Active subagents showed 'model n/a' in /subagents list because
resolveModelDisplay only read entry.model/modelProvider (populated after run
completes). Fix: fall back to modelOverride/providerOverride fields which are
populated at spawn time via sessions.patch.
Bug 3: Context overflow errors (prompt too long, context_length_exceeded) could
theoretically escape runEmbeddedPiAgent and be treated as failover candidates
in runWithModelFallback, causing a switch to a model with a smaller context
window. Fix: in runWithModelFallback, detect context overflow errors via
isLikelyContextOverflowError and rethrow them immediately instead of trying the
next model candidate.
* fix(subagents): track spawn depth in session store and fix announce routing for nested agents
* Fix compaction status tracking and dedupe overflow compaction triggers
* fix(subagents): enforce depth block via session store and implement cascade kill
* fix: inject group chat context into system prompt
* fix(subagents): always write model to session store at spawn time
* Preserve spawnDepth when agent handler rewrites session entry
* fix(subagents): suppress announce on steer-restart
* fix(subagents): fallback spawned session model to runtime default
* fix(subagents): enforce spawn depth when caller key resolves by sessionId
* feat(subagents): implement active-first ordering for numeric targets and enhance task display
- Added a test to verify that subagents with numeric targets follow an active-first list ordering.
- Updated `resolveSubagentTarget` to sort subagent runs based on active status and recent activity.
- Enhanced task display in command responses to prevent truncation of long task descriptions.
- Introduced new utility functions for compacting task text and managing subagent run states.
* fix(subagents): show model for active runs via run record fallback
When the spawned model matches the agent's default model, the session
store's override fields are intentionally cleared (isDefault: true).
The model/modelProvider fields are only populated after the run
completes. This left active subagents showing 'model n/a'.
Fix: store the resolved model on SubagentRunRecord at registration
time, and use it as a fallback in both display paths (subagents tool
and /subagents command) when the session store entry has no model info.
Changes:
- SubagentRunRecord: add optional model field
- registerSubagentRun: accept and persist model param
- sessions-spawn-tool: pass resolvedModel to registerSubagentRun
- subagents-tool: pass run record model as fallback to resolveModelDisplay
- commands-subagents: pass run record model as fallback to resolveModelDisplay
* feat(chat): implement session key resolution and reset on sidebar navigation
- Added functions to resolve the main session key and reset chat state when switching sessions from the sidebar.
- Updated the `renderTab` function to handle session key changes when navigating to the chat tab.
- Introduced a test to verify that the session resets to "main" when opening chat from the sidebar navigation.
* fix: subagent timeout=0 passthrough and fallback prompt duplication
Bug 1: runTimeoutSeconds=0 now means 'no timeout' instead of applying 600s default
- sessions-spawn-tool: default to undefined (not 0) when neither timeout param
is provided; use != null check so explicit 0 passes through to gateway
- agent.ts: accept 0 as valid timeout (resolveAgentTimeoutMs already handles
0 → MAX_SAFE_TIMEOUT_MS)
Bug 2: model fallback no longer re-injects the original prompt as a duplicate
- agent.ts: track fallback attempt index; on retries use a short continuation
message instead of the full original prompt since the session file already
contains it from the first attempt
- Also skip re-sending images on fallback retries (already in session)
* feat(subagents): truncate long task descriptions in subagents command output
- Introduced a new utility function to format task previews, limiting their length to improve readability.
- Updated the command handler to use the new formatting function, ensuring task descriptions are truncated appropriately.
- Adjusted related tests to verify that long task descriptions are now truncated in the output.
* refactor(subagents): update subagent registry path resolution and improve command output formatting
- Replaced direct import of STATE_DIR with a utility function to resolve the state directory dynamically.
- Enhanced the formatting of command output for active and recent subagents, adding separators for better readability.
- Updated related tests to reflect changes in command output structure.
* fix(subagent): default sessions_spawn to no timeout when runTimeoutSeconds omitted
The previous fix (75a791106) correctly handled the case where
runTimeoutSeconds was explicitly set to 0 ("no timeout"). However,
when models omit the parameter entirely (which is common since the
schema marks it as optional), runTimeoutSeconds resolved to undefined.
undefined flowed through the chain as:
sessions_spawn → timeout: undefined (since undefined != null is false)
→ gateway agent handler → agentCommand opts.timeout: undefined
→ resolveAgentTimeoutMs({ overrideSeconds: undefined })
→ DEFAULT_AGENT_TIMEOUT_SECONDS (600s = 10 minutes)
This caused subagents to be killed at exactly 10 minutes even though
the user's intent (via TOOLS.md) was for subagents to run without a
timeout.
Fix: default runTimeoutSeconds to 0 (no timeout) when neither
runTimeoutSeconds nor timeoutSeconds is provided by the caller.
Subagent spawns are long-running by design and should not inherit the
600s agent-command default timeout.
* fix(subagent): accept timeout=0 in agent-via-gateway path (second 600s default)
* fix: thread timeout override through getReplyFromConfig dispatch path
getReplyFromConfig called resolveAgentTimeoutMs({ cfg }) with no override,
always falling back to the config default (600s). Add timeoutOverrideSeconds
to GetReplyOptions and pass it through as overrideSeconds so callers of the
dispatch chain can specify a custom timeout (0 = no timeout).
This complements the existing timeout threading in agentCommand and the
cron isolated-agent runner, which already pass overrideSeconds correctly.
* feat(model-fallback): normalize OpenAI Codex model references and enhance fallback handling
- Added normalization for OpenAI Codex model references, specifically converting "gpt-5.3-codex" to "openai-codex" before execution.
- Updated the `resolveFallbackCandidates` function to utilize the new normalization logic.
- Enhanced tests to verify the correct behavior of model normalization and fallback mechanisms.
- Introduced a new test case to ensure that the normalization process works as expected for various input formats.
* feat(tests): add unit tests for steer failure behavior in openclaw-tools
- Introduced a new test file to validate the behavior of subagents when steer replacement dispatch fails.
- Implemented tests to ensure that the announce behavior is restored correctly and that the suppression reason is cleared as expected.
- Enhanced the subagent registry with a new function to clear steer restart suppression.
- Updated related components to support the new test scenarios.
* fix(subagents): replace stop command with kill in slash commands and documentation
- Updated the `/subagents` command to replace `stop` with `kill` for consistency in controlling sub-agent runs.
- Modified related documentation to reflect the change in command usage.
- Removed legacy timeoutSeconds references from the sessions-spawn-tool schema and tests to streamline timeout handling.
- Enhanced tests to ensure correct behavior of the updated commands and their interactions.
* feat(tests): add unit tests for readLatestAssistantReply function
- Introduced a new test file for the `readLatestAssistantReply` function to validate its behavior with various message scenarios.
- Implemented tests to ensure the function correctly retrieves the latest assistant message and handles cases where the latest message has no text.
- Mocked the gateway call to simulate different message histories for comprehensive testing.
* feat(tests): enhance subagent kill-all cascade tests and announce formatting
- Added a new test to verify that the `kill-all` command cascades through ended parents to active descendants in subagents.
- Updated the subagent announce formatting tests to reflect changes in message structure, including the replacement of "Findings:" with "Result:" and the addition of new expectations for message content.
- Improved the handling of long findings and stats in the announce formatting logic to ensure concise output.
- Refactored related functions to enhance clarity and maintainability in the subagent registry and tools.
* refactor(subagent): update announce formatting and remove unused constants
- Modified the subagent announce formatting to replace "Findings:" with "Result:" and adjusted related expectations in tests.
- Removed constants for maximum announce findings characters and summary words, simplifying the announcement logic.
- Updated the handling of findings to retain full content instead of truncating, ensuring more informative outputs.
- Cleaned up unused imports in the commands-subagents file to enhance code clarity.
* feat(tests): enhance billing error handling in user-facing text
- Added tests to ensure that normal text mentioning billing plans is not rewritten, preserving user context.
- Updated the `isBillingErrorMessage` and `sanitizeUserFacingText` functions to improve handling of billing-related messages.
- Introduced new test cases for various scenarios involving billing messages to ensure accurate processing and output.
- Enhanced the subagent announce flow to correctly manage active descendant runs, preventing premature announcements.
* feat(subagent): enhance workflow guidance and auto-announcement clarity
- Added a new guideline in the subagent system prompt to emphasize trust in push-based completion, discouraging busy polling for status updates.
- Updated documentation to clarify that sub-agents will automatically announce their results, improving user understanding of the workflow.
- Enhanced tests to verify the new guidance on avoiding polling loops and to ensure the accuracy of the updated prompts.
* fix(cron): avoid announcing interim subagent spawn acks
* chore: clean post-rebase imports
* fix(cron): fall back to child replies when parent stays interim
* fix(subagents): make active-run guidance advisory
* fix(subagents): update announce flow to handle active descendants and enhance test coverage
- Modified the announce flow to defer announcements when active descendant runs are present, ensuring accurate status reporting.
- Updated tests to verify the new behavior, including scenarios where no fallback requester is available and ensuring proper handling of finished subagents.
- Enhanced the announce formatting to include an `expectFinal` flag for better clarity in the announcement process.
* fix(subagents): enhance announce flow and formatting for user updates
- Updated the announce flow to provide clearer instructions for user updates based on active subagent runs and requester context.
- Refactored the announcement logic to improve clarity and ensure internal context remains private.
- Enhanced tests to verify the new message expectations and formatting, including updated prompts for user-facing updates.
- Introduced a new function to build reply instructions based on session context, improving the overall announcement process.
* fix: resolve prep blockers and changelog placement (#14447) (thanks @tyler6204)
* fix: restore cron delivery-plan import after rebase (#14447) (thanks @tyler6204)
* fix: resolve test failures from rebase conflicts (#14447) (thanks @tyler6204)
* fix: apply formatting after rebase (#14447) (thanks @tyler6204)
2026-02-14 22:03:45 -08:00
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi.fn().mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-5.3-codex",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(1);
|
2026-03-01 21:45:12 -06:00
|
|
|
expect(run).toHaveBeenCalledWith("openai", "gpt-5.3-codex");
|
Agents: add nested subagent orchestration controls and reduce subagent token waste (#14447)
* Agents: add subagent orchestration controls
* Agents: add subagent orchestration controls (WIP uncommitted changes)
* feat(subagents): add depth-based spawn gating for sub-sub-agents
* feat(subagents): tool policy, registry, and announce chain for nested agents
* feat(subagents): system prompt, docs, changelog for nested sub-agents
* fix(subagents): prevent model fallback override, show model during active runs, and block context overflow fallback
Bug 1: When a session has an explicit model override (e.g., gpt/openai-codex),
the fallback candidate logic in resolveFallbackCandidates silently appended the
global primary model (opus) as a backstop. On reinjection/steer with a transient
error, the session could fall back to opus which has a smaller context window
and crash. Fix: when storedModelOverride is set, pass fallbacksOverride ?? []
instead of undefined, preventing the implicit primary backstop.
Bug 2: Active subagents showed 'model n/a' in /subagents list because
resolveModelDisplay only read entry.model/modelProvider (populated after run
completes). Fix: fall back to modelOverride/providerOverride fields which are
populated at spawn time via sessions.patch.
Bug 3: Context overflow errors (prompt too long, context_length_exceeded) could
theoretically escape runEmbeddedPiAgent and be treated as failover candidates
in runWithModelFallback, causing a switch to a model with a smaller context
window. Fix: in runWithModelFallback, detect context overflow errors via
isLikelyContextOverflowError and rethrow them immediately instead of trying the
next model candidate.
* fix(subagents): track spawn depth in session store and fix announce routing for nested agents
* Fix compaction status tracking and dedupe overflow compaction triggers
* fix(subagents): enforce depth block via session store and implement cascade kill
* fix: inject group chat context into system prompt
* fix(subagents): always write model to session store at spawn time
* Preserve spawnDepth when agent handler rewrites session entry
* fix(subagents): suppress announce on steer-restart
* fix(subagents): fallback spawned session model to runtime default
* fix(subagents): enforce spawn depth when caller key resolves by sessionId
* feat(subagents): implement active-first ordering for numeric targets and enhance task display
- Added a test to verify that subagents with numeric targets follow an active-first list ordering.
- Updated `resolveSubagentTarget` to sort subagent runs based on active status and recent activity.
- Enhanced task display in command responses to prevent truncation of long task descriptions.
- Introduced new utility functions for compacting task text and managing subagent run states.
* fix(subagents): show model for active runs via run record fallback
When the spawned model matches the agent's default model, the session
store's override fields are intentionally cleared (isDefault: true).
The model/modelProvider fields are only populated after the run
completes. This left active subagents showing 'model n/a'.
Fix: store the resolved model on SubagentRunRecord at registration
time, and use it as a fallback in both display paths (subagents tool
and /subagents command) when the session store entry has no model info.
Changes:
- SubagentRunRecord: add optional model field
- registerSubagentRun: accept and persist model param
- sessions-spawn-tool: pass resolvedModel to registerSubagentRun
- subagents-tool: pass run record model as fallback to resolveModelDisplay
- commands-subagents: pass run record model as fallback to resolveModelDisplay
* feat(chat): implement session key resolution and reset on sidebar navigation
- Added functions to resolve the main session key and reset chat state when switching sessions from the sidebar.
- Updated the `renderTab` function to handle session key changes when navigating to the chat tab.
- Introduced a test to verify that the session resets to "main" when opening chat from the sidebar navigation.
* fix: subagent timeout=0 passthrough and fallback prompt duplication
Bug 1: runTimeoutSeconds=0 now means 'no timeout' instead of applying 600s default
- sessions-spawn-tool: default to undefined (not 0) when neither timeout param
is provided; use != null check so explicit 0 passes through to gateway
- agent.ts: accept 0 as valid timeout (resolveAgentTimeoutMs already handles
0 → MAX_SAFE_TIMEOUT_MS)
Bug 2: model fallback no longer re-injects the original prompt as a duplicate
- agent.ts: track fallback attempt index; on retries use a short continuation
message instead of the full original prompt since the session file already
contains it from the first attempt
- Also skip re-sending images on fallback retries (already in session)
* feat(subagents): truncate long task descriptions in subagents command output
- Introduced a new utility function to format task previews, limiting their length to improve readability.
- Updated the command handler to use the new formatting function, ensuring task descriptions are truncated appropriately.
- Adjusted related tests to verify that long task descriptions are now truncated in the output.
* refactor(subagents): update subagent registry path resolution and improve command output formatting
- Replaced direct import of STATE_DIR with a utility function to resolve the state directory dynamically.
- Enhanced the formatting of command output for active and recent subagents, adding separators for better readability.
- Updated related tests to reflect changes in command output structure.
* fix(subagent): default sessions_spawn to no timeout when runTimeoutSeconds omitted
The previous fix (75a791106) correctly handled the case where
runTimeoutSeconds was explicitly set to 0 ("no timeout"). However,
when models omit the parameter entirely (which is common since the
schema marks it as optional), runTimeoutSeconds resolved to undefined.
undefined flowed through the chain as:
sessions_spawn → timeout: undefined (since undefined != null is false)
→ gateway agent handler → agentCommand opts.timeout: undefined
→ resolveAgentTimeoutMs({ overrideSeconds: undefined })
→ DEFAULT_AGENT_TIMEOUT_SECONDS (600s = 10 minutes)
This caused subagents to be killed at exactly 10 minutes even though
the user's intent (via TOOLS.md) was for subagents to run without a
timeout.
Fix: default runTimeoutSeconds to 0 (no timeout) when neither
runTimeoutSeconds nor timeoutSeconds is provided by the caller.
Subagent spawns are long-running by design and should not inherit the
600s agent-command default timeout.
* fix(subagent): accept timeout=0 in agent-via-gateway path (second 600s default)
* fix: thread timeout override through getReplyFromConfig dispatch path
getReplyFromConfig called resolveAgentTimeoutMs({ cfg }) with no override,
always falling back to the config default (600s). Add timeoutOverrideSeconds
to GetReplyOptions and pass it through as overrideSeconds so callers of the
dispatch chain can specify a custom timeout (0 = no timeout).
This complements the existing timeout threading in agentCommand and the
cron isolated-agent runner, which already pass overrideSeconds correctly.
* feat(model-fallback): normalize OpenAI Codex model references and enhance fallback handling
- Added normalization for OpenAI Codex model references, specifically converting "gpt-5.3-codex" to "openai-codex" before execution.
- Updated the `resolveFallbackCandidates` function to utilize the new normalization logic.
- Enhanced tests to verify the correct behavior of model normalization and fallback mechanisms.
- Introduced a new test case to ensure that the normalization process works as expected for various input formats.
* feat(tests): add unit tests for steer failure behavior in openclaw-tools
- Introduced a new test file to validate the behavior of subagents when steer replacement dispatch fails.
- Implemented tests to ensure that the announce behavior is restored correctly and that the suppression reason is cleared as expected.
- Enhanced the subagent registry with a new function to clear steer restart suppression.
- Updated related components to support the new test scenarios.
* fix(subagents): replace stop command with kill in slash commands and documentation
- Updated the `/subagents` command to replace `stop` with `kill` for consistency in controlling sub-agent runs.
- Modified related documentation to reflect the change in command usage.
- Removed legacy timeoutSeconds references from the sessions-spawn-tool schema and tests to streamline timeout handling.
- Enhanced tests to ensure correct behavior of the updated commands and their interactions.
* feat(tests): add unit tests for readLatestAssistantReply function
- Introduced a new test file for the `readLatestAssistantReply` function to validate its behavior with various message scenarios.
- Implemented tests to ensure the function correctly retrieves the latest assistant message and handles cases where the latest message has no text.
- Mocked the gateway call to simulate different message histories for comprehensive testing.
* feat(tests): enhance subagent kill-all cascade tests and announce formatting
- Added a new test to verify that the `kill-all` command cascades through ended parents to active descendants in subagents.
- Updated the subagent announce formatting tests to reflect changes in message structure, including the replacement of "Findings:" with "Result:" and the addition of new expectations for message content.
- Improved the handling of long findings and stats in the announce formatting logic to ensure concise output.
- Refactored related functions to enhance clarity and maintainability in the subagent registry and tools.
* refactor(subagent): update announce formatting and remove unused constants
- Modified the subagent announce formatting to replace "Findings:" with "Result:" and adjusted related expectations in tests.
- Removed constants for maximum announce findings characters and summary words, simplifying the announcement logic.
- Updated the handling of findings to retain full content instead of truncating, ensuring more informative outputs.
- Cleaned up unused imports in the commands-subagents file to enhance code clarity.
* feat(tests): enhance billing error handling in user-facing text
- Added tests to ensure that normal text mentioning billing plans is not rewritten, preserving user context.
- Updated the `isBillingErrorMessage` and `sanitizeUserFacingText` functions to improve handling of billing-related messages.
- Introduced new test cases for various scenarios involving billing messages to ensure accurate processing and output.
- Enhanced the subagent announce flow to correctly manage active descendant runs, preventing premature announcements.
* feat(subagent): enhance workflow guidance and auto-announcement clarity
- Added a new guideline in the subagent system prompt to emphasize trust in push-based completion, discouraging busy polling for status updates.
- Updated documentation to clarify that sub-agents will automatically announce their results, improving user understanding of the workflow.
- Enhanced tests to verify the new guidance on avoiding polling loops and to ensure the accuracy of the updated prompts.
* fix(cron): avoid announcing interim subagent spawn acks
* chore: clean post-rebase imports
* fix(cron): fall back to child replies when parent stays interim
* fix(subagents): make active-run guidance advisory
* fix(subagents): update announce flow to handle active descendants and enhance test coverage
- Modified the announce flow to defer announcements when active descendant runs are present, ensuring accurate status reporting.
- Updated tests to verify the new behavior, including scenarios where no fallback requester is available and ensuring proper handling of finished subagents.
- Enhanced the announce formatting to include an `expectFinal` flag for better clarity in the announcement process.
* fix(subagents): enhance announce flow and formatting for user updates
- Updated the announce flow to provide clearer instructions for user updates based on active subagent runs and requester context.
- Refactored the announcement logic to improve clarity and ensure internal context remains private.
- Enhanced tests to verify the new message expectations and formatting, including updated prompts for user-facing updates.
- Introduced a new function to build reply instructions based on session context, improving the overall announcement process.
* fix: resolve prep blockers and changelog placement (#14447) (thanks @tyler6204)
* fix: restore cron delivery-plan import after rebase (#14447) (thanks @tyler6204)
* fix: resolve test failures from rebase conflicts (#14447) (thanks @tyler6204)
* fix: apply formatting after rebase (#14447) (thanks @tyler6204)
2026-02-14 22:03:45 -08:00
|
|
|
});
|
|
|
|
|
|
2026-02-25 12:53:26 +08:00
|
|
|
it("falls back on unrecognized errors when candidates remain", async () => {
|
2026-01-09 19:59:45 +01:00
|
|
|
const cfg = makeCfg();
|
2026-01-14 14:31:43 +00:00
|
|
|
const run = vi.fn().mockRejectedValueOnce(new Error("bad request")).mockResolvedValueOnce("ok");
|
2026-01-09 19:59:45 +01:00
|
|
|
|
2026-02-25 12:53:26 +08:00
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(result.attempts).toHaveLength(1);
|
|
|
|
|
expect(result.attempts[0].error).toBe("bad request");
|
|
|
|
|
expect(result.attempts[0].reason).toBe("unknown");
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("passes original unknown errors to onError during fallback", async () => {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const unknownError = new Error("provider misbehaved");
|
|
|
|
|
const run = vi.fn().mockRejectedValueOnce(unknownError).mockResolvedValueOnce("ok");
|
|
|
|
|
const onError = vi.fn();
|
|
|
|
|
|
|
|
|
|
await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
run,
|
|
|
|
|
onError,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(onError).toHaveBeenCalledTimes(1);
|
|
|
|
|
expect(onError.mock.calls[0]?.[0]).toMatchObject({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
attempt: 1,
|
|
|
|
|
total: 2,
|
|
|
|
|
});
|
|
|
|
|
expect(onError.mock.calls[0]?.[0]?.error).toBe(unknownError);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("throws unrecognized error on last candidate", async () => {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi.fn().mockRejectedValueOnce(new Error("something weird"));
|
|
|
|
|
|
2026-01-09 19:59:45 +01:00
|
|
|
await expect(
|
|
|
|
|
runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
run,
|
2026-02-25 12:53:26 +08:00
|
|
|
fallbacksOverride: [],
|
2026-01-09 19:59:45 +01:00
|
|
|
}),
|
2026-02-25 12:53:26 +08:00
|
|
|
).rejects.toThrow("something weird");
|
2026-01-09 19:59:45 +01:00
|
|
|
expect(run).toHaveBeenCalledTimes(1);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on auth errors", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-09 19:59:45 +01:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: Object.assign(new Error("nope"), { status: 401 }),
|
2026-01-09 21:17:00 +01:00
|
|
|
});
|
2026-02-12 00:42:33 -03:00
|
|
|
});
|
|
|
|
|
|
2026-02-19 23:16:26 -04:00
|
|
|
it("falls back directly to configured primary when an override model fails", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "openai/gpt-4.1-mini",
|
|
|
|
|
fallbacks: ["anthropic/claude-haiku-3-5", "openrouter/deepseek-chat"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-22 20:01:43 +00:00
|
|
|
const run = createOverrideFailureRun({
|
|
|
|
|
overrideProvider: "anthropic",
|
|
|
|
|
overrideModel: "claude-opus-4-5",
|
|
|
|
|
fallbackProvider: "openai",
|
|
|
|
|
fallbackModel: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error("unauthorized"), { status: 401 }),
|
2026-02-19 23:16:26 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-5",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(result.provider).toBe("openai");
|
|
|
|
|
expect(result.model).toBe("gpt-4.1-mini");
|
|
|
|
|
expect(run.mock.calls).toEqual([
|
|
|
|
|
["anthropic", "claude-opus-4-5"],
|
|
|
|
|
["openai", "gpt-4.1-mini"],
|
|
|
|
|
]);
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-25 01:46:20 +00:00
|
|
|
it("keeps configured fallback chain when current model is a configured fallback", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "openai/gpt-4.1-mini",
|
|
|
|
|
fallbacks: ["anthropic/claude-haiku-3-5", "openrouter/deepseek-chat"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi.fn().mockImplementation(async (provider: string, model: string) => {
|
|
|
|
|
if (provider === "anthropic" && model === "claude-haiku-3-5") {
|
|
|
|
|
throw Object.assign(new Error("rate-limited"), { status: 429 });
|
|
|
|
|
}
|
|
|
|
|
if (provider === "openrouter" && model === "openrouter/deepseek-chat") {
|
|
|
|
|
return "ok";
|
|
|
|
|
}
|
|
|
|
|
throw new Error(`unexpected fallback candidate: ${provider}/${model}`);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-haiku-3-5",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(result.provider).toBe("openrouter");
|
|
|
|
|
expect(result.model).toBe("openrouter/deepseek-chat");
|
|
|
|
|
expect(run.mock.calls).toEqual([
|
|
|
|
|
["anthropic", "claude-haiku-3-5"],
|
|
|
|
|
["openrouter", "openrouter/deepseek-chat"],
|
|
|
|
|
]);
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-19 23:16:26 -04:00
|
|
|
it("treats normalized default refs as primary and keeps configured fallback chain", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "openai/gpt-4.1-mini",
|
|
|
|
|
fallbacks: ["anthropic/claude-haiku-3-5"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(Object.assign(new Error("nope"), { status: 401 }))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: " OpenAI ",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run.mock.calls).toEqual([
|
|
|
|
|
["openai", "gpt-4.1-mini"],
|
|
|
|
|
["anthropic", "claude-haiku-3-5"],
|
|
|
|
|
]);
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-12 00:42:33 -03:00
|
|
|
it("falls back on transient HTTP 5xx errors", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-02-12 00:42:33 -03:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: new Error(
|
|
|
|
|
"521 <!DOCTYPE html><html><head><title>Web server is down</title></head><body>Cloudflare</body></html>",
|
|
|
|
|
),
|
2026-02-12 00:42:33 -03:00
|
|
|
});
|
2026-01-09 21:17:00 +01:00
|
|
|
});
|
|
|
|
|
|
2026-01-09 21:31:13 +01:00
|
|
|
it("falls back on 402 payment required", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-09 21:31:13 +01:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: Object.assign(new Error("payment required"), { status: 402 }),
|
2026-01-09 21:31:13 +01:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-09 21:17:00 +01:00
|
|
|
it("falls back on billing errors", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-09 21:17:00 +01:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: new Error(
|
|
|
|
|
"LLM request rejected: Your credit balance is too low to access the Anthropic API. Please go to Plans & Billing to upgrade or purchase credits.",
|
|
|
|
|
),
|
2026-01-09 19:59:45 +01:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-03-06 01:17:48 +03:00
|
|
|
it("records 400 insufficient_quota payloads as billing during fallback", async () => {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(Object.assign(new Error(INSUFFICIENT_QUOTA_PAYLOAD), { status: 400 }))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(result.attempts).toHaveLength(1);
|
|
|
|
|
expect(result.attempts[0]?.reason).toBe("billing");
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-19 23:16:26 -04:00
|
|
|
it("falls back to configured primary for override credential validation errors", async () => {
|
|
|
|
|
const cfg = makeCfg();
|
2026-02-22 20:01:43 +00:00
|
|
|
const run = createOverrideFailureRun({
|
|
|
|
|
overrideProvider: "anthropic",
|
|
|
|
|
overrideModel: "claude-opus-4",
|
|
|
|
|
fallbackProvider: "openai",
|
|
|
|
|
fallbackModel: "gpt-4.1-mini",
|
|
|
|
|
firstError: new Error('No credentials found for profile "anthropic:default".'),
|
2026-02-19 23:16:26 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
2026-01-12 22:48:37 -05:00
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4",
|
2026-02-19 23:16:26 -04:00
|
|
|
run,
|
2026-01-12 22:48:37 -05:00
|
|
|
});
|
2026-02-19 23:16:26 -04:00
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run.mock.calls).toEqual([
|
|
|
|
|
["anthropic", "claude-opus-4"],
|
|
|
|
|
["openai", "gpt-4.1-mini"],
|
|
|
|
|
]);
|
2026-01-12 22:48:37 -05:00
|
|
|
});
|
|
|
|
|
|
2026-02-20 18:31:09 +08:00
|
|
|
it("falls back on unknown model errors", async () => {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Unknown model: anthropic/claude-opus-4-6"))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
// Override model failed with model_not_found → falls back to configured primary.
|
|
|
|
|
// (Same candidate-resolution path as other override-model failures.)
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(run.mock.calls[1]?.[0]).toBe("openai");
|
|
|
|
|
expect(run.mock.calls[1]?.[1]).toBe("gpt-4.1-mini");
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on model not found errors", async () => {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Model not found: openai/gpt-6"))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-6",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-25 19:35:40 -06:00
|
|
|
// Override model failed with model_not_found → tries fallbacks first (same provider).
|
2026-02-20 18:31:09 +08:00
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
2026-02-25 19:35:40 -06:00
|
|
|
expect(run.mock.calls[1]?.[0]).toBe("anthropic");
|
|
|
|
|
expect(run.mock.calls[1]?.[1]).toBe("claude-haiku-3-5");
|
2026-02-20 18:31:09 +08:00
|
|
|
});
|
|
|
|
|
|
2026-03-07 22:50:27 +00:00
|
|
|
it("warns when falling back due to model_not_found", async () => {
|
|
|
|
|
setLoggerOverride({ level: "silent", consoleLevel: "warn" });
|
|
|
|
|
const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
|
|
|
|
|
try {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Model not found: openai/gpt-6"))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-6",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(warnSpy).toHaveBeenCalledWith(
|
|
|
|
|
expect.stringContaining('Model "openai/gpt-6" not found'),
|
|
|
|
|
);
|
|
|
|
|
} finally {
|
|
|
|
|
warnSpy.mockRestore();
|
|
|
|
|
setLoggerOverride(null);
|
|
|
|
|
resetLogger();
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
|
2026-03-07 23:07:06 +00:00
|
|
|
it("sanitizes model identifiers in model_not_found warnings", async () => {
|
|
|
|
|
setLoggerOverride({ level: "silent", consoleLevel: "warn" });
|
|
|
|
|
const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
|
|
|
|
|
try {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Model not found: openai/gpt-6"))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-6\u001B[31m\nspoof",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
2026-03-10 01:12:10 +03:00
|
|
|
const warning = warnSpy.mock.calls
|
|
|
|
|
.map((call) => call[0] as string)
|
|
|
|
|
.find((value) => value.includes('Model "openai/gpt-6spoof" not found'));
|
2026-03-07 23:07:06 +00:00
|
|
|
expect(warning).toContain('Model "openai/gpt-6spoof" not found');
|
|
|
|
|
expect(warning).not.toContain("\u001B");
|
|
|
|
|
expect(warning).not.toContain("\n");
|
|
|
|
|
} finally {
|
|
|
|
|
warnSpy.mockRestore();
|
|
|
|
|
setLoggerOverride(null);
|
|
|
|
|
resetLogger();
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-26 22:05:31 -05:00
|
|
|
it("skips providers when all profiles are in cooldown", async () => {
|
2026-02-23 05:43:30 +00:00
|
|
|
await expectSkippedUnavailableProvider({
|
|
|
|
|
providerPrefix: "cooldown-test",
|
|
|
|
|
usageStat: {
|
|
|
|
|
cooldownUntil: Date.now() + 5 * 60_000,
|
2026-01-26 22:05:31 -05:00
|
|
|
},
|
2026-02-23 05:43:30 +00:00
|
|
|
expectedReason: "rate_limit",
|
2026-02-18 05:04:51 +00:00
|
|
|
});
|
2026-01-26 22:05:31 -05:00
|
|
|
});
|
|
|
|
|
|
2026-02-24 18:45:57 -05:00
|
|
|
it("does not skip OpenRouter when legacy cooldown markers exist", async () => {
|
|
|
|
|
const provider = "openrouter";
|
|
|
|
|
const cfg = makeProviderFallbackCfg(provider);
|
|
|
|
|
const store = makeSingleProviderStore({
|
|
|
|
|
provider,
|
|
|
|
|
usageStat: {
|
|
|
|
|
cooldownUntil: Date.now() + 5 * 60_000,
|
|
|
|
|
disabledUntil: Date.now() + 10 * 60_000,
|
|
|
|
|
disabledReason: "billing",
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
const run = vi.fn().mockImplementation(async (providerId) => {
|
|
|
|
|
if (providerId === "openrouter") {
|
|
|
|
|
return "ok";
|
|
|
|
|
}
|
|
|
|
|
throw new Error(`unexpected provider: ${providerId}`);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = await runWithStoredAuth({
|
|
|
|
|
cfg,
|
|
|
|
|
store,
|
|
|
|
|
provider,
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(1);
|
|
|
|
|
expect(run.mock.calls[0]?.[0]).toBe("openrouter");
|
|
|
|
|
expect(result.attempts).toEqual([]);
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-22 16:10:24 -08:00
|
|
|
it("propagates disabled reason when all profiles are unavailable", async () => {
|
|
|
|
|
const now = Date.now();
|
2026-02-23 05:43:30 +00:00
|
|
|
await expectSkippedUnavailableProvider({
|
|
|
|
|
providerPrefix: "disabled-test",
|
|
|
|
|
usageStat: {
|
|
|
|
|
disabledUntil: now + 5 * 60_000,
|
|
|
|
|
disabledReason: "billing",
|
|
|
|
|
failureCounts: { rate_limit: 4 },
|
2026-02-22 16:10:24 -08:00
|
|
|
},
|
2026-02-23 05:43:30 +00:00
|
|
|
expectedReason: "billing",
|
2026-02-22 16:10:24 -08:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-26 22:05:31 -05:00
|
|
|
it("does not skip when any profile is available", async () => {
|
|
|
|
|
const provider = `cooldown-mixed-${crypto.randomUUID()}`;
|
|
|
|
|
const profileA = `${provider}:a`;
|
|
|
|
|
const profileB = `${provider}:b`;
|
|
|
|
|
|
|
|
|
|
const store: AuthProfileStore = {
|
|
|
|
|
version: AUTH_STORE_VERSION,
|
|
|
|
|
profiles: {
|
|
|
|
|
[profileA]: {
|
|
|
|
|
type: "api_key",
|
|
|
|
|
provider,
|
|
|
|
|
key: "key-a",
|
|
|
|
|
},
|
|
|
|
|
[profileB]: {
|
|
|
|
|
type: "api_key",
|
|
|
|
|
provider,
|
|
|
|
|
key: "key-b",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
usageStats: {
|
|
|
|
|
[profileA]: {
|
|
|
|
|
cooldownUntil: Date.now() + 60_000,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
|
2026-02-18 05:04:51 +00:00
|
|
|
const cfg = makeProviderFallbackCfg(provider);
|
2026-01-26 22:05:31 -05:00
|
|
|
const run = vi.fn().mockImplementation(async (providerId) => {
|
2026-01-31 16:19:20 +09:00
|
|
|
if (providerId === provider) {
|
|
|
|
|
return "ok";
|
|
|
|
|
}
|
2026-01-26 22:05:31 -05:00
|
|
|
return "unexpected";
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-18 05:04:51 +00:00
|
|
|
const result = await runWithStoredAuth({
|
|
|
|
|
cfg,
|
|
|
|
|
store,
|
|
|
|
|
provider,
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run.mock.calls).toEqual([[provider, "m1"]]);
|
|
|
|
|
expect(result.attempts).toEqual([]);
|
2026-01-26 22:05:31 -05:00
|
|
|
});
|
|
|
|
|
|
2026-01-09 14:59:02 +01:00
|
|
|
it("does not append configured primary when fallbacksOverride is set", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "openai/gpt-4.1-mini",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
2026-01-14 14:31:43 +00:00
|
|
|
.mockImplementation(() => Promise.reject(Object.assign(new Error("nope"), { status: 401 })));
|
2026-01-09 14:59:02 +01:00
|
|
|
|
|
|
|
|
await expect(
|
|
|
|
|
runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-5",
|
|
|
|
|
fallbacksOverride: ["anthropic/claude-haiku-3-5"],
|
|
|
|
|
run,
|
|
|
|
|
}),
|
|
|
|
|
).rejects.toThrow("All models failed");
|
|
|
|
|
|
|
|
|
|
expect(run.mock.calls).toEqual([
|
|
|
|
|
["anthropic", "claude-opus-4-5"],
|
|
|
|
|
["anthropic", "claude-haiku-3-5"],
|
|
|
|
|
]);
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-09 16:10:53 +01:00
|
|
|
it("uses fallbacksOverride instead of agents.defaults.model.fallbacks", async () => {
|
2026-02-18 05:04:51 +00:00
|
|
|
const cfg = makeFallbacksOnlyCfg();
|
2026-01-09 16:10:53 +01:00
|
|
|
|
|
|
|
|
const calls: Array<{ provider: string; model: string }> = [];
|
|
|
|
|
|
|
|
|
|
const res = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-5",
|
|
|
|
|
fallbacksOverride: ["openai/gpt-4.1"],
|
|
|
|
|
run: async (provider, model) => {
|
|
|
|
|
calls.push({ provider, model });
|
|
|
|
|
if (provider === "anthropic") {
|
2026-01-13 06:49:26 +00:00
|
|
|
throw Object.assign(new Error("nope"), { status: 401 });
|
2026-01-09 16:10:53 +01:00
|
|
|
}
|
|
|
|
|
if (provider === "openai" && model === "gpt-4.1") {
|
|
|
|
|
return "ok";
|
|
|
|
|
}
|
|
|
|
|
throw new Error(`unexpected candidate: ${provider}/${model}`);
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(res.result).toBe("ok");
|
|
|
|
|
expect(calls).toEqual([
|
|
|
|
|
{ provider: "anthropic", model: "claude-opus-4-5" },
|
|
|
|
|
{ provider: "openai", model: "gpt-4.1" },
|
|
|
|
|
]);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("treats an empty fallbacksOverride as disabling global fallbacks", async () => {
|
2026-02-18 05:04:51 +00:00
|
|
|
const cfg = makeFallbacksOnlyCfg();
|
2026-01-09 16:10:53 +01:00
|
|
|
|
|
|
|
|
const calls: Array<{ provider: string; model: string }> = [];
|
|
|
|
|
|
|
|
|
|
await expect(
|
|
|
|
|
runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-5",
|
|
|
|
|
fallbacksOverride: [],
|
|
|
|
|
run: async (provider, model) => {
|
|
|
|
|
calls.push({ provider, model });
|
|
|
|
|
throw new Error("primary failed");
|
|
|
|
|
},
|
|
|
|
|
}),
|
|
|
|
|
).rejects.toThrow("primary failed");
|
|
|
|
|
|
2026-01-14 14:31:43 +00:00
|
|
|
expect(calls).toEqual([{ provider: "anthropic", model: "claude-opus-4-5" }]);
|
2026-01-09 16:10:53 +01:00
|
|
|
});
|
|
|
|
|
|
2026-02-25 03:46:34 +00:00
|
|
|
it("keeps explicit fallbacks reachable when models allowlist is present", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-sonnet-4",
|
|
|
|
|
fallbacks: ["openai/gpt-4o", "ollama/llama-3"],
|
|
|
|
|
},
|
|
|
|
|
models: {
|
|
|
|
|
"anthropic/claude-sonnet-4": {},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(Object.assign(new Error("rate limited"), { status: 429 }))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-sonnet-4",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run.mock.calls).toEqual([
|
|
|
|
|
["anthropic", "claude-sonnet-4"],
|
|
|
|
|
["openai", "gpt-4o"],
|
|
|
|
|
]);
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-15 11:58:15 +00:00
|
|
|
it("defaults provider/model when missing (regression #946)", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "openai/gpt-4.1-mini",
|
|
|
|
|
fallbacks: [],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const calls: Array<{ provider: string; model: string }> = [];
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: undefined as unknown as string,
|
|
|
|
|
model: undefined as unknown as string,
|
|
|
|
|
run: async (provider, model) => {
|
|
|
|
|
calls.push({ provider, model });
|
|
|
|
|
return "ok";
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(calls).toEqual([{ provider: "openai", model: "gpt-4.1-mini" }]);
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-13 04:02:47 +00:00
|
|
|
it("falls back on missing API key errors", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-13 04:02:47 +00:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: new Error("No API key found for profile openai."),
|
2026-01-13 04:12:02 +00:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on lowercase credential errors", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-13 04:12:02 +00:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: new Error("no api key found for profile openai"),
|
2026-01-13 04:02:47 +00:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-03-06 00:42:59 +03:00
|
|
|
it("falls back on documented OpenAI 429 rate limit responses", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error(OPENAI_RATE_LIMIT_MESSAGE), { status: 429 }),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on documented overloaded_error payloads", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: new Error(ANTHROPIC_OVERLOADED_PAYLOAD),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on internal model cooldown markers", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: new Error(MODEL_COOLDOWN_MESSAGE),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on compatibility connection error messages", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: new Error(CONNECTION_ERROR_MESSAGE),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-18 07:52:19 +00:00
|
|
|
it("falls back on timeout abort errors", async () => {
|
|
|
|
|
const timeoutCause = Object.assign(new Error("request timed out"), { name: "TimeoutError" });
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-18 07:52:19 +00:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: Object.assign(new Error("aborted"), { name: "AbortError", cause: timeoutCause }),
|
2026-01-18 07:52:19 +00:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on abort errors with timeout reasons", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-18 07:52:19 +00:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: Object.assign(new Error("aborted"), {
|
|
|
|
|
name: "AbortError",
|
|
|
|
|
reason: "deadline exceeded",
|
|
|
|
|
}),
|
2026-01-18 07:52:19 +00:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-16 20:59:44 -05:00
|
|
|
it("falls back on abort errors with reason: abort", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error("aborted"), {
|
|
|
|
|
name: "AbortError",
|
|
|
|
|
reason: "reason: abort",
|
|
|
|
|
}),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-03-03 01:04:20 +00:00
|
|
|
it("falls back on unhandled stop reason error responses", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: new Error("Unhandled stop reason: error"),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-03-03 01:05:06 +00:00
|
|
|
it("falls back on abort errors with reason: error", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error("aborted"), {
|
|
|
|
|
name: "AbortError",
|
|
|
|
|
reason: "reason: error",
|
|
|
|
|
}),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-20 19:23:06 +00:00
|
|
|
it("falls back when message says aborted but error is a timeout", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-20 19:23:06 +00:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: Object.assign(new Error("request aborted"), { code: "ETIMEDOUT" }),
|
2026-01-20 19:23:06 +00:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-17 18:09:05 +08:00
|
|
|
it("falls back on ECONNREFUSED (local server down or remote unreachable)", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error("connect ECONNREFUSED 127.0.0.1:11434"), {
|
|
|
|
|
code: "ECONNREFUSED",
|
|
|
|
|
}),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on ENETUNREACH (network disconnected)", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error("connect ENETUNREACH"), { code: "ENETUNREACH" }),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on EHOSTUNREACH (host unreachable)", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error("connect EHOSTUNREACH"), { code: "EHOSTUNREACH" }),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("falls back on EAI_AGAIN (DNS resolution failure)", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error("getaddrinfo EAI_AGAIN api.openai.com"), {
|
|
|
|
|
code: "EAI_AGAIN",
|
|
|
|
|
}),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-17 18:17:09 +08:00
|
|
|
it("falls back on ENETRESET (connection reset by network)", async () => {
|
|
|
|
|
await expectFallsBackToHaiku({
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
firstError: Object.assign(new Error("connect ENETRESET"), { code: "ENETRESET" }),
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-24 06:27:24 -05:00
|
|
|
it("falls back on provider abort errors with request-aborted messages", async () => {
|
2026-02-16 14:52:09 +00:00
|
|
|
await expectFallsBackToHaiku({
|
2026-01-24 06:27:24 -05:00
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
2026-02-16 14:52:09 +00:00
|
|
|
firstError: Object.assign(new Error("Request was aborted"), { name: "AbortError" }),
|
2026-01-24 06:27:24 -05:00
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-18 07:52:19 +00:00
|
|
|
it("does not fall back on user aborts", async () => {
|
|
|
|
|
const cfg = makeCfg();
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(Object.assign(new Error("aborted"), { name: "AbortError" }))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
await expect(
|
|
|
|
|
runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai",
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
run,
|
|
|
|
|
}),
|
|
|
|
|
).rejects.toThrow("aborted");
|
|
|
|
|
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(1);
|
|
|
|
|
});
|
|
|
|
|
|
2026-01-09 19:59:45 +01:00
|
|
|
it("appends the configured primary as a last fallback", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "openai/gpt-4.1-mini",
|
|
|
|
|
fallbacks: [],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
2026-01-14 14:31:43 +00:00
|
|
|
.mockRejectedValueOnce(Object.assign(new Error("timeout"), { code: "ETIMEDOUT" }))
|
2026-01-09 19:59:45 +01:00
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openrouter",
|
|
|
|
|
model: "meta-llama/llama-3.3-70b:free",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(result.provider).toBe("openai");
|
|
|
|
|
expect(result.model).toBe("gpt-4.1-mini");
|
|
|
|
|
});
|
2026-02-25 19:35:40 -06:00
|
|
|
|
|
|
|
|
// Tests for Bug A fix: Model fallback with session overrides
|
|
|
|
|
describe("fallback behavior with session model overrides", () => {
|
|
|
|
|
it("allows fallbacks when session model differs from config within same provider", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: ["anthropic/claude-sonnet-4-5", "google/gemini-2.5-flash"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Rate limit exceeded")) // Session model fails
|
|
|
|
|
.mockResolvedValueOnce("fallback success"); // First fallback succeeds
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-sonnet-4-20250514", // Different from config primary
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("fallback success");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-20250514");
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(2, "anthropic", "claude-sonnet-4-5"); // Fallback tried
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("allows fallbacks with model version differences within same provider", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: ["groq/llama-3.3-70b-versatile"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Weekly quota exceeded"))
|
|
|
|
|
.mockResolvedValueOnce("groq success");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-5", // Version difference from config
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("groq success");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile");
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("still skips fallbacks when using different provider than config", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: [], // Empty fallbacks to match working pattern
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error('No credentials found for profile "openai:default".'))
|
|
|
|
|
.mockResolvedValueOnce("config primary worked");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "openai", // Different provider
|
|
|
|
|
model: "gpt-4.1-mini",
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
// Cross-provider requests should skip configured fallbacks but still try configured primary
|
|
|
|
|
expect(result.result).toBe("config primary worked");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "openai", "gpt-4.1-mini"); // Original request
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(2, "anthropic", "claude-opus-4-6"); // Config primary as final fallback
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("uses fallbacks when session model exactly matches config primary", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: ["groq/llama-3.3-70b-versatile"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Quota exceeded"))
|
|
|
|
|
.mockResolvedValueOnce("fallback worked");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6", // Exact match
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("fallback worked");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile");
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
// Tests for Bug B fix: Rate limit vs auth/billing cooldown distinction
|
|
|
|
|
describe("fallback behavior with provider cooldowns", () => {
|
|
|
|
|
async function makeAuthStoreWithCooldown(
|
|
|
|
|
provider: string,
|
2026-03-07 01:42:11 +03:00
|
|
|
reason: "rate_limit" | "overloaded" | "auth" | "billing",
|
2026-02-25 19:35:40 -06:00
|
|
|
): Promise<{ store: AuthProfileStore; dir: string }> {
|
|
|
|
|
const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-test-"));
|
|
|
|
|
const now = Date.now();
|
|
|
|
|
const store: AuthProfileStore = {
|
|
|
|
|
version: AUTH_STORE_VERSION,
|
|
|
|
|
profiles: {
|
|
|
|
|
[`${provider}:default`]: { type: "api_key", provider, key: "test-key" },
|
|
|
|
|
},
|
|
|
|
|
usageStats: {
|
|
|
|
|
[`${provider}:default`]:
|
2026-03-07 01:42:11 +03:00
|
|
|
reason === "rate_limit" || reason === "overloaded"
|
2026-02-25 19:35:40 -06:00
|
|
|
? {
|
2026-03-07 01:42:11 +03:00
|
|
|
// Transient cooldown reasons are tracked through
|
|
|
|
|
// cooldownUntil and failureCounts, not disabledReason.
|
2026-02-25 19:35:40 -06:00
|
|
|
cooldownUntil: now + 300000,
|
2026-03-07 01:42:11 +03:00
|
|
|
failureCounts: { [reason]: 1 },
|
2026-02-25 19:35:40 -06:00
|
|
|
}
|
|
|
|
|
: {
|
|
|
|
|
// Auth/billing issues use disabledUntil
|
|
|
|
|
disabledUntil: now + 300000,
|
|
|
|
|
disabledReason: reason,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
saveAuthProfileStore(store, tmpDir);
|
|
|
|
|
return { store, dir: tmpDir };
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
it("attempts same-provider fallbacks during rate limit cooldown", async () => {
|
|
|
|
|
const { dir } = await makeAuthStoreWithCooldown("anthropic", "rate_limit");
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi.fn().mockResolvedValueOnce("sonnet success"); // Fallback succeeds
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6",
|
|
|
|
|
run,
|
|
|
|
|
agentDir: dir,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("sonnet success");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(1); // Primary skipped, fallback attempted
|
2026-03-05 20:02:36 -08:00
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5", {
|
2026-03-07 01:42:11 +03:00
|
|
|
allowTransientCooldownProbe: true,
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("attempts same-provider fallbacks during overloaded cooldown", async () => {
|
|
|
|
|
const { dir } = await makeAuthStoreWithCooldown("anthropic", "overloaded");
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi.fn().mockResolvedValueOnce("sonnet success");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6",
|
|
|
|
|
run,
|
|
|
|
|
agentDir: dir,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("sonnet success");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(1);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5", {
|
|
|
|
|
allowTransientCooldownProbe: true,
|
2026-03-05 20:02:36 -08:00
|
|
|
});
|
2026-02-25 19:35:40 -06:00
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("skips same-provider models on auth cooldown but still tries no-profile fallback providers", async () => {
|
|
|
|
|
const { dir } = await makeAuthStoreWithCooldown("anthropic", "auth");
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi.fn().mockResolvedValueOnce("groq success");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6",
|
|
|
|
|
run,
|
|
|
|
|
agentDir: dir,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("groq success");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(1);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "groq", "llama-3.3-70b-versatile");
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("skips same-provider models on billing cooldown but still tries no-profile fallback providers", async () => {
|
|
|
|
|
const { dir } = await makeAuthStoreWithCooldown("anthropic", "billing");
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi.fn().mockResolvedValueOnce("groq success");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6",
|
|
|
|
|
run,
|
|
|
|
|
agentDir: dir,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("groq success");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(1);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "groq", "llama-3.3-70b-versatile");
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("tries cross-provider fallbacks when same provider has rate limit", async () => {
|
|
|
|
|
// Anthropic in rate limit cooldown, Groq available
|
|
|
|
|
const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-test-"));
|
|
|
|
|
const store: AuthProfileStore = {
|
|
|
|
|
version: AUTH_STORE_VERSION,
|
|
|
|
|
profiles: {
|
|
|
|
|
"anthropic:default": { type: "api_key", provider: "anthropic", key: "test-key" },
|
|
|
|
|
"groq:default": { type: "api_key", provider: "groq", key: "test-key" },
|
|
|
|
|
},
|
|
|
|
|
usageStats: {
|
|
|
|
|
"anthropic:default": {
|
|
|
|
|
// Rate-limit reason is inferred from failureCounts for cooldown windows.
|
|
|
|
|
cooldownUntil: Date.now() + 300000,
|
|
|
|
|
failureCounts: { rate_limit: 2 },
|
|
|
|
|
},
|
|
|
|
|
// Groq not in cooldown
|
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
saveAuthProfileStore(store, tmpDir);
|
|
|
|
|
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Still rate limited")) // Sonnet still fails
|
|
|
|
|
.mockResolvedValueOnce("groq success"); // Groq works
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6",
|
|
|
|
|
run,
|
|
|
|
|
agentDir: tmpDir,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("groq success");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
2026-03-05 20:02:36 -08:00
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5", {
|
2026-03-07 01:42:11 +03:00
|
|
|
allowTransientCooldownProbe: true,
|
2026-03-05 20:02:36 -08:00
|
|
|
}); // Rate limit allows attempt
|
2026-02-25 19:35:40 -06:00
|
|
|
expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile"); // Cross-provider works
|
|
|
|
|
});
|
2026-03-10 07:26:47 -05:00
|
|
|
|
|
|
|
|
it("limits cooldown probes to one per provider before moving to cross-provider fallback", async () => {
|
|
|
|
|
const { dir } = await makeAuthStoreWithCooldown("anthropic", "rate_limit");
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: [
|
|
|
|
|
"anthropic/claude-sonnet-4-5",
|
|
|
|
|
"anthropic/claude-haiku-3-5",
|
|
|
|
|
"groq/llama-3.3-70b-versatile",
|
|
|
|
|
],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Still rate limited")) // First same-provider probe fails
|
|
|
|
|
.mockResolvedValueOnce("groq success"); // Next provider succeeds
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6",
|
|
|
|
|
run,
|
|
|
|
|
agentDir: dir,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("groq success");
|
|
|
|
|
// Primary is skipped, first same-provider fallback is probed, second same-provider fallback
|
|
|
|
|
// is skipped (probe already attempted), then cross-provider fallback runs.
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5", {
|
|
|
|
|
allowTransientCooldownProbe: true,
|
|
|
|
|
});
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile");
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("does not consume transient probe slot when first same-provider probe fails with model_not_found", async () => {
|
|
|
|
|
const { dir } = await makeAuthStoreWithCooldown("anthropic", "rate_limit");
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
model: {
|
|
|
|
|
primary: "anthropic/claude-opus-4-6",
|
|
|
|
|
fallbacks: [
|
|
|
|
|
"anthropic/claude-sonnet-4-5",
|
|
|
|
|
"anthropic/claude-haiku-3-5",
|
|
|
|
|
"groq/llama-3.3-70b-versatile",
|
|
|
|
|
],
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("Model not found: anthropic/claude-sonnet-4-5"))
|
|
|
|
|
.mockResolvedValueOnce("haiku success");
|
|
|
|
|
|
|
|
|
|
const result = await runWithModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
provider: "anthropic",
|
|
|
|
|
model: "claude-opus-4-6",
|
|
|
|
|
run,
|
|
|
|
|
agentDir: dir,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("haiku success");
|
|
|
|
|
expect(run).toHaveBeenCalledTimes(2);
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5", {
|
|
|
|
|
allowTransientCooldownProbe: true,
|
|
|
|
|
});
|
|
|
|
|
expect(run).toHaveBeenNthCalledWith(2, "anthropic", "claude-haiku-3-5", {
|
|
|
|
|
allowTransientCooldownProbe: true,
|
|
|
|
|
});
|
|
|
|
|
});
|
2026-02-25 19:35:40 -06:00
|
|
|
});
|
2026-01-09 19:59:45 +01:00
|
|
|
});
|
2026-02-23 04:55:43 +00:00
|
|
|
|
2026-02-25 03:46:34 +00:00
|
|
|
describe("runWithImageModelFallback", () => {
|
|
|
|
|
it("keeps explicit image fallbacks reachable when models allowlist is present", async () => {
|
|
|
|
|
const cfg = makeCfg({
|
|
|
|
|
agents: {
|
|
|
|
|
defaults: {
|
|
|
|
|
imageModel: {
|
|
|
|
|
primary: "openai/gpt-image-1",
|
|
|
|
|
fallbacks: ["google/gemini-2.5-flash-image-preview"],
|
|
|
|
|
},
|
|
|
|
|
models: {
|
|
|
|
|
"openai/gpt-image-1": {},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
const run = vi
|
|
|
|
|
.fn()
|
|
|
|
|
.mockRejectedValueOnce(new Error("rate limited"))
|
|
|
|
|
.mockResolvedValueOnce("ok");
|
|
|
|
|
|
|
|
|
|
const result = await runWithImageModelFallback({
|
|
|
|
|
cfg,
|
|
|
|
|
run,
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
expect(result.result).toBe("ok");
|
|
|
|
|
expect(run.mock.calls).toEqual([
|
|
|
|
|
["openai", "gpt-image-1"],
|
|
|
|
|
["google", "gemini-2.5-flash-image-preview"],
|
|
|
|
|
]);
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-23 04:55:43 +00:00
|
|
|
describe("isAnthropicBillingError", () => {
|
|
|
|
|
it("does not false-positive on plain 'a 402' prose", () => {
|
|
|
|
|
const samples = [
|
|
|
|
|
"Use a 402 stainless bolt",
|
|
|
|
|
"Book a 402 room",
|
|
|
|
|
"There is a 402 near me",
|
|
|
|
|
"The building at 402 Main Street",
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
for (const sample of samples) {
|
|
|
|
|
expect(isAnthropicBillingError(sample)).toBe(false);
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it("matches real 402 billing payload contexts including JSON keys", () => {
|
|
|
|
|
const samples = [
|
|
|
|
|
"HTTP 402 Payment Required",
|
|
|
|
|
"status: 402",
|
|
|
|
|
"error code 402",
|
|
|
|
|
'{"status":402,"type":"error"}',
|
|
|
|
|
'{"code":402,"message":"payment required"}',
|
|
|
|
|
'{"error":{"code":402,"message":"billing hard limit reached"}}',
|
|
|
|
|
"got a 402 from the API",
|
|
|
|
|
"returned 402",
|
|
|
|
|
"received a 402 response",
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
for (const sample of samples) {
|
|
|
|
|
expect(isAnthropicBillingError(sample)).toBe(true);
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
});
|