fix(embedded): classify model_context_window_exceeded as context overflow, trigger compaction (#35934)

Merged via squash.

Prepared head SHA: 20fa77289c80b2807a6779a3df70440242bc18ca
Co-authored-by: RealKai42 <44634134+RealKai42@users.noreply.github.com>
Co-authored-by: jalehman <550978+jalehman@users.noreply.github.com>
Reviewed-by: @jalehman
This commit is contained in:
Kai
2026-03-06 03:30:24 +08:00
committed by GitHub
parent 72cf9253fc
commit 60a6d11116
5 changed files with 199 additions and 17 deletions

View File

@@ -483,6 +483,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Models/provider config precedence: prefer exact `models.providers.<name>` matches before normalized provider aliases in embedded model resolution, preventing alias/canonical key collisions from applying the wrong provider `api`, `baseUrl`, or headers. (#35934) Thanks @RealKai42.
- Logging/Subsystem console timestamps: route subsystem console timestamp rendering through `formatConsoleTimestamp(...)` so `pretty` and timestamp-prefix output use local timezone formatting consistently instead of inline UTC `toISOString()` paths. (#25970) Thanks @openperf.
- Feishu/Multi-account + reply reliability: add `channels.feishu.defaultAccount` outbound routing support with schema validation, prevent inbound preview text from leaking into prompt system events, keep quoted-message extraction text-first (post/interactive/file placeholders instead of raw JSON), route Feishu video sends as `msg_type: "file"`, and avoid websocket event blocking by using non-blocking event handling in monitor dispatch. Landed from contributor PRs #31209, #29610, #30432, #30331, and #29501. Thanks @stakeswky, @hclsys, @bmendonca3, @patrick-yingxi-pan, and @zwffff.
- Feishu/Target routing + replies + dedupe: normalize provider-prefixed targets (`feishu:`/`lark:`), prefer configured `channels.feishu.defaultAccount` for tool execution, honor Feishu outbound `renderMode` in adapter text/caption sends, fall back to normal send when reply targets are withdrawn/deleted, and add synchronous in-memory dedupe guard for concurrent duplicate inbound events. Landed from contributor PRs #30428, #30438, #29958, #30444, and #29463. Thanks @bmendonca3 and @Yaxuan42.

View File

@@ -269,6 +269,21 @@ describe("isContextOverflowError", () => {
}
});
it("matches model_context_window_exceeded stop reason surfaced by pi-ai", () => {
  // Anthropic API (and some OpenAI-compatible providers like ZhipuAI/GLM) return
  // stop_reason: "model_context_window_exceeded" when the context window is hit.
  // The pi-ai library surfaces this as "Unhandled stop reason: model_context_window_exceeded".
  // Cover both the raw stop reason and the pi-ai-wrapped form, with and without
  // the "model_" prefix.
  for (const message of [
    "Unhandled stop reason: model_context_window_exceeded",
    "model_context_window_exceeded",
    "context_window_exceeded",
    "Unhandled stop reason: context_window_exceeded",
  ]) {
    expect(isContextOverflowError(message)).toBe(true);
  }
});
it("matches Chinese context overflow error messages from proxy providers", () => {
const samples = [
"上下文过长",

View File

@@ -105,6 +105,9 @@ export function isContextOverflowError(errorMessage?: string): boolean {
(lower.includes("max_tokens") && lower.includes("exceed") && lower.includes("context")) ||
(lower.includes("input length") && lower.includes("exceed") && lower.includes("context")) ||
(lower.includes("413") && lower.includes("too large")) ||
// Anthropic API and OpenAI-compatible providers (e.g. ZhipuAI/GLM) return this stop reason
// when the context window is exceeded. pi-ai surfaces it as "Unhandled stop reason: model_context_window_exceeded".
lower.includes("context_window_exceeded") ||
// Chinese proxy error messages for context overflow
errorMessage.includes("上下文过长") ||
errorMessage.includes("上下文超出") ||

View File

@@ -278,6 +278,118 @@ describe("resolveModel", () => {
expect(result.model?.reasoning).toBe(true);
});
it("prefers configured provider api metadata over discovered registry model", () => {
mockDiscoveredModel({
provider: "onehub",
modelId: "glm-5",
templateModel: {
id: "glm-5",
name: "GLM-5 (cached)",
provider: "onehub",
api: "anthropic-messages",
baseUrl: "https://old-provider.example.com/v1",
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 8192,
maxTokens: 2048,
},
});
const cfg = {
models: {
providers: {
onehub: {
baseUrl: "http://new-provider.example.com/v1",
api: "openai-completions",
models: [
{
...makeModel("glm-5"),
api: "openai-completions",
reasoning: true,
contextWindow: 198000,
maxTokens: 16000,
},
],
},
},
},
} as OpenClawConfig;
const result = resolveModel("onehub", "glm-5", "/tmp/agent", cfg);
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
provider: "onehub",
id: "glm-5",
api: "openai-completions",
baseUrl: "http://new-provider.example.com/v1",
reasoning: true,
contextWindow: 198000,
maxTokens: 16000,
});
});
it("prefers exact provider config over normalized alias match when both keys exist", () => {
  // Both "qwen" (exact key for the requested provider) and "qwen-portal"
  // (a key that could collide via normalized-alias matching) are configured.
  // Resolution must pick the exact "qwen" entry.
  mockDiscoveredModel({
    provider: "qwen",
    modelId: "qwen3-coder-plus",
    templateModel: {
      id: "qwen3-coder-plus",
      name: "Qwen3 Coder Plus",
      provider: "qwen",
      api: "openai-completions",
      baseUrl: "https://default-provider.example.com/v1",
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 8192,
      maxTokens: 2048,
    },
  });
  const cfg = {
    models: {
      providers: {
        "qwen-portal": {
          baseUrl: "https://canonical-provider.example.com/v1",
          api: "openai-completions",
          headers: { "X-Provider": "canonical" },
          models: [{ ...makeModel("qwen3-coder-plus"), reasoning: false }],
        },
        qwen: {
          baseUrl: "https://alias-provider.example.com/v1",
          api: "anthropic-messages",
          headers: { "X-Provider": "alias" },
          models: [
            {
              ...makeModel("qwen3-coder-plus"),
              api: "anthropic-messages",
              reasoning: true,
              contextWindow: 262144,
              maxTokens: 32768,
            },
          ],
        },
      },
    },
  } as OpenClawConfig;
  const result = resolveModel("qwen", "qwen3-coder-plus", "/tmp/agent", cfg);
  expect(result.error).toBeUndefined();
  // All overrides must come from the exact "qwen" entry, never "qwen-portal".
  // Fix: the expected baseUrl previously dropped the configured "/v1" suffix;
  // resolution passes the configured baseUrl through verbatim (see the sibling
  // test above, which expects the configured URL including its path).
  expect(result.model).toMatchObject({
    provider: "qwen",
    id: "qwen3-coder-plus",
    api: "anthropic-messages",
    baseUrl: "https://alias-provider.example.com/v1",
    reasoning: true,
    contextWindow: 262144,
    maxTokens: 32768,
    headers: { "X-Provider": "alias" },
  });
});
it("builds an openai-codex fallback for gpt-5.3-codex", () => {
mockOpenAICodexTemplateModel();

View File

@@ -7,7 +7,7 @@ import { DEFAULT_CONTEXT_TOKENS } from "../defaults.js";
import { buildModelAliasLines } from "../model-alias-lines.js";
import { normalizeModelCompat } from "../model-compat.js";
import { resolveForwardCompatModel } from "../model-forward-compat.js";
import { normalizeProviderId } from "../model-selection.js";
import { findNormalizedProviderValue, normalizeProviderId } from "../model-selection.js";
import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js";
type InlineModelEntry = ModelDefinitionConfig & {
@@ -24,6 +24,60 @@ type InlineProviderConfig = {
export { buildModelAliasLines };
/**
 * Looks up the user-configured provider entry for `provider`.
 *
 * An exact key match in `models.providers` always wins; only when no exact
 * key exists do we fall back to a normalized-alias lookup, so canonical keys
 * cannot be shadowed by alias collisions.
 */
function resolveConfiguredProviderConfig(
  cfg: OpenClawConfig | undefined,
  provider: string,
): InlineProviderConfig | undefined {
  const configured = cfg?.models?.providers;
  if (!configured) {
    return undefined;
  }
  return configured[provider] ?? findNormalizedProviderValue(configured, provider);
}
/**
 * Merges user-configured provider/model overrides onto a registry-discovered
 * model.
 *
 * Precedence per field: model-level config entry, then provider-level config,
 * then the discovered model. Headers are merged (discovered < provider-level
 * < model-level) rather than replaced. Returns the discovered model untouched
 * when there is nothing to override.
 */
function applyConfiguredProviderOverrides(params: {
  discoveredModel: Model<Api>;
  providerConfig?: InlineProviderConfig;
  modelId: string;
}): Model<Api> {
  const { discoveredModel, providerConfig, modelId } = params;
  if (!providerConfig) {
    return discoveredModel;
  }
  const modelOverride = providerConfig.models?.find((entry) => entry.id === modelId);
  const hasProviderLevelOverride = Boolean(
    providerConfig.baseUrl || providerConfig.api || providerConfig.headers,
  );
  // Nothing configured for this model and no provider-wide settings: keep
  // the discovered model as-is.
  if (!modelOverride && !hasProviderLevelOverride) {
    return discoveredModel;
  }
  const mergedHeaders =
    providerConfig.headers || modelOverride?.headers
      ? {
          ...discoveredModel.headers,
          ...providerConfig.headers,
          ...modelOverride?.headers,
        }
      : discoveredModel.headers;
  return {
    ...discoveredModel,
    api: modelOverride?.api ?? providerConfig.api ?? discoveredModel.api,
    baseUrl: providerConfig.baseUrl ?? discoveredModel.baseUrl,
    reasoning: modelOverride?.reasoning ?? discoveredModel.reasoning,
    input: modelOverride?.input ?? discoveredModel.input,
    cost: modelOverride?.cost ?? discoveredModel.cost,
    contextWindow: modelOverride?.contextWindow ?? discoveredModel.contextWindow,
    maxTokens: modelOverride?.maxTokens ?? discoveredModel.maxTokens,
    headers: mergedHeaders,
    compat: modelOverride?.compat ?? discoveredModel.compat,
  };
}
export function buildInlineProviderModels(
providers: Record<string, InlineProviderConfig>,
): InlineModelEntry[] {
@@ -59,6 +113,7 @@ export function resolveModel(
const resolvedAgentDir = agentDir ?? resolveOpenClawAgentDir();
const authStorage = discoverAuthStorage(resolvedAgentDir);
const modelRegistry = discoverModels(authStorage, resolvedAgentDir);
const providerConfig = resolveConfiguredProviderConfig(cfg, provider);
const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
if (!model) {
@@ -100,7 +155,7 @@ export function resolveModel(
} as Model<Api>);
return { model: fallbackModel, authStorage, modelRegistry };
}
const providerCfg = providers[provider];
const providerCfg = providerConfig;
if (providerCfg || modelId.startsWith("mock-")) {
const configuredModel = providerCfg?.models?.find((candidate) => candidate.id === modelId);
const fallbackModel: Model<Api> = normalizeModelCompat({
@@ -133,21 +188,17 @@ export function resolveModel(
modelRegistry,
};
}
const providerOverride = cfg?.models?.providers?.[provider] as InlineProviderConfig | undefined;
if (providerOverride?.baseUrl || providerOverride?.headers) {
const overridden: Model<Api> & { headers?: Record<string, string> } = { ...model };
if (providerOverride.baseUrl) {
overridden.baseUrl = providerOverride.baseUrl;
}
if (providerOverride.headers) {
overridden.headers = {
...(model as Model<Api> & { headers?: Record<string, string> }).headers,
...providerOverride.headers,
};
}
return { model: normalizeModelCompat(overridden), authStorage, modelRegistry };
}
return { model: normalizeModelCompat(model), authStorage, modelRegistry };
return {
model: normalizeModelCompat(
applyConfiguredProviderOverrides({
discoveredModel: model,
providerConfig,
modelId,
}),
),
authStorage,
modelRegistry,
};
}
/**