fix(ollama): unify context window handling across discovery, merge, and OpenAI-compat transport (#29205)

* fix(ollama): inject num_ctx for OpenAI-compatible transport
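A minimal sketch of the request-body mutation this implies. Whether Ollama's OpenAI-compatible endpoint reads the window from a nested options object (as the native API does) is an assumption, as is the withNumCtx name:

```ts
// Hedged sketch: carry the model's context window into an OpenAI-compatible
// chat request. Ollama's native API reads num_ctx from "options"; assuming
// here that the compat transport forwards the same nested field.
type ChatBody = Record<string, unknown>;

function withNumCtx(body: ChatBody, contextWindow?: number): ChatBody {
  if (typeof contextWindow !== "number" || contextWindow <= 0) return body;
  const options = (body.options ?? {}) as Record<string, unknown>;
  return { ...body, options: { ...options, num_ctx: contextWindow } };
}
```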

* fix(ollama): discover per-model context and preserve higher limits
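A sketch of what per-model discovery against Ollama's /api/show can look like. The architecture-prefixed model_info key (e.g. llama.context_length) is real Ollama output; the key-scanning heuristic is an assumption:

```ts
// Query /api/show for one model and pull its context length out of
// model_info, whose key is architecture-prefixed ("llama.context_length",
// "qwen2.context_length", ...). Returns undefined on any failure.
async function discoverContextLength(
  baseUrl: string,
  model: string,
): Promise<number | undefined> {
  const res = await fetch(`${baseUrl}/api/show`, {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify({ model }),
  });
  if (!res.ok) return undefined;
  const payload = (await res.json()) as { model_info?: Record<string, unknown> };
  const info = payload.model_info ?? {};
  const key = Object.keys(info).find((k) => k.endsWith(".context_length"));
  const value = key ? info[key] : undefined;
  return typeof value === "number" ? value : undefined;
}
```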

* fix(agents): prefer matching provider model for fallback limits
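The test added in this commit (shown in the diff below) pins this behavior; the selection itself reduces to roughly the following, with pickFallbackModel as an illustrative name:

```ts
// Prefer the configured entry whose id matches the requested model; only
// fall back to the provider's first entry when nothing matches.
function pickFallbackModel<M extends { id: string }>(
  models: M[],
  requestedId: string,
): M | undefined {
  return models.find((m) => m.id === requestedId) ?? models[0];
}
```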

* fix(types): require numeric token limits in provider model merge
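A sketch of the numeric requirement during the merge; asTokenLimit is an illustrative name:

```ts
// Only finite positive numbers survive the merge as token limits; strings,
// NaN, and zero/negative values are treated as absent.
function asTokenLimit(value: unknown): number | undefined {
  return typeof value === "number" && Number.isFinite(value) && value > 0
    ? value
    : undefined;
}
```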

* fix(types): accept unknown payload in ollama num_ctx wrapper

* fix(types): simplify ollama settled-result extraction

* config(models): add provider flag for Ollama OpenAI num_ctx injection

* config(schema): allow provider num_ctx injection flag

* config(labels): label provider num_ctx injection flag

* config(help): document provider num_ctx injection flag
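A hypothetical shape for the flag these four commits add. The commit list confirms a provider-level toggle exists, but not its key name, so treat this as a sketch only:

```ts
// Hypothetical config shape: a provider-level flag gating num_ctx injection
// for the OpenAI-compatible transport. The actual key name is not visible
// in this commit list.
const cfg = {
  models: {
    providers: {
      ollama: {
        baseUrl: "http://127.0.0.1:11434/v1",
        injectNumCtx: true, // hypothetical flag name
      },
    },
  },
};
```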

* agents(ollama): gate OpenAI num_ctx injection with provider config

* tests(ollama): cover provider num_ctx injection flag behavior

* docs(config): list provider num_ctx injection option

* docs(ollama): document OpenAI num_ctx injection toggle

* docs(config): clarify merge token-limit precedence

* config(help): note merge uses higher model token limits
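The precedence the help text describes reduces to a max over numeric limits; mergeTokenLimit is an illustrative name:

```ts
// "Merge uses higher model token limits": when both sides carry a numeric
// limit, the larger one wins; otherwise whichever side has a number is kept.
function mergeTokenLimit(
  discovered: number | undefined,
  configured: number | undefined,
): number | undefined {
  if (typeof discovered !== "number") return configured;
  if (typeof configured !== "number") return discovered;
  return Math.max(discovered, configured);
}
```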

* fix(ollama): cap /api/show discovery concurrency
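A common pattern for this kind of cap, sketched under the assumption that discovery maps over model names with a bounded worker pool:

```ts
// Run at most `limit` lookups concurrently; each worker pulls the next
// index when its previous await resolves. Single-threaded JS makes the
// shared `next` counter safe between awaits.
async function mapWithLimit<T, R>(
  items: T[],
  limit: number,
  fn: (item: T) => Promise<R>,
): Promise<R[]> {
  const results: R[] = new Array(items.length);
  let next = 0;
  const worker = async () => {
    while (next < items.length) {
      const i = next++;
      results[i] = await fn(items[i]);
    }
  };
  await Promise.all(
    Array.from({ length: Math.min(limit, items.length) }, worker),
  );
  return results;
}
```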

* fix(ollama): restrict num_ctx injection to OpenAI compat

* tests(ollama): cover ipv6 and compat num_ctx gating

* fix(ollama): detect remote compat endpoints for ollama-labeled providers
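A sketch of path-based detection, which also covers the IPv6 case the tests above mention; the trailing-"/v1" convention is the assumption here:

```ts
// Detect an OpenAI-compatible endpoint by path rather than by host, so
// remote servers and bracketed IPv6 literals (http://[::1]:11434/v1) both
// work; string-splitting on ":" would mangle the IPv6 form.
function isOpenAICompatEndpoint(baseUrl: string): boolean {
  try {
    const url = new URL(baseUrl);
    return url.pathname.replace(/\/+$/, "").endsWith("/v1");
  } catch {
    return false;
  }
}
```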

* fix(ollama): cap per-model /api/show lookups to bound discovery load
Authored by Vincent Koc on 2026-02-27 17:20:47 -08:00; committed by GitHub
parent 70a4f25ab1
commit f16ecd1dac
14 changed files with 582 additions and 21 deletions


@@ -171,6 +171,35 @@ describe("resolveModel", () => {
    expect(result.model?.id).toBe("missing-model");
  });

  it("prefers matching configured model metadata for fallback token limits", () => {
    const cfg = {
      models: {
        providers: {
          custom: {
            baseUrl: "http://localhost:9000",
            models: [
              {
                ...makeModel("model-a"),
                contextWindow: 4096,
                maxTokens: 1024,
              },
              {
                ...makeModel("model-b"),
                contextWindow: 262144,
                maxTokens: 32768,
              },
            ],
          },
        },
      },
    } as OpenClawConfig;
    const result = resolveModel("custom", "model-b", "/tmp/agent", cfg);
    expect(result.model?.contextWindow).toBe(262144);
    expect(result.model?.maxTokens).toBe(32768);
  });

  it("builds an openai-codex fallback for gpt-5.3-codex", () => {
    mockOpenAICodexTemplateModel();