fix(agents): allow configured ollama endpoints without dummy api keys
This commit is contained in:
@@ -77,6 +77,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Subagents/announce completion scoping: scope nested direct-child completion aggregation to the current requester run window, harden frozen completion capture for deterministic descendant synthesis, and route completion announce delivery through parent-agent announce turns with provenance-aware internal events. (#35080) Thanks @tyler6204.
|
||||
- Nodes/system.run approval hardening: use explicit argv-mutation signaling when regenerating prepared `rawCommand`, and cover the `system.run.prepare -> system.run` handoff so direct PATH-based `nodes.run` commands no longer fail with `rawCommand does not match command`. (#33137) Thanks @Sid-Qin.
|
||||
- Models/custom provider headers: propagate `models.providers.<name>.headers` across inline, fallback, and registry-found model resolution so header-authenticated proxies consistently receive configured request headers. (#27490) Thanks @Sid-Qin.
|
||||
- Ollama/remote provider auth fallback: synthesize a local runtime auth key for explicitly configured `models.providers.ollama` entries that omit `apiKey`, so remote Ollama endpoints run without requiring manual dummy-key setup while preserving env/profile/config key precedence and missing-config failures. (#11283) Thanks @cpreecs.
|
||||
- Ollama/custom provider headers: forward resolved model headers into native Ollama stream requests so header-authenticated Ollama proxies receive configured request headers. (#24337) Thanks @echoVic.
|
||||
- Daemon/systemd install robustness: treat `systemctl --user is-enabled` exit-code-4 `not-found` responses as not-enabled by combining stderr/stdout detail parsing, so Ubuntu fresh installs no longer fail with `systemctl is-enabled unavailable`. (#33634) Thanks @Yuandiaodiaodiao.
|
||||
- Slack/system-event session routing: resolve reaction/member/pin/interaction system-event session keys through channel/account bindings (with sender-aware DM routing) so inbound Slack events target the correct agent session in multi-account setups instead of defaulting to `agent:main`. (#34045) Thanks @paulomcg, @daht-mad and @vincentkoc.
|
||||
|
||||
@@ -226,6 +226,62 @@ describe("getApiKeyForModel", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("resolves synthetic local auth key for configured ollama provider without apiKey", async () => {
|
||||
await withEnvAsync({ OLLAMA_API_KEY: undefined }, async () => {
|
||||
const resolved = await resolveApiKeyForProvider({
|
||||
provider: "ollama",
|
||||
store: { version: 1, profiles: {} },
|
||||
cfg: {
|
||||
models: {
|
||||
providers: {
|
||||
ollama: {
|
||||
baseUrl: "http://gpu-node-server:11434",
|
||||
api: "openai-completions",
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
expect(resolved.apiKey).toBe("ollama-local");
|
||||
expect(resolved.mode).toBe("api-key");
|
||||
expect(resolved.source).toContain("synthetic local key");
|
||||
});
|
||||
});
|
||||
|
||||
it("prefers explicit OLLAMA_API_KEY over synthetic local key", async () => {
|
||||
await withEnvAsync({ OLLAMA_API_KEY: "env-ollama-key" }, async () => {
|
||||
const resolved = await resolveApiKeyForProvider({
|
||||
provider: "ollama",
|
||||
store: { version: 1, profiles: {} },
|
||||
cfg: {
|
||||
models: {
|
||||
providers: {
|
||||
ollama: {
|
||||
baseUrl: "http://gpu-node-server:11434",
|
||||
api: "openai-completions",
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
expect(resolved.apiKey).toBe("env-ollama-key");
|
||||
expect(resolved.source).toContain("OLLAMA_API_KEY");
|
||||
});
|
||||
});
|
||||
|
||||
it("still throws for ollama when no env/profile/config provider is available", async () => {
|
||||
await withEnvAsync({ OLLAMA_API_KEY: undefined }, async () => {
|
||||
await expect(
|
||||
resolveApiKeyForProvider({
|
||||
provider: "ollama",
|
||||
store: { version: 1, profiles: {} },
|
||||
}),
|
||||
).rejects.toThrow('No API key found for provider "ollama".');
|
||||
});
|
||||
});
|
||||
|
||||
it("resolves Vercel AI Gateway API key from env", async () => {
|
||||
await withEnvAsync({ AI_GATEWAY_API_KEY: "gateway-test-key" }, async () => {
|
||||
const resolved = await resolveApiKeyForProvider({
|
||||
|
||||
@@ -67,6 +67,35 @@ function resolveProviderAuthOverride(
|
||||
return undefined;
|
||||
}
|
||||
|
||||
function resolveSyntheticLocalProviderAuth(params: {
|
||||
cfg: OpenClawConfig | undefined;
|
||||
provider: string;
|
||||
}): ResolvedProviderAuth | null {
|
||||
const normalizedProvider = normalizeProviderId(params.provider);
|
||||
if (normalizedProvider !== "ollama") {
|
||||
return null;
|
||||
}
|
||||
|
||||
const providerConfig = resolveProviderConfig(params.cfg, params.provider);
|
||||
if (!providerConfig) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hasApiConfig =
|
||||
Boolean(providerConfig.api?.trim()) ||
|
||||
Boolean(providerConfig.baseUrl?.trim()) ||
|
||||
(Array.isArray(providerConfig.models) && providerConfig.models.length > 0);
|
||||
if (!hasApiConfig) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
apiKey: "ollama-local",
|
||||
source: "models.providers.ollama (synthetic local key)",
|
||||
mode: "api-key",
|
||||
};
|
||||
}
|
||||
|
||||
function resolveEnvSourceLabel(params: {
|
||||
applied: Set<string>;
|
||||
envVars: string[];
|
||||
@@ -207,6 +236,11 @@ export async function resolveApiKeyForProvider(params: {
|
||||
return { apiKey: customKey, source: "models.json", mode: "api-key" };
|
||||
}
|
||||
|
||||
const syntheticLocalAuth = resolveSyntheticLocalProviderAuth({ cfg, provider });
|
||||
if (syntheticLocalAuth) {
|
||||
return syntheticLocalAuth;
|
||||
}
|
||||
|
||||
const normalized = normalizeProviderId(provider);
|
||||
if (authOverride === undefined && normalized === "amazon-bedrock") {
|
||||
return resolveAwsSdkAuthInfo();
|
||||
|
||||
Reference in New Issue
Block a user