// openclaw/src/agents/pi-embedded-runner-extraparams.e2e.test.ts
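
// Tests for the extra-params plumbing of the embedded pi runner:
// resolveExtraParams (per-model config lookup) and applyExtraParamsToAgent
// (wrapping agent.streamFn to inject headers and mutate request payloads).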
import type { StreamFn } from "@mariozechner/pi-agent-core";
import type { Context, Model, SimpleStreamOptions } from "@mariozechner/pi-ai";
import { AssistantMessageEventStream } from "@mariozechner/pi-ai";
import { describe, expect, it } from "vitest";
import { applyExtraParamsToAgent, resolveExtraParams } from "./pi-embedded-runner.js";
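
// resolveExtraParams returns the params object configured for the exact
// "<provider>/<modelId>" key under agents.defaults.models, e.g.
//   { agents: { defaults: { models: { "openai/gpt-4": { params: { temperature: 0.7 } } } } } }
// Any other key, or a missing config, yields undefined.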
describe("resolveExtraParams", () => {
it("returns undefined with no model config", () => {
const result = resolveExtraParams({
cfg: undefined,
provider: "zai",
modelId: "glm-4.7",
});
expect(result).toBeUndefined();
});
it("returns params for exact provider/model key", () => {
const result = resolveExtraParams({
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-4": {
params: {
temperature: 0.7,
maxTokens: 2048,
},
},
},
},
},
},
provider: "openai",
modelId: "gpt-4",
});
expect(result).toEqual({
temperature: 0.7,
maxTokens: 2048,
});
});
it("ignores unrelated model entries", () => {
const result = resolveExtraParams({
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-4": {
params: {
temperature: 0.7,
},
},
},
},
},
},
provider: "openai",
modelId: "gpt-4.1-mini",
});
expect(result).toBeUndefined();
});
});
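
// applyExtraParamsToAgent wraps agent.streamFn so that provider-specific
// headers and payload fields are injected when the stream is invoked.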
describe("applyExtraParamsToAgent", () => {
  function runStoreMutationCase(params: {
    applyProvider: string;
    applyModelId: string;
    model:
      | Model<"openai-responses">
      | Model<"openai-codex-responses">
      | Model<"openai-completions">;
    options?: SimpleStreamOptions;
  }) {
    const payload = { store: false };
    const baseStreamFn: StreamFn = (_model, _context, options) => {
      options?.onPayload?.(payload);
      return new AssistantMessageEventStream();
    };
    const agent = { streamFn: baseStreamFn };
    applyExtraParamsToAgent(agent, undefined, params.applyProvider, params.applyModelId);
    const context: Context = { messages: [] };
    void agent.streamFn?.(params.model, context, params.options ?? {});
    return payload;
  }
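
  // OpenRouter requests get attribution headers (HTTP-Referer / X-Title);
  // caller-supplied headers are merged in, not overwritten.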
it("adds OpenRouter attribution headers to stream options", () => {
const calls: Array<SimpleStreamOptions | undefined> = [];
const baseStreamFn: StreamFn = (_model, _context, options) => {
calls.push(options);
return new AssistantMessageEventStream();
};
const agent = { streamFn: baseStreamFn };
applyExtraParamsToAgent(agent, undefined, "openrouter", "openrouter/auto");
const model = {
api: "openai-completions",
provider: "openrouter",
id: "openrouter/auto",
} as Model<"openai-completions">;
const context: Context = { messages: [] };
void agent.streamFn?.(model, context, { headers: { "X-Custom": "1" } });
expect(calls).toHaveLength(1);
expect(calls[0]?.headers).toEqual({
"HTTP-Referer": "https://openclaw.ai",
"X-Title": "OpenClaw",
"X-Custom": "1",
});
});
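
  // store is forced to true only for Responses payloads sent directly to
  // api.openai.com; proxy base URLs and the Codex backend keep store=false.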
it("forces store=true for direct OpenAI Responses payloads", () => {
const payload = runStoreMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5",
model: {
api: "openai-responses",
provider: "openai",
id: "gpt-5",
baseUrl: "https://api.openai.com/v1",
} as Model<"openai-responses">,
});
expect(payload.store).toBe(true);
});
it("does not force store for OpenAI Responses routed through non-OpenAI base URLs", () => {
const payload = runStoreMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5",
model: {
api: "openai-responses",
provider: "openai",
id: "gpt-5",
baseUrl: "https://proxy.example.com/v1",
} as Model<"openai-responses">,
});
expect(payload.store).toBe(false);
});
it("does not force store=true for Codex responses (Codex requires store=false)", () => {
const payload = runStoreMutationCase({
applyProvider: "openai-codex",
applyModelId: "codex-mini-latest",
model: {
api: "openai-codex-responses",
provider: "openai-codex",
id: "codex-mini-latest",
baseUrl: "https://chatgpt.com/backend-api/codex/responses",
} as Model<"openai-codex-responses">,
});
expect(payload.store).toBe(false);
});
it("does not force store=true for Codex responses (Codex requires store=false)", () => {
const payload = { store: false };
const baseStreamFn: StreamFn = (_model, _context, options) => {
options?.onPayload?.(payload);
return new AssistantMessageEventStream();
};
const agent = { streamFn: baseStreamFn };
applyExtraParamsToAgent(agent, undefined, "openai-codex", "codex-mini-latest");
const model = {
api: "openai-codex-responses",
provider: "openai-codex",
id: "codex-mini-latest",
baseUrl: "https://chatgpt.com/backend-api/codex/responses",
} as Model<"openai-codex-responses">;
const context: Context = { messages: [] };
void agent.streamFn?.(model, context, {});
expect(payload.store).toBe(false);
});
});