fix(memory): discard stdout for qmd update/embed to prevent output cap failure (openclaw#28900) thanks @Glucksberg

Verified:
- pnpm install --frozen-lockfile
- pnpm build
- pnpm check
- pnpm test:macmini

Co-authored-by: Glucksberg <80581902+Glucksberg@users.noreply.github.com>
Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
This commit is contained in:
Glucksberg
2026-03-01 14:16:50 -04:00
committed by GitHub
parent 11d34700c0
commit 134296276a
3 changed files with 41 additions and 5 deletions

View File

@@ -105,6 +105,7 @@ Docs: https://docs.openclaw.ai
- Slack/Socket Mode slash startup: treat `app.options()` registration as best-effort and fall back to static arg menus when listener registration fails, preventing Slack monitor startup crash loops on receiver init edge cases. (#21715)
- Slack/Legacy streaming config: map boolean `channels.slack.streaming=false` to unified streaming mode `off` (with `nativeStreaming=false`) so legacy configs correctly disable draft preview/native streaming instead of defaulting to `partial`. (#25990) Thanks @chilu18.
- Slack/Socket reconnect reliability: reconnect Socket Mode after disconnect/start failures using bounded exponential backoff with abort-aware waits, while preserving clean shutdown behavior and adding disconnect/error helper tests. (#27232) Thanks @pandego.
- Memory/QMD update+embed output cap: discard captured stdout for `qmd update` and `qmd embed` runs (while keeping stderr diagnostics) so large index progress output no longer fails sync with `produced too much output` during boot/refresh. (#28900) Thanks @Glucksberg.
- Onboarding/Custom providers: raise default custom-provider model context window to the runtime hard minimum (16k) and auto-heal existing custom model entries below that threshold during reconfiguration, preventing immediate `Model context window too small (4096 tokens)` failures. (#21653) Thanks @r4jiv007.
- Web UI/Assistant text: strip internal `<relevant-memories>...</relevant-memories>` scaffolding from rendered assistant messages (while preserving code-fence literals), preventing memory-context leakage in chat output for models that echo internal blocks. (#29851) Thanks @Valkster70.
- Dashboard/Sessions: allow authenticated Control UI clients to delete and patch sessions while still blocking regular webchat clients from session mutation RPCs, fixing Dashboard session delete failures. (#21264) Thanks @jskoiz.

View File

@@ -1761,6 +1761,25 @@ describe("QmdMemoryManager", () => {
}
});
it("succeeds on qmd update even when stdout exceeds the output cap", async () => {
// Regression test for #28900: large indexes produce >200K chars of stdout
// during `qmd update`, which used to fail with "produced too much output".
const largeOutput = "x".repeat(300_000);
spawnMock.mockImplementation((_cmd: string, args: string[]) => {
if (args[0] === "update") {
const child = createMockChild({ autoClose: false });
emitAndClose(child, "stdout", largeOutput);
return child;
}
return createMockChild();
});
const { manager } = await createManager({ mode: "status" });
// sync triggers runQmdUpdateOnce -> runQmd(["update"], { discardOutput: true })
await expect(manager.sync({ reason: "manual" })).resolves.toBeUndefined();
await manager.close();
});
it("scopes by channel for agent-prefixed session keys", async () => {
cfg = {
...cfg,

View File

@@ -886,7 +886,10 @@ export class QmdMemoryManager implements MemorySearchManager {
if (this.shouldRunEmbed(force)) {
try {
await runWithQmdEmbedLock(async () => {
await this.runQmd(["embed"], { timeoutMs: this.qmd.update.embedTimeoutMs });
await this.runQmd(["embed"], {
timeoutMs: this.qmd.update.embedTimeoutMs,
discardOutput: true,
});
});
this.lastEmbedAt = Date.now();
this.embedBackoffUntil = null;
@@ -926,12 +929,18 @@ export class QmdMemoryManager implements MemorySearchManager {
private async runQmdUpdateOnce(reason: string): Promise<void> {
try {
await this.runQmd(["update"], { timeoutMs: this.qmd.update.updateTimeoutMs });
await this.runQmd(["update"], {
timeoutMs: this.qmd.update.updateTimeoutMs,
discardOutput: true,
});
} catch (err) {
if (!(await this.tryRepairNullByteCollections(err, reason))) {
throw err;
}
await this.runQmd(["update"], { timeoutMs: this.qmd.update.updateTimeoutMs });
await this.runQmd(["update"], {
timeoutMs: this.qmd.update.updateTimeoutMs,
discardOutput: true,
});
}
}
@@ -1054,7 +1063,7 @@ export class QmdMemoryManager implements MemorySearchManager {
private async runQmd(
args: string[],
opts?: { timeoutMs?: number },
opts?: { timeoutMs?: number; discardOutput?: boolean },
): Promise<{ stdout: string; stderr: string }> {
return await new Promise((resolve, reject) => {
const child = spawn(resolveWindowsCommandShim(this.qmd.command), args, {
@@ -1065,6 +1074,10 @@ export class QmdMemoryManager implements MemorySearchManager {
let stderr = "";
let stdoutTruncated = false;
let stderrTruncated = false;
// When discardOutput is set, skip stdout accumulation entirely and keep
// only a small stderr tail for diagnostics -- never fail on truncation.
// This prevents large `qmd update` runs from hitting the output cap.
const discard = opts?.discardOutput === true;
const timer = opts?.timeoutMs
? setTimeout(() => {
child.kill("SIGKILL");
@@ -1072,6 +1085,9 @@ export class QmdMemoryManager implements MemorySearchManager {
}, opts.timeoutMs)
: null;
child.stdout.on("data", (data) => {
if (discard) {
return; // drain without accumulating
}
const next = appendOutputWithCap(stdout, data.toString("utf8"), this.maxQmdOutputChars);
stdout = next.text;
stdoutTruncated = stdoutTruncated || next.truncated;
@@ -1091,7 +1107,7 @@ export class QmdMemoryManager implements MemorySearchManager {
if (timer) {
clearTimeout(timer);
}
if (stdoutTruncated || stderrTruncated) {
if (!discard && (stdoutTruncated || stderrTruncated)) {
reject(
new Error(
`qmd ${args.join(" ")} produced too much output (limit ${this.maxQmdOutputChars} chars)`,