openclaw/src/memory/embeddings-gemini.ts
Rodrigo Uroz 7f1712c1ba (fix): enforce embedding model token limit to prevent overflow (#13455)
* fix: enforce embedding model token limit to prevent 8192 overflow

- Replace EMBEDDING_APPROX_CHARS_PER_TOKEN=1 with UTF-8 byte length
  estimation (safe upper bound for tokenizer output)
- Add EMBEDDING_MODEL_MAX_TOKENS=8192 hard cap
- Add splitChunkToTokenLimit() that binary-searches for the largest
  safe split point, with surrogate pair handling (see the sketch after this list)
- Add enforceChunkTokenLimit() wrapper called in indexFile() after
  chunkMarkdown(), before any embedding API call
- Fixes: session files with large JSONL entries could produce chunks
  exceeding text-embedding-3-small's 8192 token limit
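
A minimal sketch of how such a byte-capped split can work. The real splitChunkToTokenLimit()/enforceChunkTokenLimit() live in the memory manager and are not shown in this file, so the body below is an assumption based on the description above:

```ts
// Illustrative sketch only; the actual helpers may differ in detail.
// Assumes UTF-8 byte length is used as a safe upper bound on tokenizer output.
const EMBEDDING_MODEL_MAX_TOKENS = 8192;

const utf8Bytes = (text: string): number => Buffer.byteLength(text, "utf8");

function splitChunkToTokenLimit(text: string, maxTokens = EMBEDDING_MODEL_MAX_TOKENS): string[] {
  if (utf8Bytes(text) <= maxTokens) {
    return [text];
  }
  // Binary-search the largest prefix whose UTF-8 byte length still fits the limit.
  let low = 1;
  let high = text.length;
  let cut = 1;
  while (low <= high) {
    const mid = Math.floor((low + high) / 2);
    if (utf8Bytes(text.slice(0, mid)) <= maxTokens) {
      cut = mid;
      low = mid + 1;
    } else {
      high = mid - 1;
    }
  }
  // Never split between the halves of a surrogate pair (e.g. inside an emoji).
  const lastCode = text.charCodeAt(cut - 1);
  if (cut > 1 && lastCode >= 0xd800 && lastCode <= 0xdbff) {
    cut -= 1;
  }
  return [text.slice(0, cut), ...splitChunkToTokenLimit(text.slice(cut), maxTokens)];
}

// enforceChunkTokenLimit() would map this over the chunks returned by chunkMarkdown().
const enforceChunkTokenLimit = (chunks: string[], maxTokens = EMBEDDING_MODEL_MAX_TOKENS): string[] =>
  chunks.flatMap((chunk) => splitChunkToTokenLimit(chunk, maxTokens));
```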

Tests: 2 new colocated tests in manager.embedding-token-limit.test.ts
- Verifies oversized ASCII chunks are split to <=8192 bytes each (sketched below)
- Verifies multibyte (emoji) content batching respects byte limits
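
As a rough illustration of the first assertion (test framework and shapes assumed; relies on the splitChunkToTokenLimit() sketch above):

```ts
import { describe, expect, it } from "vitest";

// splitChunkToTokenLimit: see the illustrative sketch earlier in this message.
describe("embedding token limit", () => {
  it("splits oversized ASCII chunks to <= 8192 bytes each", () => {
    const oversized = "a".repeat(20_000);
    const pieces = splitChunkToTokenLimit(oversized, 8192);
    // Nothing is lost and every piece fits under the cap.
    expect(pieces.join("")).toBe(oversized);
    for (const piece of pieces) {
      expect(Buffer.byteLength(piece, "utf8")).toBeLessThanOrEqual(8192);
    }
  });
});
```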

* fix: make embedding token limit provider-aware

- Add optional maxInputTokens to EmbeddingProvider interface
- Each provider (openai, gemini, voyage) reports its own limit
- Known-limits map as fallback: openai 8192, gemini 2048, voyage 32K
- Resolution: provider field > known map > default 8192 (sketched below)
- Backward compatible: local/llama uses fallback
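
A compact sketch of that resolution order (illustrative names; the actual lookup sits in the memory manager rather than in a provider file):

```ts
// Assumed names, for illustration of the fallback chain only.
const KNOWN_EMBEDDING_TOKEN_LIMITS: Record<string, number> = {
  openai: 8192,
  gemini: 2048,
  voyage: 32000,
};
const DEFAULT_EMBEDDING_MAX_TOKENS = 8192;

function resolveEmbeddingTokenLimit(provider: { id: string; maxInputTokens?: number }): number {
  // Provider-reported limit > known-limits map > conservative default.
  return (
    provider.maxInputTokens ??
    KNOWN_EMBEDDING_TOKEN_LIMITS[provider.id] ??
    DEFAULT_EMBEDDING_MAX_TOKENS
  );
}
```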

* fix: enforce embedding input size limits (#13455) (thanks @rodrigouroz)

---------

Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
2026-02-10 20:10:17 -06:00


import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js";
import { requireApiKey, resolveApiKeyForProvider } from "../agents/model-auth.js";
import { isTruthyEnvValue } from "../infra/env.js";
import { createSubsystemLogger } from "../logging/subsystem.js";
export type GeminiEmbeddingClient = {
  baseUrl: string;
  headers: Record<string, string>;
  model: string;
  modelPath: string;
};
const DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta";
export const DEFAULT_GEMINI_EMBEDDING_MODEL = "gemini-embedding-001";
// Per-model input token limits this provider reports; models missing from the map
// leave maxInputTokens undefined so callers fall back to their own default.
const GEMINI_MAX_INPUT_TOKENS: Record<string, number> = {
  "text-embedding-004": 2048,
};
const debugEmbeddings = isTruthyEnvValue(process.env.OPENCLAW_DEBUG_MEMORY_EMBEDDINGS);
const log = createSubsystemLogger("memory/embeddings");
const debugLog = (message: string, meta?: Record<string, unknown>) => {
  if (!debugEmbeddings) {
    return;
  }
  const suffix = meta ? ` ${JSON.stringify(meta)}` : "";
  log.raw(`${message}${suffix}`);
};
function resolveRemoteApiKey(remoteApiKey?: string): string | undefined {
  const trimmed = remoteApiKey?.trim();
  if (!trimmed) {
    return undefined;
  }
  // A value that names one of the known env vars is treated as an indirect
  // reference to that variable rather than as a literal API key.
  if (trimmed === "GOOGLE_API_KEY" || trimmed === "GEMINI_API_KEY") {
    return process.env[trimmed]?.trim();
  }
  return trimmed;
}
function normalizeGeminiModel(model: string): string {
  const trimmed = model.trim();
  if (!trimmed) {
    return DEFAULT_GEMINI_EMBEDDING_MODEL;
  }
  const withoutPrefix = trimmed.replace(/^models\//, "");
  if (withoutPrefix.startsWith("gemini/")) {
    return withoutPrefix.slice("gemini/".length);
  }
  if (withoutPrefix.startsWith("google/")) {
    return withoutPrefix.slice("google/".length);
  }
  return withoutPrefix;
}
function normalizeGeminiBaseUrl(raw: string): string {
  const trimmed = raw.replace(/\/+$/, "");
  // Drop an OpenAI-compatibility path segment (".../openai...") so the native
  // Gemini endpoints can be appended to the base URL.
  const openAiIndex = trimmed.indexOf("/openai");
  if (openAiIndex > -1) {
    return trimmed.slice(0, openAiIndex);
  }
  return trimmed;
}
function buildGeminiModelPath(model: string): string {
  return model.startsWith("models/") ? model : `models/${model}`;
}
export async function createGeminiEmbeddingProvider(
  options: EmbeddingProviderOptions,
): Promise<{ provider: EmbeddingProvider; client: GeminiEmbeddingClient }> {
  const client = await resolveGeminiEmbeddingClient(options);
  const baseUrl = client.baseUrl.replace(/\/$/, "");
  const embedUrl = `${baseUrl}/${client.modelPath}:embedContent`;
  const batchUrl = `${baseUrl}/${client.modelPath}:batchEmbedContents`;
  const embedQuery = async (text: string): Promise<number[]> => {
    if (!text.trim()) {
      return [];
    }
    const res = await fetch(embedUrl, {
      method: "POST",
      headers: client.headers,
      body: JSON.stringify({
        content: { parts: [{ text }] },
        taskType: "RETRIEVAL_QUERY",
      }),
    });
    if (!res.ok) {
      const payload = await res.text();
      throw new Error(`gemini embeddings failed: ${res.status} ${payload}`);
    }
    const payload = (await res.json()) as { embedding?: { values?: number[] } };
    return payload.embedding?.values ?? [];
  };
  const embedBatch = async (texts: string[]): Promise<number[][]> => {
    if (texts.length === 0) {
      return [];
    }
    const requests = texts.map((text) => ({
      model: client.modelPath,
      content: { parts: [{ text }] },
      taskType: "RETRIEVAL_DOCUMENT",
    }));
    const res = await fetch(batchUrl, {
      method: "POST",
      headers: client.headers,
      body: JSON.stringify({ requests }),
    });
    if (!res.ok) {
      const payload = await res.text();
      throw new Error(`gemini embeddings failed: ${res.status} ${payload}`);
    }
    const payload = (await res.json()) as { embeddings?: Array<{ values?: number[] }> };
    const embeddings = Array.isArray(payload.embeddings) ? payload.embeddings : [];
    return texts.map((_, index) => embeddings[index]?.values ?? []);
  };
  return {
    provider: {
      id: "gemini",
      model: client.model,
      maxInputTokens: GEMINI_MAX_INPUT_TOKENS[client.model],
      embedQuery,
      embedBatch,
    },
    client,
  };
}
export async function resolveGeminiEmbeddingClient(
  options: EmbeddingProviderOptions,
): Promise<GeminiEmbeddingClient> {
  const remote = options.remote;
  const remoteApiKey = resolveRemoteApiKey(remote?.apiKey);
  const remoteBaseUrl = remote?.baseUrl?.trim();
  const apiKey = remoteApiKey
    ? remoteApiKey
    : requireApiKey(
        await resolveApiKeyForProvider({
          provider: "google",
          cfg: options.config,
          agentDir: options.agentDir,
        }),
        "google",
      );
  const providerConfig = options.config.models?.providers?.google;
  const rawBaseUrl = remoteBaseUrl || providerConfig?.baseUrl?.trim() || DEFAULT_GEMINI_BASE_URL;
  const baseUrl = normalizeGeminiBaseUrl(rawBaseUrl);
  const headerOverrides = Object.assign({}, providerConfig?.headers, remote?.headers);
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
    "x-goog-api-key": apiKey,
    ...headerOverrides,
  };
  const model = normalizeGeminiModel(options.model);
  const modelPath = buildGeminiModelPath(model);
  debugLog("memory embeddings: gemini client", {
    rawBaseUrl,
    baseUrl,
    model,
    modelPath,
    embedEndpoint: `${baseUrl}/${modelPath}:embedContent`,
    batchEndpoint: `${baseUrl}/${modelPath}:batchEmbedContents`,
  });
  return { baseUrl, headers, model, modelPath };
}