* fix: enforce embedding model token limit to prevent 8192 overflow

  - Replace EMBEDDING_APPROX_CHARS_PER_TOKEN=1 with UTF-8 byte length estimation (safe upper bound for tokenizer output)
  - Add EMBEDDING_MODEL_MAX_TOKENS=8192 hard cap
  - Add splitChunkToTokenLimit() that binary-searches for the largest safe split point, with surrogate pair handling
  - Add enforceChunkTokenLimit() wrapper called in indexFile() after chunkMarkdown(), before any embedding API call
  - Fixes: session files with large JSONL entries could produce chunks exceeding text-embedding-3-small's 8192 token limit

  Tests: 2 new colocated tests in manager.embedding-token-limit.test.ts
  - Verifies oversized ASCII chunks are split to <=8192 bytes each
  - Verifies multibyte (emoji) content batching respects byte limits

* fix: make embedding token limit provider-aware

  - Add optional maxInputTokens to EmbeddingProvider interface
  - Each provider (openai, gemini, voyage) reports its own limit
  - Known-limits map as fallback: openai 8192, gemini 2048, voyage 32K
  - Resolution: provider field > known map > default 8192
  - Backward compatible: local/llama uses fallback

* fix: enforce embedding input size limits (#13455) (thanks @rodrigouroz)

---------

Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
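The helpers named above (splitChunkToTokenLimit, enforceChunkTokenLimit, EMBEDDING_MODEL_MAX_TOKENS) live in the indexing manager, not in the provider file shown below. What follows is a minimal sketch of the approach the commit message describes, assuming UTF-8 byte length as the upper bound on token count; apart from those three names, every identifier and signature here is illustrative, not the shipped implementation.

// Sketch only: estimate tokens by UTF-8 byte length (upper-bounds the tokenizer
// output for these models) and split any chunk that exceeds the cap.
const EMBEDDING_MODEL_MAX_TOKENS = 8192;

const estimateTokens = (text: string): number => new TextEncoder().encode(text).length;

// Binary-search the largest prefix (measured in UTF-16 code units) whose estimated
// token count fits the cap, then back off one unit if the cut would split a surrogate pair.
function splitChunkToTokenLimit(text: string, maxTokens = EMBEDDING_MODEL_MAX_TOKENS): string[] {
  const parts: string[] = [];
  let rest = text;
  while (estimateTokens(rest) > maxTokens) {
    let lo = 1;
    let hi = rest.length - 1;
    while (lo < hi) {
      const mid = Math.ceil((lo + hi) / 2);
      if (estimateTokens(rest.slice(0, mid)) <= maxTokens) {
        lo = mid;
      } else {
        hi = mid - 1;
      }
    }
    let cut = lo;
    const code = rest.charCodeAt(cut - 1);
    if (cut > 1 && code >= 0xd800 && code <= 0xdbff) {
      cut -= 1; // don't cut between a high and low surrogate (e.g. inside an emoji)
    }
    parts.push(rest.slice(0, cut));
    rest = rest.slice(cut);
  }
  if (rest.length > 0) {
    parts.push(rest);
  }
  return parts;
}

// Wrapper applied to chunkMarkdown() output before any embedding API call.
function enforceChunkTokenLimit(chunks: string[], maxTokens = EMBEDDING_MODEL_MAX_TOKENS): string[] {
  return chunks.flatMap((chunk) => splitChunkToTokenLimit(chunk, maxTokens));
}

// Provider-aware cap resolution from the second commit:
// provider-reported limit > known-limits map > default 8192.
const KNOWN_MAX_INPUT_TOKENS: Record<string, number> = { openai: 8192, gemini: 2048, voyage: 32000 };

function resolveMaxInputTokens(provider: { id: string; maxInputTokens?: number }): number {
  return provider.maxInputTokens ?? KNOWN_MAX_INPUT_TOKENS[provider.id] ?? EMBEDDING_MODEL_MAX_TOKENS;
}

In that resolution order, a provider that reports its own maxInputTokens (as the openai provider below does via OPENAI_MAX_INPUT_TOKENS) takes precedence over the known-limits map, which in turn takes precedence over the 8192 default.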
99 lines
3.0 KiB
TypeScript
import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js";
import { requireApiKey, resolveApiKeyForProvider } from "../agents/model-auth.js";

export type OpenAiEmbeddingClient = {
  baseUrl: string;
  headers: Record<string, string>;
  model: string;
};

export const DEFAULT_OPENAI_EMBEDDING_MODEL = "text-embedding-3-small";
const DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1";
const OPENAI_MAX_INPUT_TOKENS: Record<string, number> = {
  "text-embedding-3-small": 8192,
  "text-embedding-3-large": 8192,
  "text-embedding-ada-002": 8191,
};

export function normalizeOpenAiModel(model: string): string {
  const trimmed = model.trim();
  if (!trimmed) {
    return DEFAULT_OPENAI_EMBEDDING_MODEL;
  }
  if (trimmed.startsWith("openai/")) {
    return trimmed.slice("openai/".length);
  }
  return trimmed;
}

export async function createOpenAiEmbeddingProvider(
  options: EmbeddingProviderOptions,
): Promise<{ provider: EmbeddingProvider; client: OpenAiEmbeddingClient }> {
  const client = await resolveOpenAiEmbeddingClient(options);
  const url = `${client.baseUrl.replace(/\/$/, "")}/embeddings`;

  const embed = async (input: string[]): Promise<number[][]> => {
    if (input.length === 0) {
      return [];
    }
    const res = await fetch(url, {
      method: "POST",
      headers: client.headers,
      body: JSON.stringify({ model: client.model, input }),
    });
    if (!res.ok) {
      const text = await res.text();
      throw new Error(`openai embeddings failed: ${res.status} ${text}`);
    }
    const payload = (await res.json()) as {
      data?: Array<{ embedding?: number[] }>;
    };
    const data = payload.data ?? [];
    return data.map((entry) => entry.embedding ?? []);
  };

  return {
    provider: {
      id: "openai",
      model: client.model,
      maxInputTokens: OPENAI_MAX_INPUT_TOKENS[client.model],
      embedQuery: async (text) => {
        const [vec] = await embed([text]);
        return vec ?? [];
      },
      embedBatch: embed,
    },
    client,
  };
}

export async function resolveOpenAiEmbeddingClient(
  options: EmbeddingProviderOptions,
): Promise<OpenAiEmbeddingClient> {
  const remote = options.remote;
  const remoteApiKey = remote?.apiKey?.trim();
  const remoteBaseUrl = remote?.baseUrl?.trim();

  const apiKey = remoteApiKey
    ? remoteApiKey
    : requireApiKey(
        await resolveApiKeyForProvider({
          provider: "openai",
          cfg: options.config,
          agentDir: options.agentDir,
        }),
        "openai",
      );

  const providerConfig = options.config.models?.providers?.openai;
  const baseUrl = remoteBaseUrl || providerConfig?.baseUrl?.trim() || DEFAULT_OPENAI_BASE_URL;
  const headerOverrides = Object.assign({}, providerConfig?.headers, remote?.headers);
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
    Authorization: `Bearer ${apiKey}`,
    ...headerOverrides,
  };
  const model = normalizeOpenAiModel(options.model);
  return { baseUrl, headers, model };
}