import type { OpenClawConfig } from "../config/config.js";
|
|
import type { WizardPrompter } from "../wizard/prompts.js";
|
|
import {
|
|
applyProviderDefaultModel,
|
|
promptAndConfigureOpenAICompatibleSelfHostedProvider,
|
|
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
|
|
SELF_HOSTED_DEFAULT_COST,
|
|
SELF_HOSTED_DEFAULT_MAX_TOKENS,
|
|
} from "./self-hosted-provider-setup.js";
|
|
|
|
// Default endpoint for a locally running vLLM server's OpenAI-compatible API.
export const VLLM_DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1";

// vLLM has no provider-specific limits/pricing; reuse the shared
// self-hosted defaults so all self-hosted providers stay consistent.
export const VLLM_DEFAULT_CONTEXT_WINDOW = SELF_HOSTED_DEFAULT_CONTEXT_WINDOW;

export const VLLM_DEFAULT_MAX_TOKENS = SELF_HOSTED_DEFAULT_MAX_TOKENS;

export const VLLM_DEFAULT_COST = SELF_HOSTED_DEFAULT_COST;
|
|
|
|
export async function promptAndConfigureVllm(params: {
|
|
cfg: OpenClawConfig;
|
|
prompter: WizardPrompter;
|
|
}): Promise<{ config: OpenClawConfig; modelId: string; modelRef: string }> {
|
|
const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider({
|
|
cfg: params.cfg,
|
|
prompter: params.prompter,
|
|
providerId: "vllm",
|
|
providerLabel: "vLLM",
|
|
defaultBaseUrl: VLLM_DEFAULT_BASE_URL,
|
|
defaultApiKeyEnvVar: "VLLM_API_KEY",
|
|
modelPlaceholder: "meta-llama/Meta-Llama-3-8B-Instruct",
|
|
});
|
|
return {
|
|
config: result.config,
|
|
modelId: result.modelId,
|
|
modelRef: result.modelRef,
|
|
};
|
|
}
|
|
|
|
// No vLLM-specific default-model logic exists; re-export the shared helper
// under a vLLM-scoped name so callers keep a uniform per-provider API.
export { applyProviderDefaultModel as applyVllmDefaultModel };
|