🧭 feat: Add OpenRouter Prompt Cache Setting (#13029)

* feat: add OpenRouter prompt cache setting

* fix: type OpenRouter schema lookup

* fix: honor proxied OpenRouter prompt cache

* refactor: flatten endpoint schema fallback

* chore: Bump `@librechat/agents` to version 3.1.82

* fix: Default OpenRouter prompt cache params

* test: Align OpenRouter config expectations

* test: Update OpenRouter default cache expectation

* fix: Align OpenRouter Detection

* chore: Bump `@librechat/agents` to version 3.1.83

* docs: Remove OpenRouter prompt cache setup note

* refactor: Use provider enum for OpenRouter defaults

* style: Format OpenRouter defaults guard
Danny Avila 2026-05-09 11:46:09 -04:00 committed by GitHub
parent 0d5c2b339a
commit 8a654dc8b1
17 changed files with 487 additions and 38 deletions


@@ -45,7 +45,7 @@
"@azure/storage-blob": "^12.30.0",
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@librechat/agents": "^3.1.81",
"@librechat/agents": "^3.1.83",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",


@@ -3,6 +3,7 @@ const axios = require('axios');
const yaml = require('js-yaml');
const keyBy = require('lodash/keyBy');
const { loadYaml } = require('@librechat/api');
const { Providers } = require('@librechat/agents');
const { logger } = require('@librechat/data-schemas');
const {
configSchema,
@@ -17,6 +18,48 @@ const defaultConfigPath = path.resolve(projectRoot, 'librechat.yaml');
let i = 0;
const OPENROUTER_PROMPT_CACHE_DEFAULT = {
key: 'promptCache',
default: true,
};
function includesOpenRouter(value) {
return typeof value === 'string' && value.toLowerCase().includes(Providers.OPENROUTER);
}
function isOpenRouterEndpoint(endpoint) {
return includesOpenRouter(endpoint.name) || includesOpenRouter(endpoint.baseURL);
}
function shouldPreserveCustomParams(customParams) {
const defaultEndpoint = customParams?.defaultParamsEndpoint;
return (
defaultEndpoint && defaultEndpoint !== 'custom' && defaultEndpoint !== Providers.OPENROUTER
);
}
function addOpenRouterDefaults(endpoint) {
if (!isOpenRouterEndpoint(endpoint)) {
return;
}
if (shouldPreserveCustomParams(endpoint.customParams)) {
return;
}
const customParams = endpoint.customParams ?? {};
const paramDefinitions = customParams.paramDefinitions ?? [];
const hasPromptCache = paramDefinitions.some((param) => param.key === 'promptCache');
endpoint.customParams = {
...customParams,
defaultParamsEndpoint: Providers.OPENROUTER,
paramDefinitions: hasPromptCache
? paramDefinitions
: [...paramDefinitions, OPENROUTER_PROMPT_CACHE_DEFAULT],
};
}
/**
* Loads custom configuration files and caches the object if the `cache` field at root is true.
* Validation is performed by parsing the config file against the config schema.
@@ -119,6 +162,8 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
}
}
(customConfig.endpoints?.custom ?? []).forEach(addOpenRouterDefaults);
(customConfig.endpoints?.custom ?? [])
.filter((endpoint) => endpoint.customParams)
.forEach((endpoint) => parseCustomParams(endpoint.name, endpoint.customParams));
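For illustration, a minimal sketch of what `addOpenRouterDefaults` does to a bare endpoint entry (the helper is module-private, so this mirrors the test expectations below rather than a public API):

const endpoint = { name: 'OpenRouter', baseURL: 'https://openrouter.ai/api/v1' };
addOpenRouterDefaults(endpoint);
// endpoint.customParams is now:
// {
//   defaultParamsEndpoint: 'openrouter', // Providers.OPENROUTER
//   paramDefinitions: [{ key: 'promptCache', default: true }],
// }
// Endpoints that already define a promptCache param, or whose
// customParams.defaultParamsEndpoint targets another provider, are left as-is;
// parseCustomParams later fills in UI metadata (label, component, columnSpan).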


@@ -8,7 +8,19 @@ jest.mock('librechat-data-provider', () => {
const actual = jest.requireActual('librechat-data-provider');
return {
...actual,
paramSettings: { foo: {}, bar: {}, custom: {} },
paramSettings: {
foo: {},
bar: {},
custom: {},
openrouter: [
{
key: 'promptCache',
type: 'boolean',
component: 'switch',
default: true,
},
],
},
agentParamSettings: {
custom: [],
google: [
@@ -195,7 +207,8 @@ describe('loadCustomConfig', () => {
};
process.env.CONFIG_PATH = 'validConfig.yaml';
loadYaml.mockReturnValueOnce(mockConfig);
await loadCustomConfig();
const result = await loadCustomConfig();
expect(result).toEqual(mockConfig);
});
it('should log the loaded custom config', async () => {
@@ -297,7 +310,7 @@ describe('loadCustomConfig', () => {
it('throws an error when defaultParamsEndpoint is not provided', async () => {
const malformedCustomParams = { defaultParamsEndpoint: undefined };
await expect(loadCustomParams(malformedCustomParams)).rejects.toThrow(
'defaultParamsEndpoint of "Google" endpoint is invalid. Valid options are foo, bar, custom, google',
'defaultParamsEndpoint of "Google" endpoint is invalid. Valid options are foo, bar, custom, openrouter, google',
);
});
@@ -340,5 +353,109 @@ describe('loadCustomConfig', () => {
},
]);
});
it('adds OpenRouter promptCache defaults when custom endpoint name is OpenRouter', async () => {
const openRouterConfig = {
version: '1.0',
cache: false,
endpoints: {
custom: [
{
name: 'OpenRouter',
apiKey: 'user_provided',
baseURL: 'https://proxy.example.com/v1',
models: {
default: ['anthropic/claude-sonnet-4.6'],
},
},
],
},
};
loadYaml.mockReturnValue(openRouterConfig);
const parsedConfig = await loadCustomConfig();
expect(parsedConfig.endpoints.custom[0].customParams).toEqual({
defaultParamsEndpoint: 'openrouter',
paramDefinitions: [
{
columnSpan: 1,
component: 'switch',
default: true,
key: 'promptCache',
label: 'promptCache',
optionType: 'custom',
type: 'boolean',
},
],
});
});
it('adds OpenRouter promptCache defaults when custom endpoint URL is OpenRouter', async () => {
const openRouterConfig = {
version: '1.0',
cache: false,
endpoints: {
custom: [
{
name: 'Company Gateway',
apiKey: 'user_provided',
baseURL: 'https://openrouter.ai/api/v1',
models: {
default: ['anthropic/claude-sonnet-4.6'],
},
},
],
},
};
loadYaml.mockReturnValue(openRouterConfig);
const parsedConfig = await loadCustomConfig();
expect(parsedConfig.endpoints.custom[0].customParams).toMatchObject({
defaultParamsEndpoint: 'openrouter',
paramDefinitions: [
{
default: true,
key: 'promptCache',
},
],
});
});
it('preserves explicit OpenRouter promptCache defaults', async () => {
const openRouterConfig = {
version: '1.0',
cache: false,
endpoints: {
custom: [
{
name: 'OpenRouter',
apiKey: 'user_provided',
baseURL: 'https://openrouter.ai/api/v1',
models: {
default: ['anthropic/claude-sonnet-4.6'],
},
customParams: {
defaultParamsEndpoint: 'openrouter',
paramDefinitions: [{ key: 'promptCache', default: false }],
},
},
],
},
};
loadYaml.mockReturnValue(openRouterConfig);
const parsedConfig = await loadCustomConfig();
expect(parsedConfig.endpoints.custom[0].customParams.paramDefinitions).toEqual([
{
columnSpan: 1,
component: 'switch',
default: false,
key: 'promptCache',
label: 'promptCache',
optionType: 'custom',
type: 'boolean',
},
]);
});
});
});

package-lock.json

@@ -60,7 +60,7 @@
"@azure/storage-blob": "^12.30.0",
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@librechat/agents": "^3.1.81",
"@librechat/agents": "^3.1.83",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
@@ -12088,9 +12088,9 @@
}
},
"node_modules/@librechat/agents": {
"version": "3.1.81",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.1.81.tgz",
"integrity": "sha512-5Ivq0qltFin+C63JyeVk254WxtxF1q2cict8BEaHisIIxNGn+DmNaL9SiK9GCmVVsi3mwGQA/wv6vEHWdiPvLw==",
"version": "3.1.83",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.1.83.tgz",
"integrity": "sha512-6d+GOrR9ORe0a+ofwcJLWXbEP5VIurKxu4bWnOPpLMj5+rPDeTPgfGrmGwPrREQPTkFWpMb8VkEb7iP2ve3XzA==",
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.92.0",
@@ -44658,7 +44658,7 @@
"@azure/storage-blob": "^12.30.0",
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@librechat/agents": "^3.1.81",
"@librechat/agents": "^3.1.83",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.29.0",
"@smithy/node-http-handler": "^4.4.5",


@@ -98,7 +98,7 @@
"@azure/storage-blob": "^12.30.0",
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@librechat/agents": "^3.1.81",
"@librechat/agents": "^3.1.83",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.29.0",
"@smithy/node-http-handler": "^4.4.5",


@@ -1,5 +1,7 @@
import { Providers } from '@librechat/agents';
import { ToolMessage, AIMessage, HumanMessage } from '@librechat/agents/langchain/messages';
import { extractDiscoveredToolsFromHistory } from './run';
import { extractDiscoveredToolsFromHistory, getReasoningKey } from './run';
describe('extractDiscoveredToolsFromHistory', () => {
it('extracts tool names from tool_search JSON output', () => {
@@ -131,3 +133,17 @@ describe('extractDiscoveredToolsFromHistory', () => {
expect(discovered.size).toBe(0);
});
});
describe('getReasoningKey', () => {
it('detects OpenRouter baseURL case-insensitively', () => {
const llmConfig = {
configuration: {
baseURL: 'https://gateway.example/v1/OpenRouter',
},
} as Parameters<typeof getReasoningKey>[1];
const reasoningKey = getReasoningKey(Providers.OPENAI, llmConfig);
expect(reasoningKey).toBe('reasoning');
});
});


@@ -210,6 +210,10 @@ const customProviders = new Set([
KnownEndpoints.ollama,
]);
function includesOpenRouter(value?: string | null): boolean {
return typeof value === 'string' && value.toLowerCase().includes(KnownEndpoints.openrouter);
}
export function getReasoningKey(
provider: Providers,
llmConfig: t.RunLLMConfig,
@@ -219,8 +223,8 @@ export function getReasoningKey(
if (provider === Providers.GOOGLE) {
reasoningKey = 'reasoning';
} else if (
llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
(agentEndpoint && agentEndpoint.toLowerCase().includes(KnownEndpoints.openrouter))
includesOpenRouter(llmConfig.configuration?.baseURL) ||
includesOpenRouter(agentEndpoint)
) {
reasoningKey = 'reasoning';
} else if (


@@ -80,6 +80,7 @@ describe('getOpenAIConfig - Backward Compatibility', () => {
streaming: true,
model: 'qwen/qwen3-max',
include_reasoning: true,
promptCache: true,
apiKey: 'sk-xxxx',
},
configOptions: {


@@ -201,6 +201,7 @@ describe('getOpenAIConfig', () => {
'X-OpenRouter-Categories': 'general-chat,personal-agent',
});
expect(result.llmConfig.include_reasoning).toBe(true);
expect(result.llmConfig.promptCache).toBe(true);
expect(result.provider).toBe('openrouter');
});
@@ -784,6 +785,46 @@ describe('getOpenAIConfig', () => {
const result = getOpenAIConfig(mockApiKey, {}, 'openrouter');
expect(result.llmConfig.include_reasoning).toBe(true);
expect(result.llmConfig.promptCache).toBe(true);
expect(result.provider).toBe('openrouter');
});
it('should detect OpenRouter from baseURL case-insensitively', () => {
const result = getOpenAIConfig(mockApiKey, {
reverseProxyUrl: 'https://gateway.example/v1/OpenRouter',
});
expect(result.llmConfig.include_reasoning).toBe(true);
expect(result.llmConfig.promptCache).toBe(true);
expect(result.provider).toBe('openrouter');
});
it('should preserve explicit promptCache false for OpenRouter', () => {
const result = getOpenAIConfig(
mockApiKey,
{
customParams: {
defaultParamsEndpoint: 'openrouter',
paramDefinitions: [{ key: 'promptCache', default: false }],
},
},
'openrouter',
);
expect(result.llmConfig.promptCache).toBeUndefined();
expect(result.provider).toBe('openrouter');
});
it('should honor dropParams for the OpenRouter promptCache default', () => {
const result = getOpenAIConfig(
mockApiKey,
{
dropParams: ['promptCache'],
},
'openrouter',
);
expect(result.llmConfig.promptCache).toBeUndefined();
expect(result.provider).toBe('openrouter');
});
@@ -1464,6 +1505,10 @@ describe('getOpenAIConfig', () => {
top_k: 50,
repetition_penalty: 1.1,
},
customParams: {
defaultParamsEndpoint: 'openrouter',
paramDefinitions: [{ key: 'promptCache', default: true }],
},
modelOptions: {
model: 'anthropic/claude-3-sonnet',
user: 'openrouter-user',
@@ -1482,6 +1527,7 @@
temperature: 0.7,
maxTokens: 4000,
apiKey: apiKey,
promptCache: true,
});
expect(result.llmConfig.include_reasoning).toBeUndefined();
expect(result.llmConfig.modelKwargs).toMatchObject({
@@ -1496,6 +1542,47 @@
});
expect(result.provider).toBe('openrouter');
});
it('should honor OpenRouter defaults for proxied custom endpoint names', () => {
const endpoint = 'company-gateway';
const apiKey = 'sk-proxy-key';
const baseURL = 'https://llm-proxy.example.com/v1';
const result = getOpenAIConfig(
apiKey,
{
reverseProxyUrl: baseURL,
headers: {
Authorization: `Bearer ${apiKey}`,
},
customParams: {
defaultParamsEndpoint: 'openrouter',
paramDefinitions: [{ key: 'promptCache', default: true }],
},
modelOptions: {
model: 'anthropic/claude-sonnet-4.6',
reasoning_effort: ReasoningEffort.high,
},
},
endpoint,
);
expect(result.llmConfig).toMatchObject({
model: 'anthropic/claude-sonnet-4.6',
apiKey,
promptCache: true,
});
expect(result.llmConfig.include_reasoning).toBeUndefined();
expect(result.llmConfig.modelKwargs).toMatchObject({
reasoning: { effort: ReasoningEffort.high },
});
expect(result.configOptions?.baseURL).toBe(baseURL);
expect(result.configOptions?.defaultHeaders).toMatchObject({
'X-OpenRouter-Title': 'LibreChat',
Authorization: `Bearer ${apiKey}`,
});
expect(result.provider).toBe('openrouter');
});
});
describe('Production-like Azure Scenarios', () => {


@@ -11,6 +11,29 @@ import { createFetch } from '~/utils/generators';
type Fetch = (input: string | URL | Request, init?: RequestInit) => Promise<Response>;
const OPENROUTER_DEFAULT_PARAMS = { promptCache: true };
function includesOpenRouter(value?: string | null): boolean {
return typeof value === 'string' && value.toLowerCase().includes(KnownEndpoints.openrouter);
}
function getDefaultParams({
customDefaultParams,
useOpenRouter,
}: {
customDefaultParams?: Record<string, unknown>;
useOpenRouter: boolean;
}): Record<string, unknown> | undefined {
if (!useOpenRouter) {
return customDefaultParams;
}
return {
...OPENROUTER_DEFAULT_PARAMS,
...customDefaultParams,
};
}
function mergeHeadersPreservingAnthropicBeta(
headers: Record<string, string> | undefined,
defaultHeaders: Record<string, string>,
@@ -54,24 +77,25 @@ export function getOpenAIConfig(
reverseProxyUrl: baseURL,
} = options;
/** Extract default params from customParams.paramDefinitions */
const defaultParams = extractDefaultParams(options.customParams?.paramDefinitions);
let llmConfig: t.OAIClientOptions;
let tools: t.LLMConfigResult['tools'];
const isAnthropic = options.customParams?.defaultParamsEndpoint === EModelEndpoint.anthropic;
const isGoogle = options.customParams?.defaultParamsEndpoint === EModelEndpoint.google;
const isOpenRouter = options.customParams?.defaultParamsEndpoint === KnownEndpoints.openrouter;
const useOpenRouter =
!isAnthropic &&
!isGoogle &&
((baseURL && baseURL.includes(KnownEndpoints.openrouter)) ||
(endpoint != null && endpoint.toLowerCase().includes(KnownEndpoints.openrouter)));
(isOpenRouter || includesOpenRouter(baseURL) || includesOpenRouter(endpoint));
const isVercel =
!isAnthropic &&
!isGoogle &&
((baseURL && baseURL.includes('ai-gateway.vercel.sh')) ||
(endpoint != null && endpoint.toLowerCase().includes(KnownEndpoints.vercel)));
const defaultParams = getDefaultParams({
customDefaultParams: extractDefaultParams(options.customParams?.paramDefinitions),
useOpenRouter: Boolean(useOpenRouter),
});
let azure = options.azure;
let headers = options.headers;
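A quick sketch of the resulting precedence, derived from `getDefaultParams` above: the built-in OpenRouter default sits under any user-supplied param definitions, so explicit config always wins via spread order.

getDefaultParams({ customDefaultParams: undefined, useOpenRouter: true });
// => { promptCache: true } (OPENROUTER_DEFAULT_PARAMS)
getDefaultParams({ customDefaultParams: { promptCache: false }, useOpenRouter: true });
// => { promptCache: false } (explicit config overrides the built-in default)
getDefaultParams({ customDefaultParams: { promptCache: false }, useOpenRouter: false });
// => { promptCache: false } (non-OpenRouter endpoints pass through unchanged)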


@@ -675,6 +675,68 @@ describe('getOpenAILLMConfig', () => {
expect(result.llmConfig).toHaveProperty('include_reasoning', true);
expect(result.llmConfig).not.toHaveProperty('reasoning');
});
it('should pass promptCache only for OpenRouter', () => {
const openRouterResult = getOpenAILLMConfig({
apiKey: 'test-api-key',
streaming: true,
useOpenRouter: true,
modelOptions: {
model: 'anthropic/claude-sonnet-4.6',
promptCache: true,
} as Partial<t.OpenAIParameters & { promptCache?: boolean }>,
});
const openAIResult = getOpenAILLMConfig({
apiKey: 'test-api-key',
streaming: true,
useOpenRouter: false,
modelOptions: {
model: 'gpt-4o',
promptCache: true,
} as Partial<t.OpenAIParameters & { promptCache?: boolean }>,
});
expect(openRouterResult.llmConfig).toHaveProperty('promptCache', true);
expect(openRouterResult.llmConfig.modelKwargs).toBeUndefined();
expect(openAIResult.llmConfig).not.toHaveProperty('promptCache');
expect(openAIResult.llmConfig.modelKwargs).toBeUndefined();
});
it('should resolve OpenRouter promptCache default/add/drop params', () => {
const enabled = getOpenAILLMConfig({
apiKey: 'test-api-key',
streaming: true,
useOpenRouter: true,
defaultParams: { promptCache: true },
modelOptions: {
model: 'anthropic/claude-sonnet-4.6',
},
});
const disabled = getOpenAILLMConfig({
apiKey: 'test-api-key',
streaming: true,
useOpenRouter: true,
defaultParams: { promptCache: true },
addParams: { promptCache: false },
modelOptions: {
model: 'anthropic/claude-sonnet-4.6',
},
});
const dropped = getOpenAILLMConfig({
apiKey: 'test-api-key',
streaming: true,
useOpenRouter: true,
defaultParams: { promptCache: true },
dropParams: ['promptCache'],
modelOptions: {
model: 'anthropic/claude-sonnet-4.6',
},
});
expect(enabled.llmConfig).toHaveProperty('promptCache', true);
expect(disabled.llmConfig).not.toHaveProperty('promptCache');
expect(dropped.llmConfig).not.toHaveProperty('promptCache');
});
});
describe('Verbosity Handling', () => {


@@ -150,10 +150,11 @@ export function getOpenAILLMConfig({
reasoning_summary,
verbosity,
web_search,
promptCache,
frequency_penalty,
presence_penalty,
...modelOptions
} = cleanedModelOptions;
} = cleanedModelOptions as Partial<t.OpenAIParameters & { promptCache?: boolean }>;
const llmConfig = Object.assign(
{
@@ -179,17 +180,23 @@
}
let enableWebSearch = web_search;
let enablePromptCache = promptCache;
/** Apply defaultParams first - only if fields are undefined */
if (defaultParams && typeof defaultParams === 'object') {
for (const [key, value] of Object.entries(defaultParams)) {
/** Handle web_search separately - don't add to config */
if (key === 'web_search') {
if (enableWebSearch === undefined && typeof value === 'boolean') {
enableWebSearch = value;
}
continue;
}
if (key === 'promptCache') {
if (enablePromptCache === undefined && typeof value === 'boolean') {
enablePromptCache = value;
}
continue;
}
if (knownOpenAIParams.has(key)) {
applyDefaultParams(llmConfig as Record<string, unknown>, { [key]: value });
@@ -206,13 +213,18 @@
/** Apply addParams - can override defaultParams */
if (addParams && typeof addParams === 'object') {
for (const [key, value] of Object.entries(addParams)) {
/** Handle web_search directly here instead of adding to modelKwargs or llmConfig */
if (key === 'web_search') {
if (typeof value === 'boolean') {
enableWebSearch = value;
}
continue;
}
if (key === 'promptCache') {
if (typeof value === 'boolean') {
enablePromptCache = value;
}
continue;
}
if (knownOpenAIParams.has(key)) {
(llmConfig as Record<string, unknown>)[key] = value;
} else {
@@ -263,6 +275,9 @@
if (dropParams && dropParams.includes('web_search')) {
enableWebSearch = false;
}
if (dropParams && dropParams.includes('promptCache')) {
enablePromptCache = false;
}
if (useOpenRouter && enableWebSearch) {
/** OpenRouter expects web search as a plugins parameter */
@@ -273,6 +288,9 @@
llmConfig.useResponsesApi = true;
tools.push({ type: 'web_search' });
}
if (useOpenRouter && enablePromptCache === true) {
llmConfig.promptCache = true;
}
/**
* Note: OpenAI reasoning models (o1/o3/gpt-5) do not support temperature and other sampling parameters
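Taken together, the branches above resolve `promptCache` in this order: an explicit `modelOptions` value, then `defaultParams` (only while still undefined), then `addParams` (may override), then `dropParams` (forces it off); the flag is only written to `llmConfig` for OpenRouter. A hypothetical call mirroring the tests above (placeholder API key):

const { llmConfig } = getOpenAILLMConfig({
  apiKey: 'test-key',
  streaming: true,
  useOpenRouter: true,
  defaultParams: { promptCache: true }, // from the injected paramDefinitions
  addParams: { promptCache: false },    // overrides the default
  modelOptions: { model: 'anthropic/claude-sonnet-4.6' },
});
// llmConfig.promptCache is undefined: addParams disabled caching, and the flag
// is only assigned when useOpenRouter && enablePromptCache === true.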


@@ -30,6 +30,7 @@ export type OpenAIConfiguration = OpenAIClientOptions['configuration'];
export type OAIClientOptions = OpenAIClientOptions & {
include_reasoning?: boolean;
promptCache?: boolean;
_lc_stream_delay?: number;
};


@@ -1,6 +1,6 @@
import { replaceSpecialVars, parseConvo, parseCompactConvo, parseTextParts } from '../src/parsers';
import { specialVariables } from '../src/config';
import { EModelEndpoint } from '../src/schemas';
import { EModelEndpoint, Providers } from '../src/schemas';
import { ContentTypes } from '../src/types/runs';
import type { TMessageContentParts } from '../src/types/assistants';
import type { TUser, TConversation } from '../src/types';
@@ -409,6 +409,26 @@ describe('parseConvo - defaultParamsEndpoint', () => {
expect(result?.topK).toBe(40);
});
test('should preserve promptCache when defaultParamsEndpoint is openrouter', () => {
const conversation: Partial<TConversation> = {
model: 'anthropic/claude-sonnet-4.6',
temperature: 0.7,
max_tokens: 8192,
promptCache: true,
};
const result = parseConvo({
endpoint: 'OpenRouter' as EModelEndpoint,
endpointType: EModelEndpoint.custom,
conversation,
defaultParamsEndpoint: Providers.OPENROUTER,
});
expect(result).not.toBeNull();
expect(result?.max_tokens).toBe(8192);
expect(result?.promptCache).toBe(true);
});
test('should not strip fields from non-custom endpoints that already have a schema', () => {
const conversation: Partial<TConversation> = {
model: 'gpt-4o',
@@ -527,6 +547,25 @@ describe('parseCompactConvo - defaultParamsEndpoint', () => {
expect(result?.maxOutputTokens).toBe(8192);
});
test('should preserve promptCache when compacting OpenRouter custom endpoints', () => {
const conversation: Partial<TConversation> = {
model: 'anthropic/claude-sonnet-4.6',
promptCache: true,
iconURL: 'https://example.com/icon.png',
};
const result = parseCompactConvo({
endpoint: 'OpenRouter' as EModelEndpoint,
endpointType: EModelEndpoint.custom,
conversation,
defaultParamsEndpoint: Providers.OPENROUTER,
});
expect(result).not.toBeNull();
expect(result?.promptCache).toBe(true);
expect(result?.['iconURL']).toBeUndefined();
});
test('should fall back to endpointType when defaultParamsEndpoint is null', () => {
const conversation: Partial<TConversation> = {
model: 'gpt-4o',


@@ -6,6 +6,7 @@ import {
EModelEndpoint,
openAISettings,
googleSettings,
Providers,
ReasoningEffort,
AnthropicEffort,
ReasoningSummary,
@ -791,6 +792,8 @@ const openAI: SettingsConfiguration = [
librechat.fileTokenLimit,
];
const openRouter: SettingsConfiguration = [...openAI, anthropic.promptCache];
const openAICol1: SettingsConfiguration = [
baseDefinitions.model as SettingDefinition,
librechat.modelLabel,
@@ -1050,6 +1053,7 @@ export const paramSettings: Record<string, SettingsConfiguration | undefined> =
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.azureOpenAI]: openAI,
[EModelEndpoint.custom]: openAI,
[Providers.OPENROUTER]: openRouter,
[EModelEndpoint.anthropic]: anthropicConfig,
[`${EModelEndpoint.bedrock}-${BedrockProviders.Anthropic}`]: bedrockAnthropic,
[`${EModelEndpoint.bedrock}-${BedrockProviders.MistralAI}`]: bedrockMistral,
@@ -1086,6 +1090,10 @@ export const presetSettings: Record<
[EModelEndpoint.openAI]: openAIColumns,
[EModelEndpoint.azureOpenAI]: openAIColumns,
[EModelEndpoint.custom]: openAIColumns,
[Providers.OPENROUTER]: {
col1: openAICol1,
col2: [...openAICol2, anthropic.promptCache],
},
[EModelEndpoint.anthropic]: {
col1: anthropicCol1,
col2: anthropicCol2,


@@ -6,8 +6,10 @@ import type * as t from './types';
import { ContentTypes } from './types/runs';
import {
openAISchema,
openRouterSchema,
googleSchema,
EModelEndpoint,
Providers,
anthropicSchema,
assistantSchema,
// agentsSchema,
@ -20,6 +22,7 @@ import { alternateName } from './config';
type EndpointSchema =
| typeof openAISchema
| typeof openRouterSchema
| typeof googleSchema
| typeof anthropicSchema
| typeof assistantSchema
@@ -27,11 +30,13 @@
| typeof bedrockInputSchema;
export type EndpointSchemaKey = EModelEndpoint;
type EndpointSchemaLookupKey = EModelEndpoint | Providers.OPENROUTER;
const endpointSchemas: Record<EndpointSchemaKey, EndpointSchema> = {
const endpointSchemas: Record<EndpointSchemaLookupKey, EndpointSchema> = {
[EModelEndpoint.openAI]: openAISchema,
[EModelEndpoint.azureOpenAI]: openAISchema,
[EModelEndpoint.custom]: openAISchema,
[Providers.OPENROUTER]: openRouterSchema,
[EModelEndpoint.google]: googleSchema,
[EModelEndpoint.anthropic]: anthropicSchema,
[EModelEndpoint.assistants]: assistantSchema,
@@ -40,6 +45,24 @@ const endpointSchemas: Record<EndpointSchemaKey, EndpointSchema> = {
[EModelEndpoint.bedrock]: bedrockInputSchema,
};
const isEndpointSchemaLookupKey = (value?: string | null): value is EndpointSchemaLookupKey =>
value != null && Object.prototype.hasOwnProperty.call(endpointSchemas, value);
const getFallbackEndpointSchema = <TSchema>(
schemas: Record<EndpointSchemaLookupKey, TSchema>,
endpointType?: EndpointSchemaKey | null,
defaultParamsEndpoint?: string | null,
): TSchema | undefined => {
if (!endpointType) {
return undefined;
}
const overrideSchema = isEndpointSchemaLookupKey(defaultParamsEndpoint)
? schemas[defaultParamsEndpoint]
: undefined;
return overrideSchema ?? schemas[endpointType];
};
// const schemaCreators: Record<EModelEndpoint, (customSchema: DefaultSchemaValues) => EndpointSchema> = {
// [EModelEndpoint.google]: createGoogleSchema,
// };
@@ -152,17 +175,15 @@ export const parseConvo = ({
possibleValues?: TPossibleValues;
defaultParamsEndpoint?: string | null;
}) => {
let schema = endpointSchemas[endpoint] as EndpointSchema | undefined;
const primarySchema = endpointSchemas[endpoint] as EndpointSchema | undefined;
if (!schema && !endpointType) {
if (!primarySchema && !endpointType) {
throw new Error(`Unknown endpoint: ${endpoint}`);
} else if (!schema) {
const overrideSchema = defaultParamsEndpoint
? endpointSchemas[defaultParamsEndpoint as EndpointSchemaKey]
: undefined;
schema = overrideSchema ?? (endpointType ? endpointSchemas[endpointType] : undefined);
}
const schema =
primarySchema ??
getFallbackEndpointSchema(endpointSchemas, endpointType, defaultParamsEndpoint);
const convo = schema?.parse(conversation) as s.TConversation | undefined;
const { models } = possibleValues ?? {};
@@ -289,13 +310,15 @@ type CompactEndpointSchema =
| typeof compactAssistantSchema
| typeof compactAgentsSchema
| typeof compactGoogleSchema
| typeof openRouterSchema
| typeof anthropicSchema
| typeof bedrockInputSchema;
const compactEndpointSchemas: Record<EndpointSchemaKey, CompactEndpointSchema> = {
const compactEndpointSchemas: Record<EndpointSchemaLookupKey, CompactEndpointSchema> = {
[EModelEndpoint.openAI]: openAISchema,
[EModelEndpoint.azureOpenAI]: openAISchema,
[EModelEndpoint.custom]: openAISchema,
[Providers.OPENROUTER]: openRouterSchema,
[EModelEndpoint.assistants]: compactAssistantSchema,
[EModelEndpoint.azureAssistants]: compactAssistantSchema,
[EModelEndpoint.agents]: compactAgentsSchema,
@@ -321,17 +344,16 @@ export const parseCompactConvo = ({
throw new Error(`undefined endpoint: ${endpoint}`);
}
let schema = compactEndpointSchemas[endpoint] as CompactEndpointSchema | undefined;
const primarySchema = compactEndpointSchemas[endpoint] as CompactEndpointSchema | undefined;
if (!schema && !endpointType) {
if (!primarySchema && !endpointType) {
throw new Error(`Unknown endpoint: ${endpoint}`);
} else if (!schema) {
const overrideSchema = defaultParamsEndpoint
? compactEndpointSchemas[defaultParamsEndpoint as EndpointSchemaKey]
: undefined;
schema = overrideSchema ?? (endpointType ? compactEndpointSchemas[endpointType] : undefined);
}
const schema =
primarySchema ??
getFallbackEndpointSchema(compactEndpointSchemas, endpointType, defaultParamsEndpoint);
if (!schema) {
throw new Error(`Unknown endpointType: ${endpointType}`);
}
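To see the fallback in action, a sketch matching the parser tests earlier in this change: an endpoint named 'OpenRouter' has no direct entry in `endpointSchemas`, so the lookup falls through to `getFallbackEndpointSchema`, which prefers the `defaultParamsEndpoint` override over the plain `endpointType` schema.

const result = parseConvo({
  endpoint: 'OpenRouter' as EModelEndpoint, // no direct schema entry
  endpointType: EModelEndpoint.custom,      // alone, this would resolve to openAISchema
  conversation: { model: 'anthropic/claude-sonnet-4.6', promptCache: true },
  defaultParamsEndpoint: Providers.OPENROUTER, // resolves to openRouterSchema instead
});
// result?.promptCache === true; openAISchema would have stripped the field.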


@@ -1250,6 +1250,11 @@ export const openAISchema = openAIBaseSchema
.transform((obj: Partial<TConversation>) => removeNullishValues(obj, true))
.catch(() => ({}));
export const openRouterSchema = openAIBaseSchema
.merge(tConversationSchema.pick({ promptCache: true }))
.transform((obj: Partial<TConversation>) => removeNullishValues(obj, true))
.catch(() => ({}));
export const compactGoogleSchema = googleBaseSchema
.transform((obj) => {
const newObj: Partial<TConversation> = { ...obj };
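`openRouterSchema` is `openAIBaseSchema` plus the `promptCache` field picked from `tConversationSchema`. A sketch of the difference, assuming the base schema is a plain zod object (which strips unknown keys by default):

openRouterSchema.parse({ model: 'gpt-4o', promptCache: true });
// => { model: 'gpt-4o', promptCache: true }
openAISchema.parse({ model: 'gpt-4o', promptCache: true });
// => { model: 'gpt-4o' } (promptCache is unknown to the base schema and dropped)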