mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-05-13 16:07:30 +00:00
* 🛟 fix: Allow empty modelSpecs.list to unstick admin-panel saves The unconditional `.min(1)` on `specsConfigSchema.list` rejected an empty list even when `enforce: false`, leaving admin panels (which save fields path-granularly) with no atomic way to clear the list once it had been populated. Once an admin reached `list: [entry]` and deleted the only entry, every subsequent save failed schema validation and the section became stuck. Relax the schema to `.default([])`. The `.min(1)` was added in #5218 as part of bundled cleanup, not as a deliberate rule. Every consumer of `modelSpecs.list` already handles the empty/undefined case (`?.list`, `?? []`, length-checked), and `processModelSpecs` short-circuits to `undefined` when the list is empty so the runtime treats it as "no specs configured." No call site is load-bearing on length >= 1. Tighten the `buildEndpointOption.js` enforce guard from `?.list && ?.enforce` to `?.list?.length && ?.enforce`. Empty arrays are truthy in JS, so the existing guard would have entered the enforce branch on `list: []` and returned "No model spec selected" or "Invalid model spec" had `processModelSpecs` ever been bypassed. Add a runtime warn in `processModelSpecs` when `enforce: true` is configured alongside an empty list, so operators see the resulting "enforcement disabled" state in logs rather than silently getting a permissive runtime. Add coverage for the empty-list parse path in `config-schemas.spec.ts` and for the empty-list-with-enforce branch in `buildEndpointOption.spec.js`. * chore: update import order in config-schemas.spec.ts
267 lines
7.2 KiB
JavaScript
/**
 * Wrap parseCompactConvo so the real implementation still executes while
 * jest records every call and its return value. Must be declared before
 * require('./buildEndpointOption') so the destructured reference inside
 * the middleware captures the wrapped version.
 */
jest.mock('librechat-data-provider', () => {
  const realModule = jest.requireActual('librechat-data-provider');
  const wrappedParseCompactConvo = jest.fn(function (...callArgs) {
    return realModule.parseCompactConvo(...callArgs);
  });
  return {
    ...realModule,
    parseCompactConvo: wrappedParseCompactConvo,
  };
});
|
|
|
|
const { EModelEndpoint, parseCompactConvo } = require('librechat-data-provider');
|
|
|
|
// Shared buildOptions stub for all endpoint services: echoes the parsed
// body back to the caller with the endpoint name attached.
const mockBuildOptions = jest.fn(function buildOptionsStub(_endpoint, parsedBody) {
  const result = { ...parsedBody };
  result.endpoint = _endpoint;
  return result;
});
|
|
|
|
// Every endpoint service shares the same buildOptions stub; the variable is
// prefixed "mock" so jest's hoisting check permits the factory reference.
jest.mock('~/server/services/Endpoints/azureAssistants', () => {
  return { buildOptions: mockBuildOptions };
});
jest.mock('~/server/services/Endpoints/assistants', () => {
  return { buildOptions: mockBuildOptions };
});
jest.mock('~/server/services/Endpoints/agents', () => {
  return { buildOptions: mockBuildOptions };
});

// File-usage persistence is irrelevant to these tests; stub it out entirely.
jest.mock('~/models', () => {
  return { updateFilesUsage: jest.fn() };
});
|
|
|
|
// Controllable endpoints-config source; each test sets its resolved value
// (or rejection) to drive the schema-selection path under test.
const mockGetEndpointsConfig = jest.fn();
jest.mock('~/server/services/Config', () => {
  return {
    getEndpointsConfig: (...forwardedArgs) => mockGetEndpointsConfig(...forwardedArgs),
  };
});

// Spy on handleError so tests can assert which error payloads were (not) sent.
jest.mock('@librechat/api', () => {
  return { handleError: jest.fn() };
});
|
|
|
|
const buildEndpointOption = require('./buildEndpointOption');
|
|
|
|
/**
 * Build a minimal Express-like request for the middleware under test.
 * @param {object} body - Conversation payload (endpoint, model, params).
 * @param {object} [config={}] - App config, e.g. `{ modelSpecs }`.
 * @returns {{ body: object, config: object, baseUrl: string }}
 */
const createReq = (body, config = {}) => {
  const request = { body, config };
  request.baseUrl = '/api/chat';
  return request;
};
|
|
|
|
/**
 * Build a chainable Express-like response stub: both status() and json()
 * return the stub itself so `res.status(...).json(...)` works.
 * @returns {{ status: jest.Mock, json: jest.Mock }}
 */
const createRes = () => {
  const res = {
    status: jest.fn(),
    json: jest.fn(),
  };
  res.status.mockReturnValue(res);
  res.json.mockReturnValue(res);
  return res;
};
|
|
|
|
// Suite: verifies how buildEndpointOption resolves the custom-endpoint
// `defaultParamsEndpoint` override, and that an empty modelSpecs.list with
// enforce:true does NOT trigger the enforcement error path.
describe('buildEndpointOption - defaultParamsEndpoint parsing', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  it('should pass defaultParamsEndpoint to parseCompactConvo and preserve maxOutputTokens', async () => {
    // Custom endpoint configured to borrow the Anthropic param schema.
    mockGetEndpointsConfig.mockResolvedValue({
      AnthropicClaude: {
        type: EModelEndpoint.custom,
        customParams: {
          defaultParamsEndpoint: EModelEndpoint.anthropic,
        },
      },
    });

    const req = createReq(
      {
        endpoint: 'AnthropicClaude',
        endpointType: EModelEndpoint.custom,
        model: 'anthropic/claude-opus-4.5',
        temperature: 0.7,
        maxOutputTokens: 8192,
        topP: 0.9,
        maxContextTokens: 50000,
      },
      { modelSpecs: null },
    );

    await buildEndpointOption(req, createRes(), jest.fn());

    // Middleware must forward the configured override to the parser.
    expect(parseCompactConvo).toHaveBeenCalledWith(
      expect.objectContaining({
        defaultParamsEndpoint: EModelEndpoint.anthropic,
      }),
    );

    // The real parseCompactConvo ran (wrapped mock); with the Anthropic
    // schema in effect, these params must survive parsing intact.
    const parsedResult = parseCompactConvo.mock.results[0].value;
    expect(parsedResult.maxOutputTokens).toBe(8192);
    expect(parsedResult.topP).toBe(0.9);
    expect(parsedResult.temperature).toBe(0.7);
    expect(parsedResult.maxContextTokens).toBe(50000);
  });

  it('should strip maxOutputTokens when no defaultParamsEndpoint is configured', async () => {
    // Plain custom endpoint: no customParams, so the default (OpenAI-style)
    // schema applies and Anthropic-only fields should be dropped.
    mockGetEndpointsConfig.mockResolvedValue({
      MyOpenRouter: {
        type: EModelEndpoint.custom,
      },
    });

    const req = createReq(
      {
        endpoint: 'MyOpenRouter',
        endpointType: EModelEndpoint.custom,
        model: 'gpt-4o',
        temperature: 0.7,
        maxOutputTokens: 8192,
        max_tokens: 4096,
      },
      { modelSpecs: null },
    );

    await buildEndpointOption(req, createRes(), jest.fn());

    expect(parseCompactConvo).toHaveBeenCalledWith(
      expect.objectContaining({
        defaultParamsEndpoint: undefined,
      }),
    );

    // maxOutputTokens is not part of the fallback schema; max_tokens is.
    const parsedResult = parseCompactConvo.mock.results[0].value;
    expect(parsedResult.maxOutputTokens).toBeUndefined();
    expect(parsedResult.max_tokens).toBe(4096);
    expect(parsedResult.temperature).toBe(0.7);
  });

  it('should strip bedrock region from custom endpoint without defaultParamsEndpoint', async () => {
    mockGetEndpointsConfig.mockResolvedValue({
      MyEndpoint: {
        type: EModelEndpoint.custom,
      },
    });

    const req = createReq(
      {
        endpoint: 'MyEndpoint',
        endpointType: EModelEndpoint.custom,
        model: 'gpt-4o',
        temperature: 0.7,
        // Bedrock-specific field; must not leak through a non-Bedrock schema.
        region: 'us-east-1',
      },
      { modelSpecs: null },
    );

    await buildEndpointOption(req, createRes(), jest.fn());

    const parsedResult = parseCompactConvo.mock.results[0].value;
    expect(parsedResult.region).toBeUndefined();
    expect(parsedResult.temperature).toBe(0.7);
  });

  it('should pass defaultParamsEndpoint when re-parsing enforced model spec', async () => {
    mockGetEndpointsConfig.mockResolvedValue({
      AnthropicClaude: {
        type: EModelEndpoint.custom,
        customParams: {
          defaultParamsEndpoint: EModelEndpoint.anthropic,
        },
      },
    });

    // Spec whose preset should fully replace the request body under enforce.
    const modelSpec = {
      name: 'claude-opus-4.5',
      preset: {
        endpoint: 'AnthropicClaude',
        endpointType: EModelEndpoint.custom,
        model: 'anthropic/claude-opus-4.5',
        temperature: 0.7,
        maxOutputTokens: 8192,
        maxContextTokens: 50000,
      },
    };

    const req = createReq(
      {
        endpoint: 'AnthropicClaude',
        endpointType: EModelEndpoint.custom,
        spec: 'claude-opus-4.5',
        model: 'anthropic/claude-opus-4.5',
      },
      {
        modelSpecs: {
          enforce: true,
          list: [modelSpec],
        },
      },
    );

    await buildEndpointOption(req, createRes(), jest.fn());

    // With enforce, the middleware parses twice: calls[0] is the incoming
    // body, calls[1] is the re-parse of the enforced spec's preset.
    const enforcedCall = parseCompactConvo.mock.calls[1];
    expect(enforcedCall[0]).toEqual(
      expect.objectContaining({
        defaultParamsEndpoint: EModelEndpoint.anthropic,
      }),
    );

    const enforcedResult = parseCompactConvo.mock.results[1].value;
    expect(enforcedResult.maxOutputTokens).toBe(8192);
    expect(enforcedResult.temperature).toBe(0.7);
    expect(enforcedResult.maxContextTokens).toBe(50000);
  });

  it('should fall back to OpenAI schema when getEndpointsConfig fails', async () => {
    // Config lookup failure must not crash parsing; the middleware falls
    // back to the OpenAI schema (no defaultParamsEndpoint resolved).
    mockGetEndpointsConfig.mockRejectedValue(new Error('Config unavailable'));

    const req = createReq(
      {
        endpoint: 'AnthropicClaude',
        endpointType: EModelEndpoint.custom,
        model: 'anthropic/claude-opus-4.5',
        temperature: 0.7,
        maxOutputTokens: 8192,
        max_tokens: 4096,
      },
      { modelSpecs: null },
    );

    await buildEndpointOption(req, createRes(), jest.fn());

    expect(parseCompactConvo).toHaveBeenCalledWith(
      expect.objectContaining({
        defaultParamsEndpoint: undefined,
      }),
    );

    const parsedResult = parseCompactConvo.mock.results[0].value;
    expect(parsedResult.maxOutputTokens).toBeUndefined();
    expect(parsedResult.max_tokens).toBe(4096);
  });

  it('should not enter the enforce branch when modelSpecs.list is empty', async () => {
    // Regression: empty arrays are truthy, so a `?.list && ?.enforce` guard
    // would wrongly enter the enforce branch on `list: []`. The guard now
    // checks `?.list?.length`, so no enforcement error should be emitted.
    mockGetEndpointsConfig.mockResolvedValue({});

    const req = createReq(
      {
        endpoint: EModelEndpoint.openAI,
        model: 'gpt-4',
      },
      {
        modelSpecs: {
          enforce: true,
          list: [],
        },
      },
    );
    const res = createRes();
    const { handleError } = require('@librechat/api');

    await buildEndpointOption(req, res, jest.fn());

    expect(handleError).not.toHaveBeenCalledWith(
      res,
      expect.objectContaining({ text: 'No model spec selected' }),
    );
    expect(handleError).not.toHaveBeenCalledWith(
      res,
      expect.objectContaining({ text: 'Invalid model spec' }),
    );
  });
});
|