Mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-05-13 16:07:30 +00:00)
* 📥 fix: Use Endpoint-Aware Default Model on Imported Conversations

  Claude conversations imported from claude.ai's data export display "gpt-4o-mini" in the chat UI until the page is refreshed, and any attempt to send a message before refreshing fails with "The model 'gpt-4o-mini' is not available for Anthropic."

  Root cause: ImportBatchBuilder.finishConversation() unconditionally defaulted the saved conversation's `model` field to openAISettings.model.default, regardless of `this.endpoint`. Claude exports don't carry a model name, so every imported Claude conversation landed with endpoint=anthropic but model=gpt-4o-mini.

  Fix: pick the default based on `this.endpoint` via a small lookup (openAI -> gpt-4o-mini, anthropic -> claude-3-5-sonnet-latest), keeping the existing OpenAI default as the fallback for unknown endpoints.

  Fixes #12844

* 🪄 refactor: Resolve Import Default Model From `modelsConfig`

  Replace the hardcoded per-endpoint default lookup added in the previous commit with a runtime resolver that consults the same models config the chat UI uses (`getModelsConfig` in ModelController -> `loadDefaultModels` + `loadConfigModels`). This way an imported conversation defaults to a model the LibreChat instance has actually configured or discovered for the endpoint, instead of a hardcoded constant that may not exist on the deployment.

  Resolution order:

  1. First non-empty model in `modelsConfig[endpoint]`.
  2. Per-endpoint hardcoded fallback (anthropic/openAI settings) if the runtime config is empty for the endpoint or `getModelsConfig` throws.
  3. `openAISettings.model.default` if even the per-endpoint fallback is missing (unknown endpoint).

  `importBatchBuilder.finishConversation` now accepts an optional `defaultModel` argument; each importer resolves it once at the top via `resolveImportDefaultModel({ endpoint, requestUserId, userRole })` and threads it through, as sketched below. ChatGPT message-level model selection also falls back to the resolved default before the hardcoded gpt-4o-mini.
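A minimal sketch of how an importer might resolve the default once and thread it through, per the refactor description above. The importer shape, the require path, and the `jsonData` field names are assumptions for illustration; `resolveImportDefaultModel` and the optional `defaultModel` argument on `finishConversation` are the pieces this change introduces:

```js
// Sketch only: the module path and importer shape are assumed, not taken from the repo.
const { EModelEndpoint } = require('librechat-data-provider');
const { resolveImportDefaultModel } = require('./resolveImportDefaultModel');

async function importClaudeConversations({ jsonData, requestUserId, userRole, builder }) {
  // Resolve once per import job, not once per conversation or message.
  const defaultModel = await resolveImportDefaultModel({
    endpoint: EModelEndpoint.anthropic,
    requestUserId,
    userRole, // passed through to getModelsConfig
  });

  for (const convo of jsonData) {
    // ... push the conversation's messages into the builder ...
    // The commit states finishConversation accepts an optional `defaultModel`;
    // the preceding arguments here are placeholders.
    builder.finishConversation(convo.name, new Date(convo.created_at), defaultModel);
  }
}
```

Resolving once at the top matches the commit's wording ("each importer resolves it once at the top"), so a large export triggers a single `getModelsConfig` lookup rather than one per conversation.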
67 lines
2.5 KiB
JavaScript
const { logger, getTenantId } = require('@librechat/data-schemas');
const { EModelEndpoint, openAISettings, anthropicSettings } = require('librechat-data-provider');
const { getModelsConfig } = require('~/server/controllers/ModelController');

/**
 * Last-resort hardcoded defaults used only when the runtime models config is
 * unavailable or returns no models for the endpoint.
 */
const FALLBACK_MODEL_BY_ENDPOINT = {
  [EModelEndpoint.openAI]: openAISettings.model.default,
  [EModelEndpoint.anthropic]: anthropicSettings.model.default,
};

/**
 * Picks the first available model for an endpoint from a runtime models config.
 *
 * @param {string} endpoint - The endpoint key (e.g. EModelEndpoint.anthropic).
 * @param {TModelsConfig} [modelsConfig] - Map of endpoint -> available model list.
 * @returns {string | undefined} The first model for the endpoint, or undefined.
 */
function pickFirstConfiguredModel(endpoint, modelsConfig) {
  const models = modelsConfig?.[endpoint];
  if (!Array.isArray(models)) {
    return undefined;
  }
  for (const model of models) {
    if (typeof model === 'string' && model.length > 0) {
      return model;
    }
  }
  return undefined;
}

/**
 * Resolves the default model that imported conversations should be saved with
 * for a given endpoint. Prefers the first model exposed by the runtime models
 * config (admin-configured / provider-discovered), and only falls back to the
 * hardcoded per-endpoint default if the runtime config is empty or fails.
 *
 * @param {object} args
 * @param {string} args.endpoint - The endpoint key the import is targeting.
 * @param {string} args.requestUserId - The id of the importing user.
 * @param {string} [args.userRole] - The role of the importing user.
 * @returns {Promise<string>} The default model name to persist on the conversation.
 */
async function resolveImportDefaultModel({ endpoint, requestUserId, userRole }) {
  try {
    const modelsConfig = await getModelsConfig({
      user: { id: requestUserId, role: userRole, tenantId: getTenantId() },
    });
    const configured = pickFirstConfiguredModel(endpoint, modelsConfig);
    if (configured) {
      return configured;
    }
  } catch (error) {
    logger.warn(
      `[import] Failed to resolve default model from modelsConfig for ${endpoint}: ${error.message}`,
    );
  }
  return FALLBACK_MODEL_BY_ENDPOINT[endpoint] ?? openAISettings.model.default;
}

module.exports = {
  FALLBACK_MODEL_BY_ENDPOINT,
  pickFirstConfiguredModel,
  resolveImportDefaultModel,
};
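For reference, a hedged walkthrough of the resolution order against a hypothetical runtime config. The endpoint-to-array shape of `modelsConfig` is inferred from how `pickFirstConfiguredModel` reads it above; the model names and the `bedrock` key are illustrative:

```js
// Hypothetical modelsConfig, shaped as endpoint -> string[] as the code above expects.
const modelsConfig = {
  anthropic: ['', 'claude-3-5-sonnet-latest'], // empty strings are skipped
  openAI: [], // empty list: forces the hardcoded fallback path
};

pickFirstConfiguredModel('anthropic', modelsConfig); // 'claude-3-5-sonnet-latest' (step 1)
pickFirstConfiguredModel('openAI', modelsConfig); // undefined -> FALLBACK_MODEL_BY_ENDPOINT (step 2)
pickFirstConfiguredModel('bedrock', modelsConfig); // undefined, and no per-endpoint fallback either,
// so resolveImportDefaultModel would fall through to openAISettings.model.default (step 3)
```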