feat: model picker (#8209)

# External (non-OpenAI) Pull Request Requirements

Before opening this Pull Request, please read the dedicated
"Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md

If your PR conforms to our contribution guidelines, replace this text
with a detailed and high quality description of your changes.

Include a link to a bug report or enhancement request.
This commit is contained in:
Ahmed Ibrahim 2025-12-17 16:12:35 -08:00 committed by GitHub
parent 25ecd0c2e4
commit 774bd9e432
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
14 changed files with 222 additions and 110 deletions

View file

@ -42,6 +42,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
}
}
// TODO(aibrahim): invert these priorities — they are currently assigned in the opposite order.
/// Write a models_cache.json file to the codex home directory.
/// This prevents ModelsManager from making network requests to refresh models.
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.

View file

@ -176,6 +176,33 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "caribou".to_string(),
model: "caribou".to_string(),
display_name: "caribou".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Greater reasoning depth for complex problems".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
];
assert_eq!(items, expected_models);
@ -299,7 +326,29 @@ async fn list_models_pagination_works() -> Result<()> {
assert_eq!(fifth_items.len(), 1);
assert_eq!(fifth_items[0].id, "gpt-5.1-codex-max");
assert!(fifth_cursor.is_none());
let sixth_cursor = fifth_cursor.ok_or_else(|| anyhow!("cursor for sixth page"))?;
let sixth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(sixth_cursor.clone()),
})
.await?;
let sixth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(sixth_request)),
)
.await??;
let ModelListResponse {
data: sixth_items,
next_cursor: sixth_cursor,
} = to_response::<ModelListResponse>(sixth_response)?;
assert_eq!(sixth_items.len(), 1);
assert_eq!(sixth_items[0].id, "caribou");
assert!(sixth_cursor.is_none());
Ok(())
}

View file

@ -294,6 +294,20 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
)
// Production models.
} else if slug.starts_with("caribou") {
// Same as gpt-5.1-codex-max.
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
support_verbosity: false,
truncation_policy: TruncationPolicy::Tokens(10_000),
context_window: Some(CONTEXT_WINDOW_272K),
)
} else if slug.starts_with("gpt-5.1-codex-max") {
model_family!(
slug, slug,

View file

@ -11,6 +11,35 @@ pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str =
static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
vec![
ModelPreset {
id: "caribou".to_string(),
model: "caribou".to_string(),
display_name: "caribou".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Greater reasoning depth for complex problems".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
is_default: true,
upgrade: None,
show_in_picker: true,
supported_in_api: false,
},
ModelPreset {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
@ -35,9 +64,14 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
is_default: true,
upgrade: None,
is_default: false,
upgrade: Some(ModelUpgrade {
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
}),
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1-codex".to_string(),
@ -62,11 +96,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: "caribou".to_string(),
}),
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1-codex-mini".to_string(),
@ -86,12 +121,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
}),
upgrade: None,
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.2".to_string(),
@ -118,8 +150,13 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
},
],
is_default: false,
upgrade: None,
upgrade: Some(ModelUpgrade {
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
}),
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1".to_string(),
@ -143,11 +180,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: "caribou".to_string(),
}),
show_in_picker: true,
supported_in_api: true,
},
// Deprecated models.
ModelPreset {
@ -172,11 +210,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: "caribou".to_string(),
}),
show_in_picker: false,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5-codex-mini".to_string(),
@ -201,6 +240,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG.to_string(),
}),
show_in_picker: false,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5".to_string(),
@ -228,11 +268,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: "caribou".to_string(),
}),
show_in_picker: false,
supported_in_api: true,
},
]
});

View file

@ -29,7 +29,8 @@ use crate::openai_models::model_presets::builtin_model_presets;
const MODEL_CACHE_FILE: &str = "models_cache.json";
const DEFAULT_MODEL_CACHE_TTL: Duration = Duration::from_secs(300);
const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
const OPENAI_DEFAULT_API_MODEL: &str = "gpt-5.1-codex-max";
const OPENAI_DEFAULT_CHATGPT_MODEL: &str = "caribou";
const CODEX_AUTO_BALANCED_MODEL: &str = "codex-auto-balanced";
/// Coordinates remote model discovery plus cached metadata on disk.
@ -110,12 +111,12 @@ impl ModelsManager {
if let Err(err) = self.refresh_available_models(config).await {
error!("failed to refresh available models: {err}");
}
let remote_models = self.remote_models.read().await.clone();
let remote_models = self.remote_models(config).await;
self.build_available_models(remote_models)
}
pub fn try_list_models(&self) -> Result<Vec<ModelPreset>, TryLockError> {
let remote_models = self.remote_models.try_read()?.clone();
pub fn try_list_models(&self, config: &Config) -> Result<Vec<ModelPreset>, TryLockError> {
let remote_models = self.try_get_remote_models(config)?;
Ok(self.build_available_models(remote_models))
}
@ -126,7 +127,7 @@ impl ModelsManager {
/// Look up the requested model family while applying remote metadata overrides.
pub async fn construct_model_family(&self, model: &str, config: &Config) -> ModelFamily {
Self::find_family_for_model(model)
.with_remote_overrides(self.remote_models.read().await.clone())
.with_remote_overrides(self.remote_models(config).await)
.with_config_overrides(config)
}
@ -139,7 +140,7 @@ impl ModelsManager {
}
// If codex-auto-balanced exists and the user is signed in via ChatGPT, return it;
// otherwise fall back to the default model for the current auth mode.
let auth_mode = self.auth_manager.get_auth_mode();
let remote_models = self.remote_models.read().await.clone();
let remote_models = self.remote_models(config).await;
if auth_mode == Some(AuthMode::ChatGPT)
&& self
.build_available_models(remote_models)
@ -147,13 +148,15 @@ impl ModelsManager {
.any(|m| m.model == CODEX_AUTO_BALANCED_MODEL)
{
return CODEX_AUTO_BALANCED_MODEL.to_string();
} else if auth_mode == Some(AuthMode::ChatGPT) {
return OPENAI_DEFAULT_CHATGPT_MODEL.to_string();
}
OPENAI_DEFAULT_MODEL.to_string()
OPENAI_DEFAULT_API_MODEL.to_string()
}
#[cfg(any(test, feature = "test-support"))]
pub fn get_model_offline(model: Option<&str>) -> String {
model.unwrap_or(OPENAI_DEFAULT_MODEL).to_string()
model.unwrap_or(OPENAI_DEFAULT_CHATGPT_MODEL).to_string()
}
#[cfg(any(test, feature = "test-support"))]
@ -217,7 +220,7 @@ impl ModelsManager {
let remote_presets: Vec<ModelPreset> = remote_models.into_iter().map(Into::into).collect();
let existing_presets = self.local_models.clone();
let mut merged_presets = Self::merge_presets(remote_presets, existing_presets);
merged_presets = Self::filter_visible_models(merged_presets);
merged_presets = self.filter_visible_models(merged_presets);
let has_default = merged_presets.iter().any(|preset| preset.is_default);
if let Some(default) = merged_presets.first_mut()
@ -229,10 +232,11 @@ impl ModelsManager {
merged_presets
}
fn filter_visible_models(models: Vec<ModelPreset>) -> Vec<ModelPreset> {
fn filter_visible_models(&self, models: Vec<ModelPreset>) -> Vec<ModelPreset> {
let chatgpt_mode = self.auth_manager.get_auth_mode() == Some(AuthMode::ChatGPT);
models
.into_iter()
.filter(|model| model.show_in_picker)
.filter(|model| model.show_in_picker && (chatgpt_mode || model.supported_in_api))
.collect()
}
@ -261,6 +265,22 @@ impl ModelsManager {
merged_presets
}
/// Return the cached remote model list, honoring the `RemoteModels` feature flag.
///
/// When the flag is disabled in `config`, remote metadata is ignored and an
/// empty list is returned so only local presets are used.
async fn remote_models(&self, config: &Config) -> Vec<ModelInfo> {
    if config.features.enabled(Feature::RemoteModels) {
        // Clone under the read guard so the lock is released before returning.
        self.remote_models.read().await.clone()
    } else {
        Vec::new()
    }
}
/// Non-blocking variant of the remote-models accessor.
///
/// Returns `Err(TryLockError)` when the remote-models lock is currently held.
/// When the `RemoteModels` feature is disabled in `config`, returns an empty
/// list without touching the lock at all.
fn try_get_remote_models(&self, config: &Config) -> Result<Vec<ModelInfo>, TryLockError> {
    if config.features.enabled(Feature::RemoteModels) {
        Ok(self.remote_models.try_read()?.clone())
    } else {
        Ok(Vec::new())
    }
}
/// Path of the on-disk models cache file (`models_cache.json`) inside the
/// codex home directory.
fn cache_path(&self) -> PathBuf {
    self.codex_home.join(MODEL_CACHE_FILE)
}
@ -393,7 +413,7 @@ mod tests {
.refresh_available_models(&config)
.await
.expect("refresh succeeds");
let cached_remote = manager.remote_models.read().await.clone();
let cached_remote = manager.remote_models(&config).await;
assert_eq!(cached_remote, remote_models);
let available = manager.list_models(&config).await;
@ -455,7 +475,7 @@ mod tests {
.await
.expect("first refresh succeeds");
assert_eq!(
*manager.remote_models.read().await,
manager.remote_models(&config).await,
remote_models,
"remote cache should store fetched models"
);
@ -466,7 +486,7 @@ mod tests {
.await
.expect("cached refresh succeeds");
assert_eq!(
*manager.remote_models.read().await,
manager.remote_models(&config).await,
remote_models,
"cache path should not mutate stored models"
);
@ -537,7 +557,7 @@ mod tests {
.await
.expect("second refresh succeeds");
assert_eq!(
*manager.remote_models.read().await,
manager.remote_models(&config).await,
updated_models,
"stale cache should trigger refetch"
);
@ -602,7 +622,7 @@ mod tests {
.expect("second refresh succeeds");
let available = manager
.try_list_models()
.try_list_models(&config)
.expect("models should be available");
assert!(
available.iter().any(|preset| preset.model == "remote-new"),

View file

@ -762,7 +762,7 @@ async fn includes_configured_effort_in_request() -> anyhow::Result<()> {
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_default_effort_in_request() -> anyhow::Result<()> {
async fn includes_no_effort_in_request() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
@ -791,7 +791,7 @@ async fn includes_default_effort_in_request() -> anyhow::Result<()> {
.get("reasoning")
.and_then(|t| t.get("effort"))
.and_then(|v| v.as_str()),
Some("medium")
None
);
Ok(())

View file

@ -276,7 +276,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"effort": "medium",
"summary": "auto"
},
"store": false,
@ -346,7 +345,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"effort": "medium",
"summary": "auto"
},
"store": false,
@ -407,7 +405,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"effort": "medium",
"summary": "auto"
},
"store": false,
@ -488,7 +485,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"effort": "medium",
"summary": "auto"
},
"store": false,
@ -569,7 +565,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"effort": "medium",
"summary": "auto"
},
"store": false,

View file

@ -52,8 +52,11 @@ fn expected_models_for_api_key() -> Vec<ModelPreset> {
}
fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
let mut gpt_5_1_codex_max = gpt_5_1_codex_max();
gpt_5_1_codex_max.is_default = false;
vec![
gpt_5_1_codex_max(),
caribou(),
gpt_5_1_codex_max,
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
gpt_5_2(),
@ -61,6 +64,38 @@ fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
]
}
/// Expected `ModelPreset` for the "caribou" model used by these tests.
///
/// Marked `is_default: true` and `supported_in_api: false`, i.e. it is the
/// default only for ChatGPT-authenticated sessions and is filtered out for
/// API-key auth.
fn caribou() -> ModelPreset {
    ModelPreset {
        id: "caribou".to_string(),
        model: "caribou".to_string(),
        display_name: "caribou".to_string(),
        description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Fast responses with lighter reasoning",
            ),
            effort(
                ReasoningEffort::Medium,
                "Balances speed and reasoning depth for everyday tasks",
            ),
            effort(
                ReasoningEffort::High,
                "Greater reasoning depth for complex problems",
            ),
            effort(
                ReasoningEffort::XHigh,
                "Extra high reasoning depth for complex problems",
            ),
        ],
        is_default: true,
        upgrade: None,
        show_in_picker: true,
        // Not offered when authenticated with an API key.
        supported_in_api: false,
    }
}
fn gpt_5_1_codex_max() -> ModelPreset {
ModelPreset {
id: "gpt-5.1-codex-max".to_string(),
@ -87,8 +122,9 @@ fn gpt_5_1_codex_max() -> ModelPreset {
),
],
is_default: true,
upgrade: None,
upgrade: Some(caribou_upgrade()),
show_in_picker: true,
supported_in_api: true,
}
}
@ -114,15 +150,9 @@ fn gpt_5_1_codex() -> ModelPreset {
),
],
is_default: false,
upgrade: Some(gpt_5_1_codex_max_upgrade(
"gpt-5.1-codex",
vec![
ReasoningEffort::Low,
ReasoningEffort::Medium,
ReasoningEffort::High,
],
)),
upgrade: Some(caribou_upgrade()),
show_in_picker: true,
supported_in_api: true,
}
}
@ -144,11 +174,9 @@ fn gpt_5_1_codex_mini() -> ModelPreset {
),
],
is_default: false,
upgrade: Some(gpt_5_1_codex_max_upgrade(
"gpt-5.1-codex-mini",
vec![ReasoningEffort::Medium, ReasoningEffort::High],
)),
upgrade: None,
show_in_picker: true,
supported_in_api: true,
}
}
@ -180,8 +208,9 @@ fn gpt_5_2() -> ModelPreset {
),
],
is_default: false,
upgrade: None,
upgrade: Some(caribou_upgrade()),
show_in_picker: true,
supported_in_api: true,
}
}
@ -207,59 +236,17 @@ fn gpt_5_1() -> ModelPreset {
),
],
is_default: false,
upgrade: Some(gpt_5_1_codex_max_upgrade(
"gpt-5.1",
vec![
ReasoningEffort::Low,
ReasoningEffort::Medium,
ReasoningEffort::High,
],
)),
upgrade: Some(caribou_upgrade()),
show_in_picker: true,
supported_in_api: true,
}
}
fn gpt_5_1_codex_max_upgrade(
migration_config_key: &str,
supported_efforts: Vec<ReasoningEffort>,
) -> codex_protocol::openai_models::ModelUpgrade {
use std::collections::HashMap;
fn nearest_effort(effort: ReasoningEffort, supported: &[ReasoningEffort]) -> ReasoningEffort {
supported
.iter()
.min_by_key(|candidate| (effort_rank(effort) - effort_rank(**candidate)).abs())
.copied()
.unwrap_or(ReasoningEffort::Low)
}
fn effort_rank(effort: ReasoningEffort) -> i32 {
match effort {
ReasoningEffort::None => 0,
ReasoningEffort::Minimal => 1,
ReasoningEffort::Low => 2,
ReasoningEffort::Medium => 3,
ReasoningEffort::High => 4,
ReasoningEffort::XHigh => 5,
}
}
let mut mapping = HashMap::new();
for effort in [
ReasoningEffort::None,
ReasoningEffort::Minimal,
ReasoningEffort::Low,
ReasoningEffort::Medium,
ReasoningEffort::High,
ReasoningEffort::XHigh,
] {
mapping.insert(effort, nearest_effort(effort, &supported_efforts));
}
fn caribou_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
codex_protocol::openai_models::ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
reasoning_effort_mapping: Some(mapping),
migration_config_key: migration_config_key.to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
}
}

View file

@ -388,7 +388,7 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
);
let selected = manager.get_model(&None, &config).await;
assert_eq!(selected, "gpt-5.1-codex-max");
assert_eq!(selected, "caribou");
let available = manager.list_models(&config).await;
assert!(

View file

@ -75,6 +75,8 @@ pub struct ModelPreset {
pub upgrade: Option<ModelUpgrade>,
/// Whether this preset should appear in the picker UI.
pub show_in_picker: bool,
/// Whether this model is supported in the API.
pub supported_in_api: bool,
}
/// Visibility of a model in the picker or APIs.
@ -216,6 +218,7 @@ impl From<ModelInfo> for ModelPreset {
migration_config_key: info.slug.clone(),
}),
show_in_picker: info.visibility == ModelVisibility::List,
supported_in_api: info.supported_in_api,
}
}
}

View file

@ -2191,7 +2191,7 @@ impl ChatWidget {
}
fn lower_cost_preset(&self) -> Option<ModelPreset> {
let models = self.models_manager.try_list_models().ok()?;
let models = self.models_manager.try_list_models(&self.config).ok()?;
models
.iter()
.find(|preset| preset.model == NUDGE_MODEL_SLUG)
@ -2300,7 +2300,7 @@ impl ChatWidget {
let current_model = self.model_family.get_model_slug().to_string();
let presets: Vec<ModelPreset> =
// todo(aibrahim): make this async function
match self.models_manager.try_list_models() {
match self.models_manager.try_list_models(&self.config) {
Ok(models) => models,
Err(_) => {
self.add_info_message(

View file

@ -927,7 +927,7 @@ fn active_blob(chat: &ChatWidget) -> String {
fn get_available_model(chat: &ChatWidget, model: &str) -> ModelPreset {
let models = chat
.models_manager
.try_list_models()
.try_list_models(&chat.config)
.expect("models lock available");
models
.iter()
@ -2014,6 +2014,7 @@ fn single_reasoning_option_skips_selection() {
is_default: false,
upgrade: None,
show_in_picker: true,
supported_in_api: true,
};
chat.open_reasoning_popup(preset);

View file

@ -2100,7 +2100,7 @@ impl ChatWidget {
}
fn lower_cost_preset(&self) -> Option<ModelPreset> {
let models = self.models_manager.try_list_models().ok()?;
let models = self.models_manager.try_list_models(&self.config).ok()?;
models
.iter()
.find(|preset| preset.model == NUDGE_MODEL_SLUG)
@ -2209,7 +2209,7 @@ impl ChatWidget {
let current_model = self.model_family.get_model_slug().to_string();
let presets: Vec<ModelPreset> =
// todo(aibrahim): make this async function
match self.models_manager.try_list_models() {
match self.models_manager.try_list_models(&self.config) {
Ok(models) => models,
Err(_) => {
self.add_info_message(

View file

@ -925,7 +925,7 @@ fn active_blob(chat: &ChatWidget) -> String {
fn get_available_model(chat: &ChatWidget, model: &str) -> ModelPreset {
let models = chat
.models_manager
.try_list_models()
.try_list_models(&chat.config)
.expect("models lock available");
models
.iter()
@ -1906,6 +1906,7 @@ fn single_reasoning_option_skips_selection() {
is_default: false,
upgrade: None,
show_in_picker: true,
supported_in_api: true,
};
chat.open_reasoning_popup(preset);