Expose only the following methods: - `list_models`: returns the currently available models - `try_list_models`: synchronous variant with no refresh, for TUI use - `get_default_model`: returns the default model (should be tightened to core and received via session configuration) - `get_model_info`: returns the `ModelInfo` for a specific model (should be tightened to core, but is used in tests) - `refresh_if_new_etag`: triggers a refresh when the ETag differs. Also, move the cache into its own struct.
45 lines
1.4 KiB
Rust
45 lines
1.4 KiB
Rust
use std::sync::Arc;
|
|
|
|
use codex_app_server_protocol::Model;
|
|
use codex_app_server_protocol::ReasoningEffortOption;
|
|
use codex_core::ThreadManager;
|
|
use codex_core::config::Config;
|
|
use codex_core::models_manager::manager::RefreshStrategy;
|
|
use codex_protocol::openai_models::ModelPreset;
|
|
use codex_protocol::openai_models::ReasoningEffortPreset;
|
|
|
|
pub async fn supported_models(thread_manager: Arc<ThreadManager>, config: &Config) -> Vec<Model> {
|
|
thread_manager
|
|
.list_models(config, RefreshStrategy::OnlineIfUncached)
|
|
.await
|
|
.into_iter()
|
|
.filter(|preset| preset.show_in_picker)
|
|
.map(model_from_preset)
|
|
.collect()
|
|
}
|
|
|
|
fn model_from_preset(preset: ModelPreset) -> Model {
|
|
Model {
|
|
id: preset.id.to_string(),
|
|
model: preset.model.to_string(),
|
|
display_name: preset.display_name.to_string(),
|
|
description: preset.description.to_string(),
|
|
supported_reasoning_efforts: reasoning_efforts_from_preset(
|
|
preset.supported_reasoning_efforts,
|
|
),
|
|
default_reasoning_effort: preset.default_reasoning_effort,
|
|
is_default: preset.is_default,
|
|
}
|
|
}
|
|
|
|
fn reasoning_efforts_from_preset(
|
|
efforts: Vec<ReasoningEffortPreset>,
|
|
) -> Vec<ReasoningEffortOption> {
|
|
efforts
|
|
.iter()
|
|
.map(|preset| ReasoningEffortOption {
|
|
reasoning_effort: preset.effort,
|
|
description: preset.description.to_string(),
|
|
})
|
|
.collect()
|
|
}
|