Add model availability NUX tooltips (#13021)

- override startup tooltips with model availability NUX and persist
per-model show counts in config
- stop showing each model after four exposures and fall back to normal
tooltips
This commit is contained in:
Ahmed Ibrahim 2026-02-27 17:14:06 -08:00 committed by GitHub
parent ff5cbfd7d4
commit ec6f6aacbf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 657 additions and 6 deletions

View file

@ -659,6 +659,14 @@
},
"type": "object"
},
"ModelAvailabilityNuxConfig": {
"additionalProperties": {
"format": "uint32",
"minimum": 0.0,
"type": "integer"
},
"type": "object"
},
"ModelProviderInfo": {
"additionalProperties": false,
"description": "Serializable representation of a provider definition.",
@ -1420,6 +1428,15 @@
"description": "Enable animations (welcome screen, shimmer effects, spinners). Defaults to `true`.",
"type": "boolean"
},
"model_availability_nux": {
"allOf": [
{
"$ref": "#/definitions/ModelAvailabilityNuxConfig"
}
],
"default": {},
"description": "Startup tooltip availability NUX state persisted by the TUI."
},
"notification_method": {
"allOf": [
{

View file

@ -8,6 +8,7 @@ use codex_protocol::config_types::Personality;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tokio::task;
@ -75,6 +76,27 @@ pub fn status_line_items_edit(items: &[String]) -> ConfigEdit {
}
}
pub fn model_availability_nux_count_edits(shown_count: &HashMap<String, u32>) -> Vec<ConfigEdit> {
let mut shown_count_entries: Vec<_> = shown_count.iter().collect();
shown_count_entries.sort_unstable_by(|(left, _), (right, _)| left.cmp(right));
let mut edits = vec![ConfigEdit::ClearPath {
segments: vec!["tui".to_string(), "model_availability_nux".to_string()],
}];
for (model_slug, count) in shown_count_entries {
edits.push(ConfigEdit::SetPath {
segments: vec![
"tui".to_string(),
"model_availability_nux".to_string(),
model_slug.clone(),
],
value: value(i64::from(*count)),
});
}
edits
}
// TODO(jif) move to a dedicated file
mod document_helpers {
use crate::config::types::McpServerConfig;
@ -799,6 +821,12 @@ impl ConfigEditsBuilder {
self
}
/// Queue the edits that persist the per-model availability-NUX shown counts.
pub fn set_model_availability_nux_count(mut self, shown_count: &HashMap<String, u32>) -> Self {
    for edit in model_availability_nux_count_edits(shown_count) {
        self.edits.push(edit);
    }
    self
}
pub fn replace_mcp_servers(mut self, servers: &BTreeMap<String, McpServerConfig>) -> Self {
self.edits
.push(ConfigEdit::ReplaceMcpServers(servers.clone()));
@ -963,6 +991,25 @@ model_reasoning_effort = "high"
assert_eq!(contents, "enabled = true\n");
}
#[test]
fn set_model_availability_nux_count_writes_shown_count() {
    let tmp_home = tempdir().expect("tmpdir");
    let counts = HashMap::from([("gpt-foo".to_string(), 4u32)]);
    // Persist a single NUX count and confirm the exact TOML that lands on disk.
    ConfigEditsBuilder::new(tmp_home.path())
        .set_model_availability_nux_count(&counts)
        .apply_blocking()
        .expect("persist");
    let written =
        std::fs::read_to_string(tmp_home.path().join(CONFIG_TOML_FILE)).expect("read config");
    assert_eq!(written, "[tui.model_availability_nux]\ngpt-foo = 4\n");
}
#[test]
fn set_skill_config_writes_disabled_entry() {
let tmp = tempdir().expect("tmpdir");

View file

@ -9,6 +9,7 @@ use crate::config::types::McpServerDisabledReason;
use crate::config::types::McpServerTransportConfig;
use crate::config::types::MemoriesConfig;
use crate::config::types::MemoriesToml;
use crate::config::types::ModelAvailabilityNuxConfig;
use crate::config::types::Notice;
use crate::config::types::NotificationMethod;
use crate::config::types::Notifications;
@ -276,6 +277,9 @@ pub struct Config {
/// Show startup tooltips in the TUI welcome screen.
pub show_tooltips: bool,
/// Persisted startup availability NUX state for model tooltips.
pub model_availability_nux: ModelAvailabilityNuxConfig,
/// Start the TUI in the specified collaboration mode (plan/default).
/// Controls whether the TUI uses the terminal's alternate screen buffer.
@ -2213,6 +2217,11 @@ impl Config {
.unwrap_or_default(),
animations: cfg.tui.as_ref().map(|t| t.animations).unwrap_or(true),
show_tooltips: cfg.tui.as_ref().map(|t| t.show_tooltips).unwrap_or(true),
model_availability_nux: cfg
.tui
.as_ref()
.map(|t| t.model_availability_nux.clone())
.unwrap_or_default(),
tui_alternate_screen: cfg
.tui
.as_ref()
@ -2401,6 +2410,7 @@ mod tests {
use crate::config::types::McpServerTransportConfig;
use crate::config::types::MemoriesConfig;
use crate::config::types::MemoriesToml;
use crate::config::types::ModelAvailabilityNuxConfig;
use crate::config::types::NotificationMethod;
use crate::config::types::Notifications;
use crate::config_loader::RequirementSource;
@ -2539,6 +2549,51 @@ phase_2_model = "gpt-5"
);
}
#[test]
fn config_toml_deserializes_model_availability_nux() {
    let raw = r#"
[tui.model_availability_nux]
"gpt-foo" = 2
"gpt-bar" = 4
"#;
    let parsed: ConfigToml =
        toml::from_str(raw).expect("TOML deserialization should succeed for TUI NUX");
    // The flattened map picks up both slugs; every other TUI field keeps its
    // deserialization default.
    let expected_counts: HashMap<String, u32> =
        [("gpt-bar".to_string(), 4), ("gpt-foo".to_string(), 2)]
            .into_iter()
            .collect();
    let expected = Tui {
        notifications: Notifications::default(),
        notification_method: NotificationMethod::default(),
        animations: true,
        show_tooltips: true,
        alternate_screen: AltScreenMode::default(),
        status_line: None,
        theme: None,
        model_availability_nux: ModelAvailabilityNuxConfig {
            shown_count: expected_counts,
        },
    };
    assert_eq!(parsed.tui.expect("tui config should deserialize"), expected);
}
#[test]
fn runtime_config_defaults_model_availability_nux() {
    let codex_home = tempdir().expect("tempdir");
    let loaded = Config::load_from_base_config_with_overrides(
        ConfigToml::default(),
        ConfigOverrides::default(),
        codex_home.path().to_path_buf(),
    )
    .expect("load config");
    // With no `[tui.model_availability_nux]` table, the runtime state is empty.
    assert_eq!(
        loaded.model_availability_nux,
        ModelAvailabilityNuxConfig::default()
    );
}
#[test]
fn config_toml_deserializes_permissions_network() {
let toml = r#"
@ -2673,6 +2728,7 @@ theme = "dracula"
alternate_screen: AltScreenMode::Auto,
status_line: None,
theme: None,
model_availability_nux: ModelAvailabilityNuxConfig::default(),
}
);
}
@ -4884,6 +4940,7 @@ model_verbosity = "high"
tui_notification_method: Default::default(),
animations: true,
show_tooltips: true,
model_availability_nux: ModelAvailabilityNuxConfig::default(),
analytics_enabled: Some(true),
feedback_enabled: true,
tui_alternate_screen: AltScreenMode::Auto,
@ -5011,6 +5068,7 @@ model_verbosity = "high"
tui_notification_method: Default::default(),
animations: true,
show_tooltips: true,
model_availability_nux: ModelAvailabilityNuxConfig::default(),
analytics_enabled: Some(true),
feedback_enabled: true,
tui_alternate_screen: AltScreenMode::Auto,
@ -5136,6 +5194,7 @@ model_verbosity = "high"
tui_notification_method: Default::default(),
animations: true,
show_tooltips: true,
model_availability_nux: ModelAvailabilityNuxConfig::default(),
analytics_enabled: Some(false),
feedback_enabled: true,
tui_alternate_screen: AltScreenMode::Auto,
@ -5247,6 +5306,7 @@ model_verbosity = "high"
tui_notification_method: Default::default(),
animations: true,
show_tooltips: true,
model_availability_nux: ModelAvailabilityNuxConfig::default(),
analytics_enabled: Some(true),
feedback_enabled: true,
tui_alternate_screen: AltScreenMode::Auto,

View file

@ -667,6 +667,14 @@ impl fmt::Display for NotificationMethod {
}
}
/// Per-model counters recording how many times the startup model-availability
/// NUX tooltip has been shown; persisted under `[tui.model_availability_nux]`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct ModelAvailabilityNuxConfig {
    /// Number of times a startup availability NUX has been shown per model slug.
    // `flatten` serializes the map entries directly as `slug = count` pairs,
    // so the TOML table body contains only model slugs.
    #[serde(default, flatten)]
    pub shown_count: HashMap<String, u32>,
}
/// Collection of settings that are specific to the TUI.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
@ -716,6 +724,10 @@ pub struct Tui {
/// Use `/theme` in the TUI or see `$CODEX_HOME/themes` for custom themes.
#[serde(default)]
pub theme: Option<String>,
/// Startup tooltip availability NUX state persisted by the TUI.
#[serde(default)]
pub model_availability_nux: ModelAvailabilityNuxConfig,
}
const fn default_true() -> bool {

View file

@ -46,6 +46,7 @@ use codex_core::config::ConfigBuilder;
use codex_core::config::ConfigOverrides;
use codex_core::config::edit::ConfigEdit;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::types::ModelAvailabilityNuxConfig;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::features::Feature;
use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
@ -61,6 +62,7 @@ use codex_protocol::config_types::Personality;
#[cfg(target_os = "windows")]
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::items::TurnItem;
use codex_protocol::openai_models::ModelAvailabilityNux;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
@ -451,12 +453,77 @@ fn target_preset_for_upgrade<'a>(
.find(|preset| preset.model == target_model && preset.show_in_picker)
}
/// Each model's availability NUX tooltip is shown at most this many times.
const MODEL_AVAILABILITY_NUX_MAX_SHOW_COUNT: u32 = 4;

/// A startup tooltip that replaces the regular tooltip rotation to announce a
/// newly available model.
#[derive(Debug, Clone, PartialEq, Eq)]
struct StartupTooltipOverride {
    // Slug of the model whose availability is being announced.
    model_slug: String,
    // User-facing tooltip text taken from the model's `availability_nux`.
    message: String,
}
/// Pick the first model (in catalog order) that carries an availability NUX
/// and has not yet hit the per-model show cap.
///
/// Returns `None` when no model is eligible.
fn select_model_availability_nux(
    available_models: &[ModelPreset],
    nux_config: &ModelAvailabilityNuxConfig,
) -> Option<StartupTooltipOverride> {
    for preset in available_models {
        // Models without a NUX message are never announced.
        let Some(ModelAvailabilityNux { message }) = preset.availability_nux.as_ref() else {
            continue;
        };
        let times_shown = nux_config
            .shown_count
            .get(&preset.model)
            .copied()
            .unwrap_or(0);
        if times_shown < MODEL_AVAILABILITY_NUX_MAX_SHOW_COUNT {
            return Some(StartupTooltipOverride {
                model_slug: preset.model.clone(),
                message: message.clone(),
            });
        }
    }
    None
}
async fn prepare_startup_tooltip_override(
config: &mut Config,
available_models: &[ModelPreset],
is_first_run: bool,
) -> Option<String> {
if is_first_run || !config.show_tooltips {
return None;
}
let tooltip_override =
select_model_availability_nux(available_models, &config.model_availability_nux)?;
let shown_count = config
.model_availability_nux
.shown_count
.get(&tooltip_override.model_slug)
.copied()
.unwrap_or_default();
let next_count = shown_count.saturating_add(1);
let mut updated_shown_count = config.model_availability_nux.shown_count.clone();
updated_shown_count.insert(tooltip_override.model_slug.clone(), next_count);
if let Err(err) = ConfigEditsBuilder::new(&config.codex_home)
.set_model_availability_nux_count(&updated_shown_count)
.apply()
.await
{
tracing::error!(
error = %err,
model = %tooltip_override.model_slug,
"failed to persist model availability nux count"
);
return Some(tooltip_override.message);
}
config.model_availability_nux.shown_count = updated_shown_count;
Some(tooltip_override.message)
}
async fn handle_model_migration_prompt_if_needed(
tui: &mut tui::Tui,
config: &mut Config,
model: &str,
app_event_tx: &AppEventSender,
available_models: Vec<ModelPreset>,
available_models: &[ModelPreset],
) -> Option<AppExitInfo> {
let upgrade = available_models
.iter()
@ -481,13 +548,13 @@ async fn handle_model_migration_prompt_if_needed(
model,
&target_model,
&config.notices.model_migrations,
&available_models,
available_models,
) {
return None;
}
let current_preset = available_models.iter().find(|preset| preset.model == model);
let target_preset = target_preset_for_upgrade(&available_models, &target_model);
let target_preset = target_preset_for_upgrade(available_models, &target_model);
let target_preset = target_preset?;
let target_display_name = target_preset.display_name.clone();
let heading_label = if target_display_name == model {
@ -668,6 +735,7 @@ impl App {
is_first_run: false,
feedback_audience: self.feedback_audience,
model: Some(self.chat_widget.current_model().to_string()),
startup_tooltip_override: None,
status_line_invalid_items_warned: self.status_line_invalid_items_warned.clone(),
otel_manager: self.otel_manager.clone(),
}
@ -1194,6 +1262,7 @@ impl App {
is_first_run: false,
feedback_audience: self.feedback_audience,
model: Some(model),
startup_tooltip_override: None,
status_line_invalid_items_warned: self.status_line_invalid_items_warned.clone(),
otel_manager: self.otel_manager.clone(),
};
@ -1340,7 +1409,7 @@ impl App {
&mut config,
model.as_str(),
&app_event_tx,
available_models,
&available_models,
)
.await;
if let Some(exit_info) = exit_info {
@ -1349,7 +1418,6 @@ impl App {
if let Some(updated_model) = config.model.clone() {
model = updated_model;
}
let auth = auth_manager.auth().await;
let auth_ref = auth.as_ref();
// Determine who should see internal Slack routing. We treat
@ -1393,6 +1461,9 @@ impl App {
Self::should_wait_for_initial_session(&session_selection);
let mut chat_widget = match session_selection {
SessionSelection::StartFresh | SessionSelection::Exit => {
let startup_tooltip_override =
prepare_startup_tooltip_override(&mut config, &available_models, is_first_run)
.await;
let init = crate::chatwidget::ChatWidgetInit {
config: config.clone(),
frame_requester: tui.frame_requester(),
@ -1410,6 +1481,7 @@ impl App {
is_first_run,
feedback_audience,
model: Some(model.clone()),
startup_tooltip_override,
status_line_invalid_items_warned: status_line_invalid_items_warned.clone(),
otel_manager: otel_manager.clone(),
};
@ -1444,6 +1516,7 @@ impl App {
is_first_run,
feedback_audience,
model: config.model.clone(),
startup_tooltip_override: None,
status_line_invalid_items_warned: status_line_invalid_items_warned.clone(),
otel_manager: otel_manager.clone(),
};
@ -1480,6 +1553,7 @@ impl App {
is_first_run,
feedback_audience,
model: config.model.clone(),
startup_tooltip_override: None,
status_line_invalid_items_warned: status_line_invalid_items_warned.clone(),
otel_manager: otel_manager.clone(),
};
@ -3413,8 +3487,10 @@ mod tests {
use codex_core::CodexAuth;
use codex_core::config::ConfigBuilder;
use codex_core::config::ConfigOverrides;
use codex_core::config::types::ModelAvailabilityNuxConfig;
use codex_otel::OtelManager;
use codex_protocol::ThreadId;
use codex_protocol::openai_models::ModelAvailabilityNux;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
@ -3866,6 +3942,7 @@ mod tests {
event,
is_first,
None,
None,
)) as Arc<dyn HistoryCell>
};
@ -4068,6 +4145,15 @@ mod tests {
codex_core::test_support::all_model_presets().clone()
}
/// Build a `ModelAvailabilityNuxConfig` from `(model slug, shown count)` pairs.
fn model_availability_nux_config(shown_count: &[(&str, u32)]) -> ModelAvailabilityNuxConfig {
    let shown_count = shown_count
        .iter()
        .map(|&(model, count)| (model.to_string(), count))
        .collect();
    ModelAvailabilityNuxConfig { shown_count }
}
fn model_migration_copy_to_plain_text(
copy: &crate::model_migration::ModelMigrationCopy,
) -> String {
@ -4124,6 +4210,120 @@ mod tests {
));
}
#[test]
fn select_model_availability_nux_picks_only_eligible_model() {
    // Start from a catalog with no NUX messages, then attach one to gpt-5.
    let mut presets = all_model_presets();
    for preset in presets.iter_mut() {
        preset.availability_nux = None;
    }
    presets
        .iter_mut()
        .find(|preset| preset.model == "gpt-5")
        .expect("target preset present")
        .availability_nux = Some(ModelAvailabilityNux {
        message: "gpt-5 is available".to_string(),
    });
    let selected = select_model_availability_nux(&presets, &model_availability_nux_config(&[]));
    // The only model carrying a NUX is the one announced.
    let expected = StartupTooltipOverride {
        model_slug: "gpt-5".to_string(),
        message: "gpt-5 is available".to_string(),
    };
    assert_eq!(selected, Some(expected));
}
#[test]
fn select_model_availability_nux_skips_missing_and_exhausted_models() {
    // Only gpt-5 and gpt-5.2 carry a NUX; gpt-5 is already at the show cap.
    let mut presets = all_model_presets();
    for preset in presets.iter_mut() {
        preset.availability_nux = None;
    }
    presets
        .iter_mut()
        .find(|preset| preset.model == "gpt-5")
        .expect("gpt-5 preset present")
        .availability_nux = Some(ModelAvailabilityNux {
        message: "gpt-5 is available".to_string(),
    });
    presets
        .iter_mut()
        .find(|preset| preset.model == "gpt-5.2")
        .expect("gpt-5.2 preset present")
        .availability_nux = Some(ModelAvailabilityNux {
        message: "gpt-5.2 is available".to_string(),
    });
    let nux_state =
        model_availability_nux_config(&[("gpt-5", MODEL_AVAILABILITY_NUX_MAX_SHOW_COUNT)]);
    // gpt-5 is exhausted, so the remaining NUX-bearing model is selected.
    assert_eq!(
        select_model_availability_nux(&presets, &nux_state),
        Some(StartupTooltipOverride {
            model_slug: "gpt-5.2".to_string(),
            message: "gpt-5.2 is available".to_string(),
        })
    );
}
#[test]
fn select_model_availability_nux_uses_existing_model_order_as_priority() {
    let mut presets = all_model_presets();
    for preset in presets.iter_mut() {
        preset.availability_nux = None;
    }
    // Attach a NUX to two models; the catalog's own ordering decides priority.
    for (slug, text) in [("gpt-5", "first"), ("gpt-5.2", "second")] {
        presets
            .iter_mut()
            .find(|preset| preset.model == slug)
            .unwrap_or_else(|| panic!("{slug} preset present"))
            .availability_nux = Some(ModelAvailabilityNux {
            message: text.to_string(),
        });
    }
    let picked = select_model_availability_nux(&presets, &model_availability_nux_config(&[]));
    assert_eq!(
        picked,
        Some(StartupTooltipOverride {
            model_slug: "gpt-5.2".to_string(),
            message: "second".to_string(),
        })
    );
}
#[test]
fn select_model_availability_nux_returns_none_when_all_models_are_exhausted() {
    let mut presets = all_model_presets();
    for preset in presets.iter_mut() {
        preset.availability_nux = None;
    }
    presets
        .iter_mut()
        .find(|preset| preset.model == "gpt-5")
        .expect("target preset present")
        .availability_nux = Some(ModelAvailabilityNux {
        message: "gpt-5 is available".to_string(),
    });
    // The sole NUX-bearing model already hit the show-count cap.
    let exhausted =
        model_availability_nux_config(&[("gpt-5", MODEL_AVAILABILITY_NUX_MAX_SHOW_COUNT)]);
    assert_eq!(select_model_availability_nux(&presets, &exhausted), None);
}
#[tokio::test]
async fn model_migration_prompt_respects_hide_flag_and_self_target() {
let mut seen = BTreeMap::new();
@ -4363,6 +4563,7 @@ mod tests {
event,
is_first,
None,
None,
)) as Arc<dyn HistoryCell>
};

View file

@ -462,6 +462,7 @@ pub(crate) struct ChatWidgetInit {
pub(crate) is_first_run: bool,
pub(crate) feedback_audience: FeedbackAudience,
pub(crate) model: Option<String>,
pub(crate) startup_tooltip_override: Option<String>,
// Shared latch so we only warn once about invalid status-line item IDs.
pub(crate) status_line_invalid_items_warned: Arc<AtomicBool>,
pub(crate) otel_manager: OtelManager,
@ -603,6 +604,8 @@ pub(crate) struct ChatWidget {
frame_requester: FrameRequester,
// Whether to include the initial welcome banner on session configured
show_welcome_banner: bool,
// One-shot tooltip override for the primary startup session.
startup_tooltip_override: Option<String>,
// When resuming an existing session (selected via resume picker), avoid an
// immediate redraw on SessionConfigured to prevent a gratuitous UI flicker.
suppress_session_configured_redraw: bool,
@ -1144,11 +1147,13 @@ impl ChatWidget {
);
self.refresh_model_display();
self.sync_personality_command_enabled();
let startup_tooltip_override = self.startup_tooltip_override.take();
let session_info_cell = history_cell::new_session_info(
&self.config,
&model_for_header,
event,
self.show_welcome_banner,
startup_tooltip_override,
self.auth_manager
.auth_cached()
.and_then(|auth| auth.account_plan_type()),
@ -2755,6 +2760,7 @@ impl ChatWidget {
is_first_run,
feedback_audience,
model,
startup_tooltip_override,
status_line_invalid_items_warned,
otel_manager,
} = common;
@ -2852,6 +2858,7 @@ impl ChatWidget {
queued_user_messages: VecDeque::new(),
queued_message_edit_binding,
show_welcome_banner: is_first_run,
startup_tooltip_override,
suppress_session_configured_redraw: false,
pending_notification: None,
quit_shortcut_expires_at: None,
@ -2933,6 +2940,7 @@ impl ChatWidget {
is_first_run,
feedback_audience,
model,
startup_tooltip_override,
status_line_invalid_items_warned,
otel_manager,
} = common;
@ -3033,6 +3041,7 @@ impl ChatWidget {
queued_user_messages: VecDeque::new(),
queued_message_edit_binding,
show_welcome_banner: is_first_run,
startup_tooltip_override,
suppress_session_configured_redraw: false,
pending_notification: None,
quit_shortcut_expires_at: None,
@ -3099,6 +3108,7 @@ impl ChatWidget {
is_first_run: _,
feedback_audience,
model,
startup_tooltip_override: _,
status_line_invalid_items_warned,
otel_manager,
} = common;
@ -3195,6 +3205,7 @@ impl ChatWidget {
queued_user_messages: VecDeque::new(),
queued_message_edit_binding,
show_welcome_banner: false,
startup_tooltip_override: None,
suppress_session_configured_redraw: true,
pending_notification: None,
quit_shortcut_expires_at: None,

View file

@ -1581,6 +1581,7 @@ async fn helpers_are_available_and_do_not_panic() {
is_first_run: true,
feedback_audience: FeedbackAudience::External,
model: Some(resolved_model),
startup_tooltip_override: None,
status_line_invalid_items_warned: Arc::new(AtomicBool::new(false)),
otel_manager,
};
@ -1705,6 +1706,7 @@ async fn make_chatwidget_manual(
forked_from: None,
frame_requester: FrameRequester::test_dummy(),
show_welcome_banner: true,
startup_tooltip_override: None,
queued_user_messages: VecDeque::new(),
queued_message_edit_binding: crate::key_hint::alt(KeyCode::Up),
suppress_session_configured_redraw: false,
@ -4488,6 +4490,7 @@ async fn collaboration_modes_defaults_to_code_on_startup() {
is_first_run: true,
feedback_audience: FeedbackAudience::External,
model: Some(resolved_model.clone()),
startup_tooltip_override: None,
status_line_invalid_items_warned: Arc::new(AtomicBool::new(false)),
otel_manager,
};
@ -4537,6 +4540,7 @@ async fn experimental_mode_plan_is_ignored_on_startup() {
is_first_run: true,
feedback_audience: FeedbackAudience::External,
model: Some(resolved_model.clone()),
startup_tooltip_override: None,
status_line_invalid_items_warned: Arc::new(AtomicBool::new(false)),
otel_manager,
};

View file

@ -1041,6 +1041,7 @@ pub(crate) fn new_session_info(
requested_model: &str,
event: SessionConfiguredEvent,
is_first_event: bool,
tooltip_override: Option<String>,
auth_plan: Option<PlanType>,
) -> SessionInfoCell {
let SessionConfiguredEvent {
@ -1094,7 +1095,9 @@ pub(crate) fn new_session_info(
parts.push(Box::new(PlainHistoryCell { lines: help_lines }));
} else {
if config.show_tooltips
&& let Some(tooltips) = tooltips::get_tooltip(auth_plan).map(TooltipHistoryCell::new)
&& let Some(tooltips) = tooltip_override
.or_else(|| tooltips::get_tooltip(auth_plan))
.map(TooltipHistoryCell::new)
{
parts.push(Box::new(tooltips));
}
@ -2396,13 +2399,19 @@ mod tests {
use codex_core::config::types::McpServerTransportConfig;
use codex_otel::RuntimeMetricTotals;
use codex_otel::RuntimeMetricsSummary;
use codex_protocol::ThreadId;
use codex_protocol::account::PlanType;
use codex_protocol::models::WebSearchAction;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::McpAuthStatus;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::SessionConfiguredEvent;
use dirs::home_dir;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::collections::HashMap;
use std::path::PathBuf;
use codex_protocol::mcp::CallToolResult;
use codex_protocol::mcp::Tool;
@ -2463,6 +2472,25 @@ mod tests {
.expect("resource link content should serialize")
}
/// Minimal `SessionConfiguredEvent` fixture for session-info rendering tests.
fn session_configured_event(model: &str) -> SessionConfiguredEvent {
    SessionConfiguredEvent {
        session_id: ThreadId::new(),
        forked_from_id: None,
        thread_name: None,
        model: model.to_string(),
        model_provider_id: "test-provider".to_string(),
        approval_policy: AskForApproval::Never,
        sandbox_policy: SandboxPolicy::new_read_only_policy(),
        // Fixed path keeps rendered output (and snapshots) host-independent.
        cwd: PathBuf::from("/tmp/project"),
        reasoning_effort: None,
        history_log_id: 0,
        history_entry_count: 0,
        initial_messages: None,
        network_proxy: None,
        rollout_path: Some(PathBuf::new()),
    }
}
#[test]
fn unified_exec_interaction_cell_renders_input() {
let cell =
@ -2547,6 +2575,73 @@ mod tests {
insta::assert_snapshot!(rendered);
}
#[tokio::test]
async fn session_info_uses_availability_nux_tooltip_override() {
    let config = test_config().await;
    let override_message = "Model just became available".to_string();
    // A NUX override should be rendered in place of the regular tooltip.
    let cell = new_session_info(
        &config,
        "gpt-5",
        session_configured_event("gpt-5"),
        false,
        Some(override_message),
        Some(PlanType::Free),
    );
    let transcript = render_transcript(&cell).join("\n");
    assert!(transcript.contains("Model just became available"));
}
#[tokio::test]
async fn session_info_availability_nux_tooltip_snapshot() {
    let mut config = test_config().await;
    // Pin the cwd so the snapshot's "directory:" line is stable across hosts.
    config.cwd = PathBuf::from("/tmp/project");
    let cell = new_session_info(
        &config,
        "gpt-5",
        session_configured_event("gpt-5"),
        false,
        Some("Model just became available".to_string()),
        Some(PlanType::Free),
    );
    let rendered = render_transcript(&cell).join("\n");
    insta::assert_snapshot!(rendered);
}
#[tokio::test]
async fn session_info_first_event_suppresses_tooltips_and_nux() {
    let config = test_config().await;
    // With `is_first_event = true` the onboarding help is rendered instead of
    // any tooltip, including the NUX override.
    let cell = new_session_info(
        &config,
        "gpt-5",
        session_configured_event("gpt-5"),
        true,
        Some("Model just became available".to_string()),
        Some(PlanType::Free),
    );
    let transcript = render_transcript(&cell).join("\n");
    assert!(!transcript.contains("Model just became available"));
    assert!(transcript.contains("To get started"));
}
#[tokio::test]
async fn session_info_hides_tooltips_when_disabled() {
    let mut config = test_config().await;
    config.show_tooltips = false;
    // When tooltips are disabled, even the NUX override must not render.
    let cell = new_session_info(
        &config,
        "gpt-5",
        session_configured_event("gpt-5"),
        false,
        Some("Model just became available".to_string()),
        Some(PlanType::Free),
    );
    let transcript = render_transcript(&cell).join("\n");
    assert!(!transcript.contains("Model just became available"));
}
#[test]
fn ps_output_multiline_snapshot() {
let cell = new_unified_exec_processes_output(vec![

View file

@ -0,0 +1,13 @@
---
source: tui/src/history_cell.rs
assertion_line: 2608
expression: rendered
---
╭─────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ model: gpt-5 /model to change │
│ directory: /tmp/project │
╰─────────────────────────────────────╯
Tip: Model just became available

View file

@ -1,4 +1,5 @@
// Aggregates all former standalone integration tests as modules.
mod model_availability_nux;
mod no_panic_on_startup;
mod status_indicator;
mod vt100_history;

View file

@ -0,0 +1,190 @@
use std::collections::HashMap;
use std::time::Duration;
use anyhow::Context;
use anyhow::Result;
use serde_json::Value as JsonValue;
use tempfile::tempdir;
use tokio::select;
use tokio::time::sleep;
use tokio::time::timeout;
// End-to-end check that resuming a session does not burn a model-availability
// NUX exposure: the persisted `[tui.model_availability_nux]` count must stay
// at its seeded value after `codex resume` runs and exits.
#[tokio::test]
async fn resume_startup_does_not_consume_model_availability_nux_count() -> Result<()> {
    // run_codex_cli() does not work on Windows due to PTY limitations.
    if cfg!(windows) {
        return Ok(());
    }
    let repo_root = codex_utils_cargo_bin::repo_root()?;
    let codex_home = tempdir()?;
    // Build a custom catalog where exactly one model (the first) carries an
    // availability NUX, so the test controls which tooltip could fire.
    let source_catalog_path = codex_utils_cargo_bin::find_resource!("../core/models.json")?;
    let source_catalog = std::fs::read_to_string(&source_catalog_path)?;
    let mut source_catalog: JsonValue = serde_json::from_str(&source_catalog)?;
    let models = source_catalog
        .get_mut("models")
        .and_then(JsonValue::as_array_mut)
        .context("models array missing")?;
    // Strip any availability NUX shipped with the stock catalog.
    for model in models.iter_mut() {
        if let Some(object) = model.as_object_mut() {
            object.remove("availability_nux");
        }
    }
    let first_model = models.first_mut().context("models array is empty")?;
    let first_model_object = first_model
        .as_object_mut()
        .context("first model was not a JSON object")?;
    let model_slug = first_model_object
        .get("slug")
        .and_then(JsonValue::as_str)
        .context("first model missing slug")?
        .to_string();
    first_model_object.insert(
        "availability_nux".to_string(),
        serde_json::json!({
            "message": "Model now available",
        }),
    );
    let custom_catalog_path = codex_home.path().join("catalog.json");
    std::fs::write(
        &custom_catalog_path,
        serde_json::to_string(&source_catalog)?,
    )?;
    let repo_root_display = repo_root.display();
    let catalog_display = custom_catalog_path.display();
    // Seed the config with a shown count of 1 for the NUX model; the resume
    // path must leave this value untouched.
    let config_contents = format!(
        r#"model = "{model_slug}"
model_provider = "openai"
model_catalog_json = "{catalog_display}"
[projects."{repo_root_display}"]
trust_level = "trusted"
[tui.model_availability_nux]
"{model_slug}" = 1
"#
    );
    std::fs::write(codex_home.path().join("config.toml"), config_contents)?;
    let fixture_path =
        codex_utils_cargo_bin::find_resource!("../core/tests/cli_responses_fixture.sse")?;
    // Locate the codex binary, falling back to the workspace target dir; skip
    // the test entirely when neither is available.
    let codex = if let Ok(path) = codex_utils_cargo_bin::cargo_bin("codex") {
        path
    } else {
        let fallback = repo_root.join("codex-rs/target/debug/codex");
        if fallback.is_file() {
            fallback
        } else {
            eprintln!("skipping integration test because codex binary is unavailable");
            return Ok(());
        }
    };
    // Seed a session on disk with `codex exec` so `resume --last` has
    // something to pick up; responses come from the SSE fixture.
    let exec_output = std::process::Command::new(&codex)
        .arg("exec")
        .arg("--skip-git-repo-check")
        .arg("-C")
        .arg(&repo_root)
        .arg("seed session for resume")
        .env("CODEX_HOME", codex_home.path())
        .env("OPENAI_API_KEY", "dummy")
        .env("CODEX_RS_SSE_FIXTURE", fixture_path)
        .env("OPENAI_BASE_URL", "http://unused.local")
        .output()
        .context("failed to execute codex exec")?;
    anyhow::ensure!(
        exec_output.status.success(),
        "codex exec failed: {}",
        String::from_utf8_lossy(&exec_output.stderr)
    );
    let mut env = HashMap::new();
    env.insert(
        "CODEX_HOME".to_string(),
        codex_home.path().display().to_string(),
    );
    env.insert("OPENAI_API_KEY".to_string(), "dummy".to_string());
    let args = vec![
        "resume".to_string(),
        "--last".to_string(),
        "--no-alt-screen".to_string(),
        "-C".to_string(),
        repo_root.display().to_string(),
        "-c".to_string(),
        "analytics.enabled=false".to_string(),
    ];
    // Run the TUI resume under a PTY so it behaves like an interactive start.
    let spawned = codex_utils_pty::spawn_pty_process(
        codex.to_string_lossy().as_ref(),
        &args,
        &repo_root,
        &env,
        &None,
    )
    .await?;
    let mut output = Vec::new();
    let mut output_rx = spawned.output_rx;
    let mut exit_rx = spawned.exit_rx;
    let writer_tx = spawned.session.writer_sender();
    let interrupt_writer = writer_tx.clone();
    // Once startup has settled, send Ctrl-C (0x03) several times so the TUI
    // exits on its own.
    let interrupt_task = tokio::spawn(async move {
        sleep(Duration::from_secs(2)).await;
        for _ in 0..4 {
            let _ = interrupt_writer.send(vec![3]).await;
            sleep(Duration::from_millis(500)).await;
        }
    });
    let exit_code_result = timeout(Duration::from_secs(15), async {
        loop {
            select! {
                result = output_rx.recv() => match result {
                    Ok(chunk) => {
                        // Answer the terminal's cursor-position query
                        // (DSR, ESC [ 6 n) so the TUI does not stall on it.
                        if chunk.windows(4).any(|window| window == b"\x1b[6n") {
                            let _ = writer_tx.send(b"\x1b[1;1R".to_vec()).await;
                        }
                        output.extend_from_slice(&chunk);
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Closed) => break exit_rx.await,
                    // Dropped chunks are fine; we only need the DSR probe and
                    // enough output for diagnostics.
                    Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => {}
                },
                result = &mut exit_rx => break result,
            }
        }
    })
    .await;
    interrupt_task.abort();
    let exit_code = match exit_code_result {
        Ok(Ok(code)) => code,
        Ok(Err(err)) => return Err(err.into()),
        Err(_) => {
            spawned.session.terminate();
            anyhow::bail!("timed out waiting for codex resume to exit");
        }
    };
    // 0 = clean exit, 130 = terminated by Ctrl-C; both are acceptable here.
    anyhow::ensure!(
        exit_code == 0 || exit_code == 130,
        "unexpected exit code from codex resume: {exit_code}; output: {}",
        String::from_utf8_lossy(&output)
    );
    // Verify the persisted shown count is still the seeded 1 — the resume
    // path must not have consumed a NUX exposure.
    let config_contents = std::fs::read_to_string(codex_home.path().join("config.toml"))?;
    let config: toml::Value = toml::from_str(&config_contents)?;
    let shown_count = config
        .get("tui")
        .and_then(|tui| tui.get("model_availability_nux"))
        .and_then(|nux| nux.get(&model_slug))
        .and_then(toml::Value::as_integer)
        .context("missing tui.model_availability_nux count")?;
    assert_eq!(shown_count, 1);
    Ok(())
}
}