feat(tui) /personality (#9718)

## Summary
Adds /personality selector in the TUI, which leverages the new core
interface in #9644

Notes:
- We are doing some of our own state management for model_info loading
  here, but I'm not sure that's ideal. I'm open to opinions on a simpler
  approach, but I would like to avoid blocking on a larger refactor.
- Right now, the `/personality` selector just hides when the model
  doesn't support it. We can update this behavior down the line.

## Testing
- [x] Tested locally
- [x] Added snapshot tests
This commit is contained in:
Dylan Hurd 2026-01-25 21:59:42 -08:00 committed by GitHub
parent d27f2533a9
commit 031bafd1fb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
23 changed files with 421 additions and 32 deletions

View file

@ -905,6 +905,8 @@ pub struct Model {
pub description: String,
pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
pub default_reasoning_effort: ReasoningEffort,
#[serde(default)]
pub supports_personality: bool,
// Only one model should be marked as default.
pub is_default: bool,
}

View file

@ -28,6 +28,7 @@ fn model_from_preset(preset: ModelPreset) -> Model {
preset.supported_reasoning_efforts,
),
default_reasoning_effort: preset.default_reasoning_effort,
supports_personality: preset.supports_personality,
is_default: preset.is_default,
}
}

View file

@ -72,6 +72,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: true,
},
Model {
@ -99,6 +100,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
Model {
@ -118,6 +120,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
Model {
@ -151,6 +154,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
];

View file

@ -4,6 +4,7 @@ use crate::config::types::Notice;
use crate::path_utils::resolve_symlink_write_paths;
use crate::path_utils::write_atomically;
use anyhow::Context;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use std::collections::BTreeMap;
@ -24,6 +25,8 @@ pub enum ConfigEdit {
model: Option<String>,
effort: Option<ReasoningEffort>,
},
/// Update the active (or default) model personality.
SetModelPersonality { personality: Option<Personality> },
/// Toggle the acknowledgement flag under `[notice]`.
SetNoticeHideFullAccessWarning(bool),
/// Toggle the Windows world-writable directories warning acknowledgement flag.
@ -269,6 +272,10 @@ impl ConfigDocument {
);
mutated
}),
ConfigEdit::SetModelPersonality { personality } => Ok(self.write_profile_value(
&["model_personality"],
personality.map(|personality| value(personality.to_string())),
)),
ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged) => Ok(self.write_value(
Scope::Global,
&[Notice::TABLE_KEY, "hide_full_access_warning"],
@ -712,6 +719,12 @@ impl ConfigEditsBuilder {
self
}
/// Queue a [`ConfigEdit::SetModelPersonality`] edit that updates the
/// active (or default) model personality when the builder is applied.
pub fn set_model_personality(mut self, personality: Option<Personality>) -> Self {
    let edit = ConfigEdit::SetModelPersonality { personality };
    self.edits.push(edit);
    self
}
pub fn set_hide_full_access_warning(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged));

View file

@ -36,6 +36,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
supports_personality: true,
is_default: true,
upgrade: None,
show_in_picker: true,
@ -65,6 +66,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: true,
@ -87,6 +89,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
.to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: true,
@ -116,6 +119,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: true,
@ -145,6 +149,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
supports_personality: true,
is_default: false,
upgrade: None,
show_in_picker: false,
@ -174,6 +179,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: None,
show_in_picker: false,
@ -200,6 +206,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
@ -221,6 +228,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
@ -247,6 +255,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
.to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
@ -276,6 +285,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
@ -301,6 +311,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,

View file

@ -94,6 +94,7 @@ fn gpt_52_codex() -> ModelPreset {
"Extra high reasoning depth for complex problems",
),
],
supports_personality: false,
is_default: true,
upgrade: None,
show_in_picker: true,
@ -126,6 +127,7 @@ fn gpt_5_1_codex_max() -> ModelPreset {
"Extra high reasoning depth for complex problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.1-codex-max",
@ -160,6 +162,7 @@ fn gpt_5_1_codex_mini() -> ModelPreset {
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.1-codex-mini",
@ -204,6 +207,7 @@ fn gpt_5_2() -> ModelPreset {
"Extra high reasoning for complex problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.2",
@ -246,6 +250,7 @@ fn bengalfox() -> ModelPreset {
"Extra high reasoning depth for complex problems",
),
],
supports_personality: true,
is_default: false,
upgrade: None,
show_in_picker: false,
@ -278,6 +283,7 @@ fn boomslang() -> ModelPreset {
"Extra high reasoning depth for complex problems",
),
],
supports_personality: false,
is_default: false,
upgrade: None,
show_in_picker: false,
@ -306,6 +312,7 @@ fn gpt_5_codex() -> ModelPreset {
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5-codex",
@ -340,6 +347,7 @@ fn gpt_5_codex_mini() -> ModelPreset {
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5-codex-mini",
@ -378,6 +386,7 @@ fn gpt_5_1_codex() -> ModelPreset {
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.1-codex",
@ -420,6 +429,7 @@ fn gpt_5() -> ModelPreset {
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5",
@ -458,6 +468,7 @@ fn gpt_5_1() -> ModelPreset {
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.1",

View file

@ -94,6 +94,7 @@ Each response yields:
- `reasoningEffort` one of `minimal|low|medium|high`
- `description` human-friendly label for the effort
- `defaultReasoningEffort` suggested effort for the UI
- `supportsPersonality` whether the model supports personality-specific instructions
- `isDefault` whether the model is recommended for most users
- `nextCursor` pass into the next request to continue paging (optional)

View file

@ -2,6 +2,7 @@ use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display;
use strum_macros::EnumIter;
use ts_rs::TS;
use crate::openai_models::ReasoningEffort;
@ -78,6 +79,7 @@ pub enum SandboxMode {
TS,
PartialOrd,
Ord,
EnumIter,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]

View file

@ -78,6 +78,9 @@ pub struct ModelPreset {
pub default_reasoning_effort: ReasoningEffort,
/// Supported reasoning effort options.
pub supported_reasoning_efforts: Vec<ReasoningEffortPreset>,
/// Whether this model supports personality-specific instructions.
#[serde(default)]
pub supports_personality: bool,
/// Whether this is the default model for new users.
pub is_default: bool,
/// recommended upgrade model
@ -214,6 +217,12 @@ impl ModelInfo {
})
}
pub fn supports_personality(&self) -> bool {
self.model_instructions_template
.as_ref()
.is_some_and(ModelInstructionsTemplate::supports_personality)
}
pub fn get_model_instructions(&self, personality: Option<Personality>) -> String {
if let Some(personality) = personality
&& let Some(template) = &self.model_instructions_template
@ -249,6 +258,13 @@ impl ModelInstructionsTemplate {
/// True when the raw template text contains the personality placeholder token.
fn has_personality_placeholder(&self) -> bool {
self.template.contains(PERSONALITY_PLACEHOLDER)
}
/// A template supports personalities only when the placeholder appears in
/// the template text AND a message is provided for every `Personality`
/// variant (otherwise rendering would leave a variant without content).
fn supports_personality(&self) -> bool {
    if !self.has_personality_placeholder() {
        return false;
    }
    let Some(messages) = self.personality_messages.as_ref() else {
        return false;
    };
    Personality::iter().all(|personality| messages.0.contains_key(&personality))
}
}
// serializes as a dictionary from personality to message
@ -280,6 +296,7 @@ pub struct ModelsResponse {
// convert ModelInfo to ModelPreset
impl From<ModelInfo> for ModelPreset {
fn from(info: ModelInfo) -> Self {
let supports_personality = info.supports_personality();
ModelPreset {
id: info.slug.clone(),
model: info.slug.clone(),
@ -289,6 +306,7 @@ impl From<ModelInfo> for ModelPreset {
.default_reasoning_level
.unwrap_or(ReasoningEffort::None),
supported_reasoning_efforts: info.supported_reasoning_levels.clone(),
supports_personality,
is_default: false, // default is the highest priority available model
upgrade: info.upgrade.as_ref().map(|upgrade| ModelUpgrade {
id: upgrade.model.clone(),

View file

@ -60,6 +60,7 @@ use codex_core::protocol::SkillErrorInfo;
use codex_core::protocol::TokenUsage;
use codex_otel::OtelManager;
use codex_protocol::ThreadId;
use codex_protocol::config_types::Personality;
use codex_protocol::items::TurnItem;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
@ -932,14 +933,10 @@ impl App {
let auth = auth_manager.auth().await;
let auth_ref = auth.as_ref();
let model_info = thread_manager
.get_models_manager()
.get_model_info(model.as_str(), &config)
.await;
let otel_manager = OtelManager::new(
ThreadId::new(),
model.as_str(),
model_info.slug.as_str(),
model.as_str(),
auth_ref.and_then(CodexAuth::get_account_id),
auth_ref.and_then(CodexAuth::get_account_email),
auth_ref.map(|auth| auth.mode),
@ -1505,6 +1502,9 @@ impl App {
AppEvent::UpdateCollaborationMode(mask) => {
self.chat_widget.set_collaboration_mask(mask);
}
AppEvent::UpdatePersonality(personality) => {
self.on_update_personality(personality);
}
AppEvent::OpenReasoningPopup { model } => {
self.chat_widget.open_reasoning_popup(model);
}
@ -1758,6 +1758,41 @@ impl App {
}
}
}
AppEvent::PersistPersonalitySelection { personality } => {
let profile = self.active_profile.as_deref();
match ConfigEditsBuilder::new(&self.config.codex_home)
.with_profile(profile)
.set_model_personality(Some(personality))
.apply()
.await
{
Ok(()) => {
let label = Self::personality_label(personality);
let mut message = format!("Personality set to {label}");
if let Some(profile) = profile {
message.push_str(" for ");
message.push_str(profile);
message.push_str(" profile");
}
self.chat_widget.add_info_message(message, None);
}
Err(err) => {
tracing::error!(
error = %err,
"failed to persist personality selection"
);
if let Some(profile) = profile {
self.chat_widget.add_error_message(format!(
"Failed to save personality for profile `{profile}`: {err}"
));
} else {
self.chat_widget.add_error_message(format!(
"Failed to save default personality: {err}"
));
}
}
}
}
AppEvent::UpdateAskForApprovalPolicy(policy) => {
self.runtime_approval_policy_override = Some(policy);
if let Err(err) = self.config.approval_policy.set(policy) {
@ -2162,6 +2197,18 @@ impl App {
self.chat_widget.set_reasoning_effort(effort);
}
/// Apply a personality change to the app-level config copy and propagate
/// it to the chat widget so subsequent turns use the new value.
fn on_update_personality(&mut self, personality: Personality) {
self.config.model_personality = Some(personality);
self.chat_widget.set_personality(personality);
}
/// Human-readable label for a personality, used in persistence status messages.
/// NOTE(review): duplicated as `ChatWidget::personality_label` — consider a
/// single shared helper (e.g. a method on `Personality`).
fn personality_label(personality: Personality) -> &'static str {
match personality {
Personality::Friendly => "Friendly",
Personality::Pragmatic => "Pragmatic",
}
}
async fn launch_external_editor(&mut self, tui: &mut tui::Tui) {
let editor_cmd = match external_editor::resolve_editor_command() {
Ok(cmd) => cmd,

View file

@ -24,6 +24,7 @@ use codex_core::features::Feature;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::Personality;
use codex_protocol::openai_models::ReasoningEffort;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@ -106,12 +107,20 @@ pub(crate) enum AppEvent {
/// Update the active collaboration mask in the running app and widget.
UpdateCollaborationMode(CollaborationModeMask),
/// Update the current personality in the running app and widget.
UpdatePersonality(Personality),
/// Persist the selected model and reasoning effort to the appropriate config.
PersistModelSelection {
model: String,
effort: Option<ReasoningEffort>,
},
/// Persist the selected personality to the appropriate config.
PersistPersonalitySelection {
personality: Personality,
},
/// Open the reasoning selection popup after picking a model.
OpenReasoningPopup {
model: ModelPreset,

View file

@ -235,6 +235,7 @@ pub(crate) struct ChatComposer {
steer_enabled: bool,
collaboration_modes_enabled: bool,
collaboration_mode_indicator: Option<CollaborationModeIndicator>,
personality_command_enabled: bool,
}
#[derive(Clone, Debug)]
@ -296,6 +297,7 @@ impl ChatComposer {
steer_enabled: false,
collaboration_modes_enabled: false,
collaboration_mode_indicator: None,
personality_command_enabled: false,
};
// Apply configuration via the setter to keep side-effects centralized.
this.set_disable_paste_burst(disable_paste_burst);
@ -327,6 +329,10 @@ impl ChatComposer {
self.collaboration_mode_indicator = indicator;
}
/// Toggle whether the `/personality` builtin is offered by the composer's
/// slash-command handling (filtering, popup, and submission paths).
pub fn set_personality_command_enabled(&mut self, enabled: bool) {
self.personality_command_enabled = enabled;
}
fn layout_areas(&self, area: Rect) -> [Rect; 3] {
let footer_props = self.footer_props();
let footer_hint_height = self
@ -1631,9 +1637,11 @@ impl ChatComposer {
if let Some((name, _rest, _rest_offset)) = parse_slash_name(&text) {
let treat_as_plain_text = input_starts_with_space || name.contains('/');
if !treat_as_plain_text {
let is_builtin =
Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled)
.any(|(command_name, _)| command_name == name);
let is_builtin = Self::built_in_slash_commands_for_input(
self.collaboration_modes_enabled,
self.personality_command_enabled,
)
.any(|(command_name, _)| command_name == name);
let prompt_prefix = format!("{PROMPTS_CMD_PREFIX}:");
let is_known_prompt = name
.strip_prefix(&prompt_prefix)
@ -1798,9 +1806,11 @@ impl ChatComposer {
let first_line = self.textarea.text().lines().next().unwrap_or("");
if let Some((name, rest, _rest_offset)) = parse_slash_name(first_line)
&& rest.is_empty()
&& let Some((_n, cmd)) =
Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled)
.find(|(n, _)| *n == name)
&& let Some((_n, cmd)) = Self::built_in_slash_commands_for_input(
self.collaboration_modes_enabled,
self.personality_command_enabled,
)
.find(|(n, _)| *n == name)
{
self.textarea.set_text_clearing_elements("");
Some(InputResult::Command(cmd))
@ -1820,9 +1830,11 @@ impl ChatComposer {
if let Some((name, rest, _rest_offset)) = parse_slash_name(&text)
&& !rest.is_empty()
&& !name.contains('/')
&& let Some((_n, cmd)) =
Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled)
.find(|(command_name, _)| *command_name == name)
&& let Some((_n, cmd)) = Self::built_in_slash_commands_for_input(
self.collaboration_modes_enabled,
self.personality_command_enabled,
)
.find(|(command_name, _)| *command_name == name)
&& cmd == SlashCommand::Review
{
self.textarea.set_text_clearing_elements("");
@ -2253,9 +2265,11 @@ impl ChatComposer {
return rest_after_name.is_empty();
}
let builtin_match =
Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled)
.any(|(cmd_name, _)| fuzzy_match(cmd_name, name).is_some());
let builtin_match = Self::built_in_slash_commands_for_input(
self.collaboration_modes_enabled,
self.personality_command_enabled,
)
.any(|(cmd_name, _)| fuzzy_match(cmd_name, name).is_some());
if builtin_match {
return true;
@ -2308,10 +2322,12 @@ impl ChatComposer {
_ => {
if is_editing_slash_command_name {
let collaboration_modes_enabled = self.collaboration_modes_enabled;
let personality_command_enabled = self.personality_command_enabled;
let mut command_popup = CommandPopup::new(
self.custom_prompts.clone(),
CommandPopupFlags {
collaboration_modes_enabled,
personality_command_enabled,
},
);
command_popup.on_composer_text_change(first_line.to_string());
@ -2323,12 +2339,16 @@ impl ChatComposer {
/// Built-in slash commands with feature-gated entries removed:
/// `SlashCommand::ElevateSandbox` is kept only while the degraded Windows
/// sandbox is active, and `Collab`/`Personality` only when their respective
/// flags are enabled. All other commands pass through unchanged.
fn built_in_slash_commands_for_input(
    collaboration_modes_enabled: bool,
    personality_command_enabled: bool,
) -> impl Iterator<Item = (&'static str, SlashCommand)> {
    let allow_elevate_sandbox = windows_degraded_sandbox_active();
    built_in_slash_commands()
        .into_iter()
        .filter(move |(_, cmd)| match cmd {
            SlashCommand::ElevateSandbox => allow_elevate_sandbox,
            SlashCommand::Collab => collaboration_modes_enabled,
            SlashCommand::Personality => personality_command_enabled,
            _ => true,
        })
}
pub(crate) fn set_custom_prompts(&mut self, prompts: Vec<CustomPrompt>) {

View file

@ -39,6 +39,7 @@ pub(crate) struct CommandPopup {
#[derive(Clone, Copy, Debug, Default)]
pub(crate) struct CommandPopupFlags {
pub(crate) collaboration_modes_enabled: bool,
pub(crate) personality_command_enabled: bool,
}
impl CommandPopup {
@ -48,6 +49,9 @@ impl CommandPopup {
.into_iter()
.filter(|(_, cmd)| allow_elevate_sandbox || *cmd != SlashCommand::ElevateSandbox)
.filter(|(_, cmd)| flags.collaboration_modes_enabled || *cmd != SlashCommand::Collab)
.filter(|(_, cmd)| {
flags.personality_command_enabled || *cmd != SlashCommand::Personality
})
.collect();
// Exclude prompts that collide with builtin command names and sort by name.
let exclude: HashSet<String> = builtins.iter().map(|(n, _)| (*n).to_string()).collect();
@ -216,6 +220,7 @@ impl CommandPopup {
display_shortcut: None,
description: Some(description),
wrap_indent: None,
is_disabled: false,
disabled_reason: None,
}
})
@ -466,6 +471,7 @@ mod tests {
Vec::new(),
CommandPopupFlags {
collaboration_modes_enabled: true,
personality_command_enabled: true,
},
);
popup.on_composer_text_change("/collab".to_string());
@ -475,4 +481,46 @@ mod tests {
other => panic!("expected collab to be selected for exact match, got {other:?}"),
}
}
// With the personality flag disabled, fuzzy-matching "/pers" must not
// surface the `personality` builtin among the popup's filtered items.
#[test]
fn personality_command_hidden_when_disabled() {
let mut popup = CommandPopup::new(
Vec::new(),
CommandPopupFlags {
collaboration_modes_enabled: true,
personality_command_enabled: false,
},
);
popup.on_composer_text_change("/pers".to_string());
// Collect only builtin command names; user prompts are irrelevant here.
let cmds: Vec<&str> = popup
.filtered_items()
.into_iter()
.filter_map(|item| match item {
CommandItem::Builtin(cmd) => Some(cmd.command()),
CommandItem::UserPrompt(_) => None,
})
.collect();
assert!(
!cmds.contains(&"personality"),
"expected '/personality' to be hidden when disabled, got {cmds:?}"
);
}
// With the personality flag enabled, an exact "/personality" match should
// both appear and be the selected item in the popup.
#[test]
fn personality_command_visible_when_enabled() {
let mut popup = CommandPopup::new(
Vec::new(),
CommandPopupFlags {
collaboration_modes_enabled: true,
personality_command_enabled: true,
},
);
popup.on_composer_text_change("/personality".to_string());
match popup.selected_item() {
Some(CommandItem::Builtin(cmd)) => assert_eq!(cmd.command(), "personality"),
other => panic!("expected personality to be selected for exact match, got {other:?}"),
}
}
}

View file

@ -132,6 +132,7 @@ impl WidgetRef for &FileSearchPopup {
display_shortcut: None,
description: None,
wrap_indent: None,
is_disabled: false,
disabled_reason: None,
})
.collect()

View file

@ -42,6 +42,7 @@ pub(crate) struct SelectionItem {
pub selected_description: Option<String>,
pub is_current: bool,
pub is_default: bool,
pub is_disabled: bool,
pub actions: Vec<SelectionAction>,
pub dismiss_on_select: bool,
pub search_value: Option<String>,
@ -217,12 +218,14 @@ impl ListSelectionView {
.flatten()
.or_else(|| item.description.clone());
let wrap_indent = description.is_none().then_some(wrap_prefix_width);
let is_disabled = item.is_disabled || item.disabled_reason.is_some();
GenericDisplayRow {
name: display_name,
display_shortcut: item.display_shortcut,
match_indices: None,
description,
wrap_indent,
is_disabled,
disabled_reason: item.disabled_reason.clone(),
}
})
@ -247,19 +250,27 @@ impl ListSelectionView {
}
fn accept(&mut self) {
if let Some(idx) = self.state.selected_idx
&& let Some(actual_idx) = self.filtered_indices.get(idx)
&& let Some(item) = self.items.get(*actual_idx)
let selected_item = self
.state
.selected_idx
.and_then(|idx| self.filtered_indices.get(idx))
.and_then(|actual_idx| self.items.get(*actual_idx));
if let Some(item) = selected_item
&& item.disabled_reason.is_none()
&& !item.is_disabled
{
self.last_selected_actual_idx = Some(*actual_idx);
if let Some(idx) = self.state.selected_idx
&& let Some(actual_idx) = self.filtered_indices.get(idx)
{
self.last_selected_actual_idx = Some(*actual_idx);
}
for act in &item.actions {
act(&self.app_event_tx);
}
if item.dismiss_on_select {
self.complete = true;
}
} else {
} else if selected_item.is_none() {
self.complete = true;
}
}
@ -286,7 +297,7 @@ impl ListSelectionView {
&& self
.items
.get(*actual_idx)
.is_some_and(|item| item.disabled_reason.is_some())
.is_some_and(|item| item.disabled_reason.is_some() || item.is_disabled)
{
self.state.move_down_wrap(len);
} else {
@ -303,7 +314,7 @@ impl ListSelectionView {
&& self
.items
.get(*actual_idx)
.is_some_and(|item| item.disabled_reason.is_some())
.is_some_and(|item| item.disabled_reason.is_some() || item.is_disabled)
{
self.state.move_up_wrap(len);
} else {
@ -395,7 +406,7 @@ impl BottomPaneView for ListSelectionView {
&& self
.items
.get(idx)
.is_some_and(|item| item.disabled_reason.is_none())
.is_some_and(|item| item.disabled_reason.is_none() && !item.is_disabled)
{
self.state.selected_idx = Some(idx);
self.accept();

View file

@ -215,6 +215,11 @@ impl BottomPane {
self.request_redraw();
}
/// Forward the `/personality` availability flag to the composer and request
/// a redraw so command completions refresh immediately.
pub fn set_personality_command_enabled(&mut self, enabled: bool) {
self.composer.set_personality_command_enabled(enabled);
self.request_redraw();
}
pub fn status_widget(&self) -> Option<&StatusIndicatorWidget> {
self.status.as_ref()
}

View file

@ -23,7 +23,8 @@ pub(crate) struct GenericDisplayRow {
pub match_indices: Option<Vec<usize>>, // indices to bold (char positions)
pub description: Option<String>, // optional grey text after the name
pub disabled_reason: Option<String>, // optional disabled message
pub wrap_indent: Option<usize>, // optional indent for wrapped lines
pub is_disabled: bool,
pub wrap_indent: Option<usize>, // optional indent for wrapped lines
}
pub(crate) fn wrap_styled_line<'a>(line: &'a Line<'a>, width: u16) -> Vec<Line<'a>> {
@ -282,13 +283,18 @@ pub(crate) fn render_rows(
}
let mut full_line = build_full_line(row, desc_col);
if Some(i) == state.selected_idx {
if Some(i) == state.selected_idx && !row.is_disabled {
// Match previous behavior: cyan + bold for the selected row.
// Reset the style first to avoid inheriting dim from keyboard shortcuts.
full_line.spans.iter_mut().for_each(|span| {
span.style = Style::default().fg(Color::Cyan).bold();
});
}
if row.is_disabled {
full_line.spans.iter_mut().for_each(|span| {
span.style = span.style.dim();
});
}
// Wrap with subsequent indent aligned to the description column.
use crate::wrapping::RtOptions;
@ -364,11 +370,16 @@ pub(crate) fn render_rows_single_line(
}
let mut full_line = build_full_line(row, desc_col);
if Some(i) == state.selected_idx {
if Some(i) == state.selected_idx && !row.is_disabled {
full_line.spans.iter_mut().for_each(|span| {
span.style = Style::default().fg(Color::Cyan).bold();
});
}
if row.is_disabled {
full_line.spans.iter_mut().for_each(|span| {
span.style = span.style.dim();
});
}
let full_line = truncate_line_with_ellipsis_if_overflow(full_line, area.width as usize);
full_line.render(

View file

@ -96,6 +96,7 @@ impl SkillPopup {
match_indices: indices,
display_shortcut: None,
description: Some(description),
is_disabled: false,
disabled_reason: None,
wrap_indent: None,
}

View file

@ -95,6 +95,7 @@ use codex_protocol::approvals::ElicitationRequestEvent;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::Settings;
use codex_protocol::models::local_image_label_text;
use codex_protocol::parse_command::ParsedCommand;
@ -738,6 +739,7 @@ impl ChatWidget {
None,
);
self.refresh_model_display();
self.sync_personality_command_enabled();
let session_info_cell = history_cell::new_session_info(
&self.config,
&model_for_header,
@ -2023,6 +2025,7 @@ impl ChatWidget {
widget.bottom_pane.set_collaboration_modes_enabled(
widget.config.features.enabled(Feature::CollaborationModes),
);
widget.sync_personality_command_enabled();
widget.update_collaboration_mode_indicator();
widget
@ -2147,6 +2150,7 @@ impl ChatWidget {
widget.bottom_pane.set_collaboration_modes_enabled(
widget.config.features.enabled(Feature::CollaborationModes),
);
widget.sync_personality_command_enabled();
widget
}
@ -2272,6 +2276,7 @@ impl ChatWidget {
widget.bottom_pane.set_collaboration_modes_enabled(
widget.config.features.enabled(Feature::CollaborationModes),
);
widget.sync_personality_command_enabled();
widget.update_collaboration_mode_indicator();
widget
@ -2507,6 +2512,9 @@ impl ChatWidget {
SlashCommand::Model => {
self.open_model_popup();
}
SlashCommand::Personality => {
self.open_personality_popup();
}
SlashCommand::Collab => {
if self.collaboration_modes_enabled() {
self.open_collaboration_modes_popup();
@ -2830,6 +2838,10 @@ impl ChatWidget {
} else {
None
};
let personality = self
.config
.model_personality
.filter(|_| self.current_model_supports_personality());
let op = Op::UserTurn {
items,
cwd: self.config.cwd.clone(),
@ -2840,7 +2852,7 @@ impl ChatWidget {
summary: self.config.model_reasoning_summary,
final_output_json_schema: None,
collaboration_mode,
personality: None,
personality,
};
self.codex_op_tx.send(op).unwrap_or_else(|e| {
@ -3382,6 +3394,76 @@ impl ChatWidget {
self.open_model_popup_with_presets(presets);
}
/// Entry point for the `/personality` command: open the selector once the
/// session is configured, otherwise surface an informational message.
pub(crate) fn open_personality_popup(&mut self) {
    if self.is_session_configured() {
        self.open_personality_popup_for_current_model();
    } else {
        self.add_info_message(
            "Personality selection is disabled until startup completes.".to_string(),
            None,
        );
    }
}
/// Build and show the personality selection popup for the current model.
/// Entries stay visible but are marked disabled when the model does not
/// support personalities, with an explanatory header line.
fn open_personality_popup_for_current_model(&mut self) {
let current_model = self.current_model();
let current_personality = self.config.model_personality;
// NOTE(review): hard-coded variant list — must be kept in sync with the
// `Personality` enum; consider iterating its variants if available here.
let personalities = [Personality::Friendly, Personality::Pragmatic];
let supports_personality = self.current_model_supports_personality();
// Rendered in red in the header when personalities are unsupported.
let disabled_message = (!supports_personality).then(|| {
format!(
"Current model ({current_model}) doesn't support personalities. Try /model to switch to a newer model."
)
});
let items: Vec<SelectionItem> = personalities
.into_iter()
.map(|personality| {
let name = Self::personality_label(personality).to_string();
let description = Some(Self::personality_description(personality).to_string());
// Selecting an entry (1) overrides the live turn context, (2) updates
// app/widget state via UpdatePersonality, and (3) persists the choice.
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: None,
effort: None,
summary: None,
collaboration_mode: None,
personality: Some(personality),
}));
tx.send(AppEvent::UpdatePersonality(personality));
tx.send(AppEvent::PersistPersonalitySelection { personality });
})];
SelectionItem {
name,
description,
is_current: current_personality == Some(personality),
is_disabled: !supports_personality,
actions,
dismiss_on_select: true,
..Default::default()
}
})
.collect();
let mut header = ColumnRenderable::new();
header.push(Line::from("Select Personality".bold()));
header.push(Line::from(
"Choose a communication style for future responses.".dim(),
));
if let Some(message) = disabled_message {
header.push(Line::from(message.red()));
}
self.bottom_pane.show_selection_view(SelectionViewParams {
header: Box::new(header),
footer_hint: Some(standard_popup_hint_line()),
items,
..Default::default()
});
}
fn model_menu_header(&self, title: &str, subtitle: &str) -> Box<dyn Renderable> {
let title = title.to_string();
let subtitle = subtitle.to_string();
@ -4608,6 +4690,11 @@ impl ChatWidget {
}
}
/// Set the personality in the widget's config copy. Called by the app layer
/// when handling `AppEvent::UpdatePersonality`; does not send any ops itself.
pub(crate) fn set_personality(&mut self, personality: Personality) {
self.config.model_personality = Some(personality);
}
/// Set the model in the widget's config copy and stored collaboration mode.
pub(crate) fn set_model(&mut self, model: &str) {
self.current_collaboration_mode =
@ -4619,6 +4706,7 @@ impl ChatWidget {
mask.model = Some(model.to_string());
}
self.refresh_model_display();
self.sync_personality_command_enabled();
}
pub(crate) fn current_model(&self) -> &str {
@ -4631,6 +4719,25 @@ impl ChatWidget {
.unwrap_or_else(|| self.current_collaboration_mode.model())
}
/// Re-evaluate whether the current model supports personalities and push the
/// result into the bottom pane, enabling or hiding the `/personality` command.
fn sync_personality_command_enabled(&mut self) {
self.bottom_pane
.set_personality_command_enabled(self.current_model_supports_personality());
}
/// Look up the current model among the known presets and report whether it
/// supports personalities. Returns false when the preset list is unavailable
/// or the model is not found (conservative default).
fn current_model_supports_personality(&self) -> bool {
    let Ok(models) = self.models_manager.try_list_models(&self.config) else {
        return false;
    };
    let model = self.current_model();
    models
        .iter()
        .find(|preset| preset.model == model)
        .is_some_and(|preset| preset.supports_personality)
}
#[allow(dead_code)] // Used in tests
pub(crate) fn current_collaboration_mode(&self) -> &CollaborationMode {
&self.current_collaboration_mode
@ -4746,6 +4853,20 @@ impl ChatWidget {
self.bottom_pane.set_collaboration_mode_indicator(indicator);
}
/// Display name for a personality in the selection popup.
/// NOTE(review): duplicated as `App::personality_label` — consider a single
/// shared helper (e.g. a method on `Personality`).
fn personality_label(personality: Personality) -> &'static str {
match personality {
Personality::Friendly => "Friendly",
Personality::Pragmatic => "Pragmatic",
}
}
/// One-line description shown next to each personality option in the picker.
fn personality_description(personality: Personality) -> &'static str {
    match personality {
        Personality::Pragmatic => "Concise, task-focused, and direct.",
        Personality::Friendly => "Warm, collaborative, and helpful.",
    }
}
/// Cycle to the next collaboration mode variant (Plan -> Code -> Plan).
fn cycle_collaboration_mode(&mut self) {
if !self.collaboration_modes_enabled() {

View file

@ -0,0 +1,11 @@
---
source: tui/src/chatwidget/tests.rs
expression: popup
---
Select Personality
Choose a communication style for future responses.
1. Friendly Warm, collaborative, and helpful.
2. Pragmatic Concise, task-focused, and direct.
Press enter to confirm or esc to go back

View file

@ -65,6 +65,7 @@ use codex_protocol::ThreadId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;
@ -788,7 +789,7 @@ async fn make_chatwidget_manual(
},
};
let current_collaboration_mode = base_mode;
let widget = ChatWidget {
let mut widget = ChatWidget {
app_event_tx,
codex_op_tx: op_tx,
bottom_pane: bottom,
@ -800,7 +801,7 @@ async fn make_chatwidget_manual(
auth_manager,
models_manager,
otel_manager,
session_header: SessionHeader::new(resolved_model),
session_header: SessionHeader::new(resolved_model.clone()),
initial_user_message: None,
token_info: None,
rate_limit_snapshot: None,
@ -844,6 +845,7 @@ async fn make_chatwidget_manual(
current_rollout_path: None,
external_editor_state: ExternalEditorState::Closed,
};
widget.set_model(&resolved_model);
(widget, rx, op_rx)
}
@ -2416,6 +2418,25 @@ async fn collab_mode_enabling_keeps_custom_until_selected() {
assert_eq!(chat.current_collaboration_mode().mode, ModeKind::Custom);
}
#[tokio::test]
async fn user_turn_includes_personality_from_config() {
    // A personality set on the widget config must flow into the submitted turn.
    let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(Some("bengalfox")).await;
    chat.thread_id = Some(ThreadId::new());
    chat.set_model("bengalfox");
    chat.set_personality(Personality::Friendly);
    chat.bottom_pane
        .set_composer_text("hello".to_string(), Vec::new(), Vec::new());
    chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
    let other = next_submit_op(&mut op_rx);
    assert!(
        matches!(
            other,
            Op::UserTurn {
                personality: Some(Personality::Friendly),
                ..
            }
        ),
        "expected Op::UserTurn with friendly personality, got {other:?}"
    );
}
#[tokio::test]
async fn slash_quit_requests_exit() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await;
@ -2960,6 +2981,16 @@ async fn model_selection_popup_snapshot() {
assert_snapshot!("model_selection_popup", popup);
}
#[tokio::test]
async fn personality_selection_popup_snapshot() {
    // Render the /personality selector and pin its layout via an insta snapshot.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("bengalfox")).await;
    chat.thread_id = Some(ThreadId::new());
    chat.open_personality_popup();
    let rendered = render_bottom_popup(&chat, 80);
    assert_snapshot!("personality_selection_popup", rendered);
}
#[tokio::test]
async fn model_picker_hides_show_in_picker_false_models_from_cache() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("test-visible-model")).await;
@ -2974,6 +3005,7 @@ async fn model_picker_hides_show_in_picker_false_models_from_cache() {
effort: ReasoningEffortConfig::Medium,
description: "medium".to_string(),
}],
supports_personality: false,
is_default: false,
upgrade: None,
show_in_picker,
@ -3186,6 +3218,7 @@ async fn single_reasoning_option_skips_selection() {
description: "".to_string(),
default_reasoning_effort: ReasoningEffortConfig::High,
supported_reasoning_efforts: single_effort,
supports_personality: false,
is_default: false,
upgrade: None,
show_in_picker: true,

View file

@ -13,6 +13,7 @@ pub enum SlashCommand {
// DO NOT ALPHA-SORT! Enum order is presentation order in the popup, so
// more frequently used commands should be listed first.
Model,
Personality,
Approvals,
Permissions,
#[strum(serialize = "setup-elevated-sandbox")]
@ -60,6 +61,7 @@ impl SlashCommand {
SlashCommand::Status => "show current session configuration and token usage",
SlashCommand::Ps => "list background terminals",
SlashCommand::Model => "choose what model and reasoning effort to use",
SlashCommand::Personality => "choose a communication style for responses",
SlashCommand::Collab => "change collaboration mode (experimental)",
SlashCommand::Agent => "switch the active agent thread",
SlashCommand::Approvals => "choose what Codex can do without approval",
@ -89,6 +91,7 @@ impl SlashCommand {
| SlashCommand::Compact
// | SlashCommand::Undo
| SlashCommand::Model
| SlashCommand::Personality
| SlashCommand::Approvals
| SlashCommand::Permissions
| SlashCommand::ElevateSandbox

View file

@ -104,7 +104,12 @@ pub(crate) mod announcement {
}
fn blocking_init_announcement_tip() -> Option<String> {
let response = reqwest::blocking::Client::new()
// Avoid system proxy detection to prevent macOS system-configuration panics (#8912).
let client = reqwest::blocking::Client::builder()
.no_proxy()
.build()
.ok()?;
let response = client
.get(ANNOUNCEMENT_TIP_URL)
.timeout(Duration::from_millis(2000))
.send()