diff --git a/codex-rs/app-server/tests/suite/v2/thread_resume.rs b/codex-rs/app-server/tests/suite/v2/thread_resume.rs index 358fec351..28b604001 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_resume.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_resume.rs @@ -403,7 +403,7 @@ async fn thread_resume_accepts_personality_override() -> Result<()> { .send_thread_resume_request(ThreadResumeParams { thread_id: thread.id.clone(), model: Some("gpt-5.2-codex".to_string()), - personality: Some(Personality::Pragmatic), + personality: Some(Personality::Friendly), ..Default::default() }) .await?; diff --git a/codex-rs/app-server/tests/suite/v2/turn_start.rs b/codex-rs/app-server/tests/suite/v2/turn_start.rs index bbf94f1bd..87ae4cfaa 100644 --- a/codex-rs/app-server/tests/suite/v2/turn_start.rs +++ b/codex-rs/app-server/tests/suite/v2/turn_start.rs @@ -455,7 +455,7 @@ async fn turn_start_accepts_personality_override_v2() -> Result<()> { text: "Hello".to_string(), text_elements: Vec::new(), }], - personality: Some(Personality::Pragmatic), + personality: Some(Personality::Friendly), ..Default::default() }) .await?; @@ -560,7 +560,7 @@ async fn turn_start_change_personality_mid_thread_v2() -> Result<()> { text: "Hello again".to_string(), text_elements: Vec::new(), }], - personality: Some(Personality::Pragmatic), + personality: Some(Personality::Friendly), ..Default::default() }) .await?; diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs index 58768e584..94e36eb90 100644 --- a/codex-rs/core/src/config/mod.rs +++ b/codex-rs/core/src/config/mod.rs @@ -1550,7 +1550,7 @@ impl Config { .or_else(|| { features .enabled(Feature::Personality) - .then_some(Personality::Friendly) + .then_some(Personality::Pragmatic) }); let experimental_compact_prompt_path = config_profile @@ -3870,7 +3870,7 @@ model_verbosity = "high" model_reasoning_summary: ReasoningSummary::Detailed, model_supports_reasoning_summaries: None, model_verbosity: None, - personality: 
Some(Personality::Friendly), + personality: Some(Personality::Pragmatic), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), base_instructions: None, developer_instructions: None, @@ -3956,7 +3956,7 @@ model_verbosity = "high" model_reasoning_summary: ReasoningSummary::default(), model_supports_reasoning_summaries: None, model_verbosity: None, - personality: Some(Personality::Friendly), + personality: Some(Personality::Pragmatic), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), base_instructions: None, developer_instructions: None, @@ -4057,7 +4057,7 @@ model_verbosity = "high" model_reasoning_summary: ReasoningSummary::default(), model_supports_reasoning_summaries: None, model_verbosity: None, - personality: Some(Personality::Friendly), + personality: Some(Personality::Pragmatic), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), base_instructions: None, developer_instructions: None, @@ -4144,7 +4144,7 @@ model_verbosity = "high" model_reasoning_summary: ReasoningSummary::Detailed, model_supports_reasoning_summaries: None, model_verbosity: Some(Verbosity::High), - personality: Some(Personality::Friendly), + personality: Some(Personality::Pragmatic), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), base_instructions: None, developer_instructions: None, diff --git a/codex-rs/core/tests/suite/personality.rs b/codex-rs/core/tests/suite/personality.rs index ff2c0cfec..d5d7a81ba 100644 --- a/codex-rs/core/tests/suite/personality.rs +++ b/codex-rs/core/tests/suite/personality.rs @@ -233,7 +233,7 @@ async fn config_personality_none_sends_no_personality() -> anyhow::Result<()> { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn default_personality_is_friendly_without_config_toml() -> anyhow::Result<()> { +async fn default_personality_is_pragmatic_without_config_toml() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; @@ -269,7 +269,7 @@ async 
fn default_personality_is_friendly_without_config_toml() -> anyhow::Result let request = resp_mock.single_request(); let instructions_text = request.instructions_text(); assert!( - instructions_text.contains(LOCAL_FRIENDLY_TEMPLATE), + instructions_text.contains(LOCAL_PRAGMATIC_TEMPLATE), - "expected default friendly template, got: {instructions_text:?}" + "expected default pragmatic template, got: {instructions_text:?}" ); @@ -324,7 +324,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()> effort: None, summary: None, collaboration_mode: None, - personality: Some(Personality::Pragmatic), + personality: Some(Personality::Friendly), }) .await?; @@ -365,7 +365,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()> "expected personality update preamble, got {personality_text:?}" ); assert!( - personality_text.contains(LOCAL_PRAGMATIC_TEMPLATE), + personality_text.contains(LOCAL_FRIENDLY_TEMPLATE), - "expected personality update to include the local pragmatic template, got: {personality_text:?}" + "expected personality update to include the local friendly template, got: {personality_text:?}" ); @@ -902,7 +902,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() - effort: None, summary: None, collaboration_mode: None, - personality: Some(Personality::Pragmatic), + personality: Some(Personality::Friendly), }) .await?; @@ -942,7 +942,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() - "expected personality update preamble, got {personality_text:?}" ); assert!( - personality_text.contains(remote_pragmatic_message), + personality_text.contains(remote_friendly_message), "expected personality update to include remote template, got: {personality_text:?}" ); diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__personality_selection_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__personality_selection_popup.snap index 3c6bba94e..3cd887f2e 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__personality_selection_popup.snap +++ 
b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__personality_selection_popup.snap @@ -5,7 +5,7 @@ expression: popup Select Personality Choose a communication style for Codex. Disable in /experimental. -› 1. Friendly (current) Warm, collaborative, and helpful. - 2. Pragmatic Concise, task-focused, and direct. + 1. Friendly Warm, collaborative, and helpful. +› 2. Pragmatic (current) Concise, task-focused, and direct. Press enter to confirm or esc to go back