add none personality option (#10688)

- add none personality enum value and empty placeholder behavior
- add docs/schema updates and e2e coverage
This commit is contained in:
Ahmed Ibrahim 2026-02-04 15:40:33 -08:00 committed by GitHub
parent 7bcc552325
commit f9c38f531c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 157 additions and 40 deletions

View file

@ -1171,6 +1171,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],

View file

@ -12120,6 +12120,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],

View file

@ -251,6 +251,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],

View file

@ -29,6 +29,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],

View file

@ -66,6 +66,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],

View file

@ -2,4 +2,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type Personality = "friendly" | "pragmatic";
export type Personality = "none" | "friendly" | "pragmatic";

View file

@ -147,6 +147,8 @@ Start a fresh thread when you need a new Codex conversation.
{ "method": "thread/started", "params": { "thread": { … } } }
```
Valid `personality` values are `"friendly"`, `"pragmatic"`, and `"none"`. When `"none"` is selected, the personality placeholder is replaced with an empty string.
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted. You can also pass the same configuration overrides supported by `thread/start`, such as `personality`:
```json

View file

@ -708,6 +708,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],

View file

@ -375,6 +375,10 @@ pub fn sse(events: Vec<Value>) -> String {
out
}
/// Convenience: builds a full SSE body for a response that is created and
/// then immediately completed, with both events carrying the given `id`.
pub fn sse_completed(id: &str) -> String {
    let events = vec![ev_response_created(id), ev_completed(id)];
    sse(events)
}
/// Convenience: SSE event for a completed response with a specific id.
pub fn ev_completed(id: &str) -> Value {
serde_json::json!({

View file

@ -19,12 +19,10 @@ use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::openai_models::default_input_modalities;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_models_once;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
use core_test_support::responses::sse_completed;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
@ -78,11 +76,7 @@ async fn user_turn_personality_none_does_not_add_update_message() -> anyhow::Res
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
let mut builder = test_codex()
.with_model("gpt-5.2-codex")
.with_config(|config| {
@ -128,11 +122,7 @@ async fn config_personality_some_sets_instructions_template() -> anyhow::Result<
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
let mut builder = test_codex()
.with_model("gpt-5.2-codex")
.with_config(|config| {
@ -181,6 +171,111 @@ async fn config_personality_some_sets_instructions_template() -> anyhow::Result<
Ok(())
}
/// Verifies that configuring `Personality::None` suppresses personality text
/// entirely: no friendly/pragmatic template in the instructions, no leftover
/// `{{ personality }}` placeholder, and no `<personality_spec>` developer
/// message on the turn.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_personality_none_sends_no_personality() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    // Mount the mock response before building the session so the first
    // outbound request is captured by `resp_mock`.
    let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
    let mut builder = test_codex()
        .with_model("gpt-5.2-codex")
        .with_config(|config| {
            // Disable remote models so only the local template is in play,
            // then opt in to the personality feature with the None variant.
            config.features.disable(Feature::RemoteModels);
            config.features.enable(Feature::Personality);
            config.personality = Some(Personality::None);
        });
    let test = builder.build(&server).await?;
    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "hello".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd_path().to_path_buf(),
            approval_policy: test.config.approval_policy.value(),
            sandbox_policy: SandboxPolicy::ReadOnly,
            model: test.session_configured.model.clone(),
            effort: test.config.model_reasoning_effort,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            // Per-turn override left unset: the config-level None must apply.
            personality: None,
        })
        .await?;
    // Wait for the turn to finish before inspecting the captured request.
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    let request = resp_mock.single_request();
    let instructions_text = request.instructions_text();
    assert!(
        !instructions_text.contains(LOCAL_FRIENDLY_TEMPLATE),
        "expected no friendly personality template, got: {instructions_text:?}"
    );
    assert!(
        !instructions_text.contains(LOCAL_PRAGMATIC_TEMPLATE),
        "expected no pragmatic personality template, got: {instructions_text:?}"
    );
    // None should substitute an empty string, not leave the raw placeholder.
    assert!(
        !instructions_text.contains("{{ personality }}"),
        "expected personality placeholder to be removed, got: {instructions_text:?}"
    );
    let developer_texts = request.message_input_texts("developer");
    assert!(
        !developer_texts
            .iter()
            .any(|text| text.contains("<personality_spec>")),
        "did not expect a personality update message when personality is None"
    );
    Ok(())
}
/// Verifies that when no `personality` is set in the config, the friendly
/// template is used by default in the model instructions.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn default_personality_is_friendly_without_config_toml() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    // Mount the mock response before building the session so the first
    // outbound request is captured by `resp_mock`.
    let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
    let mut builder = test_codex()
        .with_model("gpt-5.2-codex")
        .with_config(|config| {
            // Enable the feature but deliberately leave `config.personality`
            // unset to exercise the default.
            config.features.disable(Feature::RemoteModels);
            config.features.enable(Feature::Personality);
        });
    let test = builder.build(&server).await?;
    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "hello".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd_path().to_path_buf(),
            approval_policy: test.config.approval_policy.value(),
            sandbox_policy: SandboxPolicy::ReadOnly,
            model: test.session_configured.model.clone(),
            effort: test.config.model_reasoning_effort,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            // No per-turn override either — the default must come through.
            personality: None,
        })
        .await?;
    // Wait for the turn to finish before inspecting the captured request.
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    let request = resp_mock.single_request();
    let instructions_text = request.instructions_text();
    assert!(
        instructions_text.contains(LOCAL_FRIENDLY_TEMPLATE),
        "expected default friendly template, got: {instructions_text:?}"
    );
    Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
@ -188,10 +283,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
],
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let mut builder = test_codex()
@ -287,10 +379,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
],
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let mut builder = test_codex()
@ -397,10 +486,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
],
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let mut builder = test_codex()
@ -537,11 +623,7 @@ async fn ignores_remote_personality_if_remote_models_disabled() -> anyhow::Resul
)
.await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
let mut builder = test_codex()
.with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
@ -657,11 +739,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
)
.await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
let mut builder = test_codex()
.with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
@ -774,10 +852,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
let resp_mock = mount_sse_sequence(
&server,
vec![
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
],
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;

View file

@ -73,6 +73,8 @@ Send input to the active turn:
- `sendUserMessage` → enqueue items to the conversation
- `sendUserTurn` → structured turn with explicit `cwd`, `approvalPolicy`, `sandboxPolicy`, `model`, optional `effort`, `summary`, optional `personality`, and optional `outputSchema` (JSON Schema for the final assistant message)
Valid `personality` values are `friendly`, `pragmatic`, and `none`. When `none` is selected, the personality placeholder is replaced with an empty string.
For v2 threads, `turn/start` also accepts `outputSchema` to constrain the final assistant message for that turn.
Interrupt a running turn: `interruptConversation`.

View file

@ -72,6 +72,9 @@ For complete documentation of the `Op` and `EventMsg` variants, refer to [protoc
- `Op::UserInputAnswer` Provide answers for a `request_user_input` tool call
- `Op::ListSkills` Request skills for one or more cwd values (optionally `force_reload`)
- `Op::UserTurn` and `Op::OverrideTurnContext` accept an optional `personality` override that updates the model's communication style
Valid `personality` values are `friendly`, `pragmatic`, and `none`. When `none` is selected, the personality placeholder is replaced with an empty string.
- `EventMsg`
- `EventMsg::AgentMessage` Messages from the `Model`
- `EventMsg::AgentMessageContentDelta` Streaming assistant text

View file

@ -96,6 +96,7 @@ pub enum WindowsSandboxLevel {
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum Personality {
    /// Opt out of personality instructions entirely: the personality
    /// placeholder in the instructions template is replaced with an
    /// empty string and no personality update message is sent.
    None,
    /// Warm, collaborative communication style.
    Friendly,
    /// Concise, task-focused communication style.
    Pragmatic,
}

View file

@ -335,6 +335,7 @@ impl ModelInstructionsVariables {
pub fn get_personality_message(&self, personality: Option<Personality>) -> Option<String> {
if let Some(personality) = personality {
match personality {
Personality::None => Some(String::new()),
Personality::Friendly => self.personality_friendly.clone(),
Personality::Pragmatic => self.personality_pragmatic.clone(),
}
@ -546,6 +547,10 @@ mod tests {
model.get_model_instructions(Some(Personality::Pragmatic)),
"Hello\n"
);
assert_eq!(
model.get_model_instructions(Some(Personality::None)),
"Hello\n"
);
assert_eq!(model.get_model_instructions(None), "Hello\n");
let model_no_personality = test_model(Some(ModelMessages {
@ -564,6 +569,10 @@ mod tests {
model_no_personality.get_model_instructions(Some(Personality::Pragmatic)),
"Hello\n"
);
assert_eq!(
model_no_personality.get_model_instructions(Some(Personality::None)),
"Hello\n"
);
assert_eq!(model_no_personality.get_model_instructions(None), "Hello\n");
}
@ -603,6 +612,10 @@ mod tests {
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
Some("pragmatic".to_string())
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::None)),
Some(String::new())
);
assert_eq!(
personality_variables.get_personality_message(None),
Some("default".to_string())
@ -621,6 +634,10 @@ mod tests {
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
None
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::None)),
Some(String::new())
);
assert_eq!(
personality_variables.get_personality_message(None),
Some("default".to_string())
@ -639,6 +656,10 @@ mod tests {
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
Some("pragmatic".to_string())
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::None)),
Some(String::new())
);
assert_eq!(personality_variables.get_personality_message(None), None);
}
}

View file

@ -2326,6 +2326,7 @@ impl App {
fn personality_label(personality: Personality) -> &'static str {
match personality {
Personality::None => "None",
Personality::Friendly => "Friendly",
Personality::Pragmatic => "Pragmatic",
}

View file

@ -5505,6 +5505,7 @@ impl ChatWidget {
fn personality_label(personality: Personality) -> &'static str {
match personality {
Personality::None => "None",
Personality::Friendly => "Friendly",
Personality::Pragmatic => "Pragmatic",
}
@ -5512,6 +5513,7 @@ impl ChatWidget {
fn personality_description(personality: Personality) -> &'static str {
match personality {
Personality::None => "No personality instructions.",
Personality::Friendly => "Warm, collaborative, and helpful.",
Personality::Pragmatic => "Concise, task-focused, and direct.",
}