Stop client from being state carrier (#10595)

I'd like to make the client session-wide. This requires shedding all the
incidental state it currently carries.
This commit is contained in:
pakrym-oai 2026-02-04 09:05:37 -08:00 committed by GitHub
parent 49dd67a260
commit 7f20357611
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 127 additions and 176 deletions

View file

@ -153,61 +153,6 @@ impl ModelClient {
}
impl ModelClient {
pub fn get_model_context_window(&self) -> Option<i64> {
let model_info = &self.state.model_info;
let effective_context_window_percent = model_info.effective_context_window_percent;
model_info.context_window.map(|context_window| {
context_window.saturating_mul(effective_context_window_percent) / 100
})
}
pub fn config(&self) -> Arc<Config> {
Arc::clone(&self.state.config)
}
pub fn provider(&self) -> &ModelProviderInfo {
&self.state.provider
}
pub fn get_provider(&self) -> ModelProviderInfo {
self.state.provider.clone()
}
pub fn get_otel_manager(&self) -> OtelManager {
self.state.otel_manager.clone()
}
pub fn get_session_source(&self) -> SessionSource {
self.state.session_source.clone()
}
pub(crate) fn transport_manager(&self) -> TransportManager {
self.state.transport_manager.clone()
}
/// Returns the currently configured model slug.
pub fn get_model(&self) -> String {
self.state.model_info.slug.clone()
}
pub fn get_model_info(&self) -> ModelInfo {
self.state.model_info.clone()
}
/// Returns the current reasoning effort setting.
pub fn get_reasoning_effort(&self) -> Option<ReasoningEffortConfig> {
self.state.effort
}
/// Returns the current reasoning summary setting.
pub fn get_reasoning_summary(&self) -> ReasoningSummaryConfig {
self.state.summary
}
pub fn get_auth_manager(&self) -> Option<Arc<AuthManager>> {
self.state.auth_manager.clone()
}
/// Compacts the current conversation history using the Compact endpoint.
///
/// This is a unary call (no streaming) that returns a new list of

View file

@ -212,6 +212,7 @@ use codex_protocol::models::ContentItem;
use codex_protocol::models::DeveloperInstructions;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::protocol::CodexErrorInfo;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::user_input::UserInput;
@ -484,6 +485,15 @@ pub(crate) struct Session {
pub(crate) struct TurnContext {
pub(crate) sub_id: String,
pub(crate) client: ModelClient,
pub(crate) config: Arc<Config>,
pub(crate) auth_manager: Option<Arc<AuthManager>>,
pub(crate) model_info: ModelInfo,
pub(crate) otel_manager: OtelManager,
pub(crate) provider: ModelProviderInfo,
pub(crate) reasoning_effort: Option<ReasoningEffortConfig>,
pub(crate) reasoning_summary: ReasoningSummaryConfig,
pub(crate) session_source: SessionSource,
pub(crate) transport_manager: TransportManager,
/// The session's current working directory. All relative paths provided by
/// the model as well as sandbox policies are resolved against this path
/// instead of `std::env::current_dir()`.
@ -507,6 +517,13 @@ pub(crate) struct TurnContext {
turn_metadata_header: OnceCell<Option<String>>,
}
impl TurnContext {
pub(crate) fn model_context_window(&self) -> Option<i64> {
let effective_context_window_percent = self.model_info.effective_context_window_percent;
self.model_info.context_window.map(|context_window| {
context_window.saturating_mul(effective_context_window_percent) / 100
})
}
pub(crate) fn resolve_path(&self, path: Option<String>) -> PathBuf {
path.as_ref()
.map(PathBuf::from)
@ -694,10 +711,17 @@ impl Session {
sub_id: String,
transport_manager: TransportManager,
) -> TurnContext {
let reasoning_effort = session_configuration.collaboration_mode.reasoning_effort();
let reasoning_summary = session_configuration.model_reasoning_summary;
let otel_manager = otel_manager.clone().with_model(
session_configuration.collaboration_mode.model(),
model_info.slug.as_str(),
);
let session_source = session_configuration.session_source.clone();
let auth_manager_for_context = auth_manager.clone();
let provider_for_context = provider.clone();
let transport_manager_for_context = transport_manager.clone();
let otel_manager_for_context = otel_manager.clone();
let per_turn_config = Arc::new(per_turn_config);
let client = ModelClient::new(
per_turn_config.clone(),
@ -705,10 +729,10 @@ impl Session {
model_info.clone(),
otel_manager,
provider,
session_configuration.collaboration_mode.reasoning_effort(),
session_configuration.model_reasoning_summary,
reasoning_effort,
reasoning_summary,
conversation_id,
session_configuration.session_source.clone(),
session_source.clone(),
transport_manager,
);
@ -722,6 +746,15 @@ impl Session {
TurnContext {
sub_id,
client,
config: per_turn_config.clone(),
auth_manager: auth_manager_for_context,
model_info: model_info.clone(),
otel_manager: otel_manager_for_context,
provider: provider_for_context,
reasoning_effort,
reasoning_summary,
session_source,
transport_manager: transport_manager_for_context,
cwd,
developer_instructions: session_configuration.developer_instructions.clone(),
compact_prompt: session_configuration.compact_prompt.clone(),
@ -1100,7 +1133,7 @@ impl Session {
None
}
}) {
let curr = turn_context.client.get_model();
let curr = turn_context.model_info.slug.as_str();
if prev != curr {
warn!(
"resuming session with different model: previous={prev}, current={curr}"
@ -1378,8 +1411,8 @@ impl Session {
if let Some(personality) = next.personality
&& next.personality != previous.personality
{
let model_info = next.client.get_model_info();
let personality_message = Self::personality_message_for(&model_info, personality);
let model_info = &next.model_info;
let personality_message = Self::personality_message_for(model_info, personality);
personality_message.map(|personality_message| {
DeveloperInstructions::personality_spec_message(personality_message).into()
})
@ -1943,7 +1976,7 @@ impl Session {
if self.features.enabled(Feature::Personality)
&& let Some(personality) = turn_context.personality
{
let model_info = turn_context.client.get_model_info();
let model_info = turn_context.model_info.clone();
let has_baked_personality = model_info.supports_personality()
&& base_instructions == model_info.get_model_instructions(Some(personality));
if !has_baked_personality
@ -1996,10 +2029,8 @@ impl Session {
{
let mut state = self.state.lock().await;
if let Some(token_usage) = token_usage {
state.update_token_info_from_usage(
token_usage,
turn_context.client.get_model_context_window(),
);
state
.update_token_info_from_usage(token_usage, turn_context.model_context_window());
}
}
self.send_token_count_event(turn_context).await;
@ -2030,7 +2061,7 @@ impl Session {
};
if info.model_context_window.is_none() {
info.model_context_window = turn_context.client.get_model_context_window();
info.model_context_window = turn_context.model_context_window();
}
state.set_token_info(Some(info));
@ -2088,7 +2119,7 @@ impl Session {
}
pub(crate) async fn set_total_tokens_full(&self, turn_context: &TurnContext) {
if let Some(context_window) = turn_context.client.get_model_context_window() {
if let Some(context_window) = turn_context.model_context_window() {
let mut state = self.state.lock().await;
state.set_token_usage_full(context_window);
}
@ -2702,10 +2733,7 @@ mod handlers {
// new_turn_with_sub_id already emits the error event.
return;
};
current_context
.client
.get_otel_manager()
.user_prompt(&items);
current_context.otel_manager.user_prompt(&items);
// Attempt to inject input into current task
if let Err(items) = sess.inject_input(items).await {
@ -3247,7 +3275,7 @@ async fn spawn_review_thread(
let model = config
.review_model
.clone()
.unwrap_or_else(|| parent_turn_context.client.get_model());
.unwrap_or_else(|| parent_turn_context.model_info.slug.clone());
let review_model_info = sess
.services
.models_manager
@ -3266,8 +3294,8 @@ async fn spawn_review_thread(
});
let review_prompt = resolved.prompt.clone();
let provider = parent_turn_context.client.get_provider();
let auth_manager = parent_turn_context.client.get_auth_manager();
let provider = parent_turn_context.provider.clone();
let auth_manager = parent_turn_context.auth_manager.clone();
let model_info = review_model_info.clone();
// Build perturn client with the requested model/family.
@ -3277,9 +3305,16 @@ async fn spawn_review_thread(
per_turn_config.web_search_mode = Some(review_web_search_mode);
let otel_manager = parent_turn_context
.client
.get_otel_manager()
.otel_manager
.clone()
.with_model(model.as_str(), review_model_info.slug.as_str());
let auth_manager_for_context = auth_manager.clone();
let provider_for_context = provider.clone();
let otel_manager_for_context = otel_manager.clone();
let reasoning_effort = per_turn_config.model_reasoning_effort;
let reasoning_summary = per_turn_config.model_reasoning_summary;
let session_source = parent_turn_context.session_source.clone();
let transport_manager = parent_turn_context.transport_manager.clone();
let per_turn_config = Arc::new(per_turn_config);
let client = ModelClient::new(
@ -3288,16 +3323,25 @@ async fn spawn_review_thread(
model_info.clone(),
otel_manager,
provider,
per_turn_config.model_reasoning_effort,
per_turn_config.model_reasoning_summary,
reasoning_effort,
reasoning_summary,
sess.conversation_id,
parent_turn_context.client.get_session_source(),
parent_turn_context.client.transport_manager(),
session_source.clone(),
transport_manager.clone(),
);
let review_turn_context = TurnContext {
sub_id: sub_id.to_string(),
client,
config: per_turn_config,
auth_manager: auth_manager_for_context,
model_info: model_info.clone(),
otel_manager: otel_manager_for_context,
provider: provider_for_context,
reasoning_effort,
reasoning_summary,
session_source,
transport_manager,
tools_config,
ghost_snapshot: parent_turn_context.ghost_snapshot.clone(),
developer_instructions: None,
@ -3414,12 +3458,12 @@ pub(crate) async fn run_turn(
return None;
}
let model_info = turn_context.client.get_model_info();
let model_info = turn_context.model_info.clone();
let auto_compact_limit = model_info.auto_compact_token_limit().unwrap_or(i64::MAX);
let total_usage_tokens = sess.get_total_token_usage().await;
let event = EventMsg::TurnStarted(TurnStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
model_context_window: turn_context.model_context_window(),
collaboration_mode_kind: turn_context.collaboration_mode.mode,
});
sess.send_event(&turn_context, event).await;
@ -3438,7 +3482,7 @@ pub(crate) async fn run_turn(
|| (HashMap::new(), HashMap::new()),
|outcome| build_skill_name_counts(&outcome.skills, &outcome.disabled_paths),
);
let connector_slug_counts = if turn_context.client.config().features.enabled(Feature::Apps) {
let connector_slug_counts = if turn_context.config.features.enabled(Feature::Apps) {
let mcp_tools = match sess
.services
.mcp_connection_manager
@ -3467,7 +3511,7 @@ pub(crate) async fn run_turn(
});
let explicit_app_paths = collect_explicit_app_paths(&input);
let config = turn_context.client.config();
let config = turn_context.config.clone();
if config
.features
.enabled(Feature::SkillEnvVarDependencyPrompt)
@ -3484,9 +3528,9 @@ pub(crate) async fn run_turn(
)
.await;
let otel_manager = turn_context.client.get_otel_manager();
let otel_manager = turn_context.otel_manager.clone();
let thread_id = sess.conversation_id.to_string();
let tracking = build_track_events_context(turn_context.client.get_model(), thread_id);
let tracking = build_track_events_context(turn_context.model_info.slug.clone(), thread_id);
let SkillInjections {
items: skill_items,
warnings: skill_warnings,
@ -3639,7 +3683,7 @@ pub(crate) async fn run_turn(
}
async fn run_auto_compact(sess: &Arc<Session>, turn_context: &Arc<TurnContext>) {
if should_use_remote_compact_task(sess.as_ref(), &turn_context.client.get_provider()) {
if should_use_remote_compact_task(sess.as_ref(), &turn_context.provider) {
run_inline_remote_auto_compact_task(Arc::clone(sess), Arc::clone(turn_context)).await;
} else {
run_inline_auto_compact_task(Arc::clone(sess), Arc::clone(turn_context)).await;
@ -3748,7 +3792,7 @@ struct SamplingRequestToolSelection<'a> {
skip_all,
fields(
turn_id = %turn_context.sub_id,
model = %turn_context.client.get_model(),
model = %turn_context.model_info.slug,
cwd = %turn_context.cwd.display()
)
)]
@ -3769,7 +3813,7 @@ async fn run_sampling_request(
.list_all_tools()
.or_cancel(&cancellation_token)
.await?;
let connectors_for_tools = if turn_context.client.config().features.enabled(Feature::Apps) {
let connectors_for_tools = if turn_context.config.features.enabled(Feature::Apps) {
let connectors = connectors::accessible_connectors_from_mcp_tools(&mcp_tools);
Some(filter_connectors_for_input(
connectors,
@ -3794,10 +3838,7 @@ async fn run_sampling_request(
turn_context.dynamic_tools.as_slice(),
));
let model_supports_parallel = turn_context
.client
.get_model_info()
.supports_parallel_tool_calls;
let model_supports_parallel = turn_context.model_info.supports_parallel_tool_calls;
let base_instructions = sess.get_base_instructions().await;
@ -3845,7 +3886,7 @@ async fn run_sampling_request(
}
// Use the configured provider-specific stream retry budget.
let max_retries = turn_context.client.get_provider().stream_max_retries();
let max_retries = turn_context.provider.stream_max_retries();
if retries >= max_retries && client_session.try_switch_fallback_transport() {
sess.send_event(
&turn_context,
@ -4291,7 +4332,7 @@ async fn drain_in_flight(
skip_all,
fields(
turn_id = %turn_context.sub_id,
model = %turn_context.client.get_model()
model = %turn_context.model_info.slug
)
)]
async fn try_run_sampling_request(
@ -4308,11 +4349,11 @@ async fn try_run_sampling_request(
cwd: turn_context.cwd.clone(),
approval_policy: turn_context.approval_policy,
sandbox_policy: turn_context.sandbox_policy.clone(),
model: turn_context.client.get_model(),
model: turn_context.model_info.slug.clone(),
personality: turn_context.personality,
collaboration_mode: Some(collaboration_mode),
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
effort: turn_context.reasoning_effort,
summary: turn_context.reasoning_summary,
user_instructions: turn_context.user_instructions.clone(),
developer_instructions: turn_context.developer_instructions.clone(),
final_output_json_schema: turn_context.final_output_json_schema.clone(),
@ -4320,10 +4361,10 @@ async fn try_run_sampling_request(
});
feedback_tags!(
model = turn_context.client.get_model(),
model = turn_context.model_info.slug.clone(),
approval_policy = turn_context.approval_policy,
sandbox_policy = turn_context.sandbox_policy,
effort = turn_context.client.get_reasoning_effort(),
effort = turn_context.reasoning_effort,
auth_mode = sess.services.auth_manager.get_auth_mode(),
features = sess.features.enabled_features(),
);

View file

@ -60,7 +60,7 @@ pub(crate) async fn run_compact_task(
input: Vec<UserInput>,
) {
let start_event = EventMsg::TurnStarted(TurnStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
model_context_window: turn_context.model_context_window(),
collaboration_mode_kind: turn_context.collaboration_mode.mode,
});
sess.send_event(&turn_context, start_event).await;
@ -85,7 +85,7 @@ async fn run_compact_task_inner(
let mut truncated_count = 0usize;
let max_retries = turn_context.client.get_provider().stream_max_retries();
let max_retries = turn_context.provider.stream_max_retries();
let mut retries = 0;
// TODO: If we need to guarantee the persisted mode always matches the prompt used for this
@ -97,11 +97,11 @@ async fn run_compact_task_inner(
cwd: turn_context.cwd.clone(),
approval_policy: turn_context.approval_policy,
sandbox_policy: turn_context.sandbox_policy.clone(),
model: turn_context.client.get_model(),
model: turn_context.model_info.slug.clone(),
personality: turn_context.personality,
collaboration_mode: Some(collaboration_mode),
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
effort: turn_context.reasoning_effort,
summary: turn_context.reasoning_summary,
user_instructions: turn_context.user_instructions.clone(),
developer_instructions: turn_context.developer_instructions.clone(),
final_output_json_schema: turn_context.final_output_json_schema.clone(),

View file

@ -24,7 +24,7 @@ pub(crate) async fn run_inline_remote_auto_compact_task(
pub(crate) async fn run_remote_compact_task(sess: Arc<Session>, turn_context: Arc<TurnContext>) {
let start_event = EventMsg::TurnStarted(TurnStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
model_context_window: turn_context.model_context_window(),
collaboration_mode_kind: turn_context.collaboration_mode.mode,
});
sess.send_event(&turn_context, start_event).await;
@ -104,7 +104,7 @@ fn trim_function_call_history_to_fit_context_window(
turn_context: &TurnContext,
) -> usize {
let mut deleted_items = 0usize;
let Some(context_window) = turn_context.client.get_model_context_window() else {
let Some(context_window) = turn_context.model_context_window() else {
return deleted_items;
};

View file

@ -85,10 +85,8 @@ impl ContextManager {
// Estimate token usage using byte-based heuristics from the truncation helpers.
// This is a coarse lower bound, not a tokenizer-accurate count.
pub(crate) fn estimate_token_count(&self, turn_context: &TurnContext) -> Option<i64> {
let model_info = turn_context.client.get_model_info();
let personality = turn_context
.personality
.or(turn_context.client.config().personality);
let model_info = &turn_context.model_info;
let personality = turn_context.personality.or(turn_context.config.personality);
let base_instructions = model_info.get_model_instructions(personality);
let base_tokens = i64::try_from(approx_token_count(&base_instructions)).unwrap_or(i64::MAX);

View file

@ -144,7 +144,7 @@ pub(crate) async fn maybe_prompt_and_install_mcp_dependencies(
return;
}
let config = turn_context.client.config();
let config = turn_context.config.clone();
if mentioned_skills.is_empty() || !config.features.enabled(Feature::SkillMcpDependencyInstall) {
return;
}

View file

@ -120,8 +120,7 @@ pub(crate) async fn handle_mcp_tool_call(
let status = if result.is_ok() { "ok" } else { "error" };
turn_context
.client
.get_otel_manager()
.otel_manager
.counter("codex.mcp.call", 1, &[("status", status)]);
return ResponseInputItem::McpToolCallOutput { call_id, result };
@ -153,8 +152,7 @@ pub(crate) async fn handle_mcp_tool_call(
let status = if result.is_ok() { "ok" } else { "error" };
turn_context
.client
.get_otel_manager()
.otel_manager
.counter("codex.mcp.call", 1, &[("status", status)]);
ResponseInputItem::McpToolCallOutput { call_id, result }

View file

@ -100,8 +100,7 @@ pub(crate) async fn handle_output_item_done(
Err(FunctionCallError::MissingLocalShellCallId) => {
let msg = "LocalShellCall without call_id or id";
ctx.turn_context
.client
.get_otel_manager()
.otel_manager
.log_tool_failed("local_shell", msg);
tracing::error!(msg);

View file

@ -25,10 +25,7 @@ impl SessionTask for CompactTask {
_cancellation_token: CancellationToken,
) -> Option<String> {
let session = session.clone_session();
if crate::compact::should_use_remote_compact_task(
session.as_ref(),
&ctx.client.get_provider(),
) {
if crate::compact::should_use_remote_compact_task(session.as_ref(), &ctx.provider) {
let _ = session.services.otel_manager.counter(
"codex.task.compact",
1,

View file

@ -158,8 +158,7 @@ impl Session {
};
let timer = turn_context
.client
.get_otel_manager()
.otel_manager
.start_timer("codex.turn.e2e_duration_ms", &[])
.ok();

View file

@ -82,7 +82,7 @@ async fn start_review_conversation(
input: Vec<UserInput>,
cancellation_token: CancellationToken,
) -> Option<async_channel::Receiver<Event>> {
let config = ctx.client.config();
let config = ctx.config.clone();
let mut sub_agent_config = config.as_ref().clone();
// Carry over review-only feature restrictions so the delegate cannot
// re-enable blocked tools (web search, view image).
@ -94,7 +94,7 @@ async fn start_review_conversation(
let model = config
.review_model
.clone()
.unwrap_or_else(|| ctx.client.get_model());
.unwrap_or_else(|| ctx.model_info.slug.clone());
sub_agent_config.model = Some(model);
(run_codex_thread_one_shot(
sub_agent_config,

View file

@ -66,7 +66,7 @@ impl SessionTask for UserShellCommandTask {
.counter("codex.task.user_shell", 1, &[]);
let event = EventMsg::TurnStarted(TurnStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
model_context_window: turn_context.model_context_window(),
collaboration_mode_kind: turn_context.collaboration_mode.mode,
});
let session = session.clone_session();

View file

@ -114,7 +114,7 @@ mod spawn {
"Empty message can't be sent to an agent".to_string(),
));
}
let session_source = turn.client.get_session_source();
let session_source = turn.session_source.clone();
let child_depth = next_thread_spawn_depth(&session_source);
if exceeds_thread_spawn_depth_limit(child_depth) {
return Err(FunctionCallError::RespondToModel(
@ -593,13 +593,13 @@ fn build_agent_spawn_config(
turn: &TurnContext,
child_depth: i32,
) -> Result<Config, FunctionCallError> {
let base_config = turn.client.config();
let base_config = turn.config.clone();
let mut config = (*base_config).clone();
config.base_instructions = Some(base_instructions.text.clone());
config.model = Some(turn.client.get_model());
config.model_provider = turn.client.get_provider();
config.model_reasoning_effort = turn.client.get_reasoning_effort();
config.model_reasoning_summary = turn.client.get_reasoning_summary();
config.model = Some(turn.model_info.slug.clone());
config.model_provider = turn.provider.clone();
config.model_reasoning_effort = turn.reasoning_effort;
config.model_reasoning_summary = turn.reasoning_summary;
config.developer_instructions = turn.developer_instructions.clone();
config.compact_prompt = turn.compact_prompt.clone();
config.shell_environment_policy = turn.shell_environment_policy.clone();
@ -633,7 +633,6 @@ mod tests {
use crate::ThreadManager;
use crate::agent::MAX_THREAD_SPAWN_DEPTH;
use crate::built_in_model_providers;
use crate::client::ModelClient;
use crate::codex::make_session_and_context;
use crate::config::types::ShellEnvironmentPolicy;
use crate::function_tool::FunctionCallError;
@ -767,22 +766,10 @@ mod tests {
let manager = thread_manager();
session.services.agent_control = manager.agent_control();
let session_source = SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
turn.session_source = SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
parent_thread_id: session.conversation_id,
depth: MAX_THREAD_SPAWN_DEPTH,
});
turn.client = ModelClient::new(
turn.client.config(),
Some(session.services.auth_manager.clone()),
turn.client.get_model_info(),
turn.client.get_otel_manager(),
turn.client.get_provider(),
turn.client.get_reasoning_effort(),
turn.client.get_reasoning_summary(),
session.conversation_id,
session_source,
session.services.transport_manager.clone(),
);
let invocation = invocation(
Arc::new(session),
@ -865,7 +852,7 @@ mod tests {
let (mut session, turn) = make_session_and_context().await;
let manager = thread_manager();
session.services.agent_control = manager.agent_control();
let config = turn.client.config().as_ref().clone();
let config = turn.config.as_ref().clone();
let thread = manager.start_thread(config).await.expect("start thread");
let agent_id = thread.thread_id;
let invocation = invocation(
@ -1008,7 +995,7 @@ mod tests {
let (mut session, turn) = make_session_and_context().await;
let manager = thread_manager();
session.services.agent_control = manager.agent_control();
let config = turn.client.config().as_ref().clone();
let config = turn.config.as_ref().clone();
let thread = manager.start_thread(config).await.expect("start thread");
let agent_id = thread.thread_id;
let invocation = invocation(
@ -1053,7 +1040,7 @@ mod tests {
let (mut session, turn) = make_session_and_context().await;
let manager = thread_manager();
session.services.agent_control = manager.agent_control();
let config = turn.client.config().as_ref().clone();
let config = turn.config.as_ref().clone();
let thread = manager.start_thread(config).await.expect("start thread");
let agent_id = thread.thread_id;
let invocation = invocation(
@ -1084,7 +1071,7 @@ mod tests {
let (mut session, turn) = make_session_and_context().await;
let manager = thread_manager();
session.services.agent_control = manager.agent_control();
let config = turn.client.config().as_ref().clone();
let config = turn.config.as_ref().clone();
let thread = manager.start_thread(config).await.expect("start thread");
let agent_id = thread.thread_id;
let mut status_rx = manager
@ -1138,7 +1125,7 @@ mod tests {
let (mut session, turn) = make_session_and_context().await;
let manager = thread_manager();
session.services.agent_control = manager.agent_control();
let config = turn.client.config().as_ref().clone();
let config = turn.config.as_ref().clone();
let thread = manager.start_thread(config).await.expect("start thread");
let agent_id = thread.thread_id;
let status_before = manager.agent_control().get_status(agent_id).await;
@ -1193,12 +1180,12 @@ mod tests {
turn.sandbox_policy = SandboxPolicy::DangerFullAccess;
let config = build_agent_spawn_config(&base_instructions, &turn, 0).expect("spawn config");
let mut expected = (*turn.client.config()).clone();
let mut expected = (*turn.config).clone();
expected.base_instructions = Some(base_instructions.text);
expected.model = Some(turn.client.get_model());
expected.model_provider = turn.client.get_provider();
expected.model_reasoning_effort = turn.client.get_reasoning_effort();
expected.model_reasoning_summary = turn.client.get_reasoning_summary();
expected.model = Some(turn.model_info.slug.clone());
expected.model_provider = turn.provider.clone();
expected.model_reasoning_effort = turn.reasoning_effort;
expected.model_reasoning_summary = turn.reasoning_summary;
expected.developer_instructions = turn.developer_instructions.clone();
expected.compact_prompt = turn.compact_prompt.clone();
expected.shell_environment_policy = turn.shell_environment_policy.clone();
@ -1217,24 +1204,11 @@ mod tests {
#[tokio::test]
async fn build_agent_spawn_config_preserves_base_user_instructions() {
let (session, mut turn) = make_session_and_context().await;
let session_source = turn.client.get_session_source();
let mut base_config = (*turn.client.config()).clone();
let (_session, mut turn) = make_session_and_context().await;
let mut base_config = (*turn.config).clone();
base_config.user_instructions = Some("base-user".to_string());
turn.user_instructions = Some("resolved-user".to_string());
let transport_manager = turn.client.transport_manager();
turn.client = ModelClient::new(
Arc::new(base_config.clone()),
Some(session.services.auth_manager.clone()),
turn.client.get_model_info(),
turn.client.get_otel_manager(),
turn.client.get_provider(),
turn.client.get_reasoning_effort(),
turn.client.get_reasoning_summary(),
session.conversation_id,
session_source,
transport_manager,
);
turn.config = Arc::new(base_config.clone());
let base_instructions = BaseInstructions {
text: "base".to_string(),
};

View file

@ -43,7 +43,7 @@ impl ToolOrchestrator {
where
T: ToolRuntime<Rq, Out>,
{
let otel = turn_ctx.client.get_otel_manager();
let otel = turn_ctx.otel_manager.clone();
let otel_tn = &tool_ctx.tool_name;
let otel_ci = &tool_ctx.call_id;
let otel_user = ToolDecisionSource::User;

View file

@ -70,7 +70,7 @@ impl ToolRegistry {
) -> Result<ResponseInputItem, FunctionCallError> {
let tool_name = invocation.tool_name.clone();
let call_id_owned = invocation.call_id.clone();
let otel = invocation.turn.client.get_otel_manager();
let otel = invocation.turn.otel_manager.clone();
let payload_for_response = invocation.payload.clone();
let log_payload = payload_for_response.log_payload();