Apply argument comment lint across codex-rs (#14652)

## Why

Once the repo-local lint exists, `codex-rs` needs to follow the
checked-in convention and CI needs to keep it from drifting. This commit
applies the fallback `/*param*/` style consistently across existing
positional literal call sites without changing those APIs.

The longer-term preference is still to avoid APIs that require comments
by choosing clearer parameter types and call shapes. This PR is
intentionally the mechanical follow-through for the places where the
existing signatures stay in place.

After rebasing onto newer `main`, the rollout also had to cover newly
introduced `tui_app_server` call sites. That made it clear the first cut
of the CI job was too expensive for the common path: it spent almost as
much time installing `cargo-dylint` and re-testing the lint crate as a
representative test job spends running product tests. The CI update
keeps the full workspace enforcement but trims that extra overhead from
ordinary `codex-rs` PRs.

## What changed

- keep a dedicated `argument_comment_lint` job in `rust-ci`
- mechanically annotate remaining opaque positional literals across
`codex-rs` with exact `/*param*/` comments, including the rebased
`tui_app_server` call sites that now fall under the lint
- keep the checked-in style aligned with the lint policy by using
`/*param*/` and leaving string and char literals uncommented
- cache `cargo-dylint`, `dylint-link`, and the relevant Cargo
registry/git metadata in the lint job
- split changed-path detection so the lint crate's own `cargo test` step
runs only when `tools/argument-comment-lint/*` or
`.github/workflows/rust-ci.yml` changes
- continue to run the repo wrapper over the `codex-rs` workspace, so
product-code enforcement is unchanged

Most of the code changes in this commit are intentionally mechanical
comment rewrites or insertions driven by the lint itself.

## Verification

- `./tools/argument-comment-lint/run.sh --workspace`
- `cargo test -p codex-tui-app-server -p codex-tui`
- parsed `.github/workflows/rust-ci.yml` locally with PyYAML

---

* -> #14652
* #14651
This commit is contained in:
Michael Bolin 2026-03-16 16:48:15 -07:00 committed by GitHub
parent 6f05d8d735
commit b77fe8fefe
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
261 changed files with 2311 additions and 1377 deletions

View file

@ -14,6 +14,8 @@ jobs:
name: Detect changed areas
runs-on: ubuntu-24.04
outputs:
argument_comment_lint: ${{ steps.detect.outputs.argument_comment_lint }}
argument_comment_lint_package: ${{ steps.detect.outputs.argument_comment_lint_package }}
codex: ${{ steps.detect.outputs.codex }}
workflows: ${{ steps.detect.outputs.workflows }}
steps:
@ -39,12 +41,18 @@ jobs:
fi
codex=false
argument_comment_lint=false
argument_comment_lint_package=false
workflows=false
for f in "${files[@]}"; do
[[ $f == codex-rs/* ]] && codex=true
[[ $f == codex-rs/* || $f == tools/argument-comment-lint/* || $f == justfile ]] && argument_comment_lint=true
[[ $f == tools/argument-comment-lint/* || $f == .github/workflows/rust-ci.yml ]] && argument_comment_lint_package=true
[[ $f == .github/* ]] && workflows=true
done
echo "argument_comment_lint=$argument_comment_lint" >> "$GITHUB_OUTPUT"
echo "argument_comment_lint_package=$argument_comment_lint_package" >> "$GITHUB_OUTPUT"
echo "codex=$codex" >> "$GITHUB_OUTPUT"
echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
@ -83,6 +91,44 @@ jobs:
- name: cargo shear
run: cargo shear
argument_comment_lint:
name: Argument comment lint
runs-on: ubuntu-24.04
needs: changed
if: ${{ needs.changed.outputs.argument_comment_lint == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
steps:
- uses: actions/checkout@v6
- name: Install Linux sandbox build dependencies
run: |
sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- uses: dtolnay/rust-toolchain@1.93.0
with:
toolchain: nightly-2025-09-18
components: llvm-tools-preview, rustc-dev, rust-src
- name: Cache cargo-dylint tooling
id: cargo_dylint_cache
uses: actions/cache@v5
with:
path: |
~/.cargo/bin/cargo-dylint
~/.cargo/bin/dylint-link
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: argument-comment-lint-${{ runner.os }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml') }}
- name: Install cargo-dylint tooling
if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }}
run: cargo install --locked cargo-dylint dylint-link
- name: Test argument comment lint package
if: ${{ needs.changed.outputs.argument_comment_lint_package == 'true' || github.event_name == 'push' }}
working-directory: tools/argument-comment-lint
run: cargo test
- name: Run argument comment lint on codex-rs
run: |
bash -n tools/argument-comment-lint/run.sh
./tools/argument-comment-lint/run.sh
# --- CI to validate on different os/targets --------------------------------
lint_build:
name: Lint/Build — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
@ -657,13 +703,15 @@ jobs:
# --- Gatherer job that you mark as the ONLY required status -----------------
results:
name: CI results (required)
needs: [changed, general, cargo_shear, lint_build, tests]
needs:
[changed, general, cargo_shear, argument_comment_lint, lint_build, tests]
if: always()
runs-on: ubuntu-24.04
steps:
- name: Summarize
shell: bash
run: |
echo "arglint: ${{ needs.argument_comment_lint.result }}"
echo "general: ${{ needs.general.result }}"
echo "shear : ${{ needs.cargo_shear.result }}"
echo "lint : ${{ needs.lint_build.result }}"
@ -671,16 +719,21 @@ jobs:
# If nothing relevant changed (PR touching only root README, etc.),
# declare success regardless of other jobs.
if [[ '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
if [[ '${{ needs.changed.outputs.argument_comment_lint }}' != 'true' && '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
echo 'No relevant changes -> CI not required.'
exit 0
fi
# Otherwise require the jobs to have succeeded
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
[[ '${{ needs.lint_build.result }}' == 'success' ]] || { echo 'lint_build failed'; exit 1; }
[[ '${{ needs.tests.result }}' == 'success' ]] || { echo 'tests failed'; exit 1; }
if [[ '${{ needs.changed.outputs.argument_comment_lint }}' == 'true' || '${{ needs.changed.outputs.workflows }}' == 'true' || '${{ github.event_name }}' == 'push' ]]; then
[[ '${{ needs.argument_comment_lint.result }}' == 'success' ]] || { echo 'argument_comment_lint failed'; exit 1; }
fi
if [[ '${{ needs.changed.outputs.codex }}' == 'true' || '${{ needs.changed.outputs.workflows }}' == 'true' || '${{ github.event_name }}' == 'push' ]]; then
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
[[ '${{ needs.lint_build.result }}' == 'success' ]] || { echo 'lint_build failed'; exit 1; }
[[ '${{ needs.tests.result }}' == 'success' ]] || { echo 'tests failed'; exit 1; }
fi
- name: sccache summary note
if: always()

View file

@ -182,7 +182,7 @@ pub fn generate_ts_with_options(
}
pub fn generate_json(out_dir: &Path) -> Result<()> {
generate_json_with_experimental(out_dir, false)
generate_json_with_experimental(out_dir, /*experimental_api*/ false)
}
pub fn generate_json_with_experimental(out_dir: &Path, experimental_api: bool) -> Result<()> {
@ -1984,7 +1984,7 @@ pub(crate) fn generate_index_ts_tree(tree: &mut BTreeMap<PathBuf, String>) {
if !v2_entries.is_empty() {
tree.insert(
PathBuf::from("v2").join("index.ts"),
index_ts_entries(&v2_entries, false),
index_ts_entries(&v2_entries, /*has_v2_ts*/ false),
);
}
}

View file

@ -201,7 +201,7 @@ impl ThreadHistoryBuilder {
let mut turn = self
.current_turn
.take()
.unwrap_or_else(|| self.new_turn(None));
.unwrap_or_else(|| self.new_turn(/*id*/ None));
let id = self.next_item_id();
let content = self.build_user_inputs(payload);
turn.items.push(ThreadItem::UserMessage { id, content });
@ -937,7 +937,7 @@ impl ThreadHistoryBuilder {
fn ensure_turn(&mut self) -> &mut PendingTurn {
if self.current_turn.is_none() {
let turn = self.new_turn(None);
let turn = self.new_turn(/*id*/ None);
return self.current_turn.insert(turn);
}

View file

@ -657,7 +657,7 @@ pub async fn send_message_v2(
&endpoint,
config_overrides,
user_message,
true,
/*experimental_api*/ true,
dynamic_tools,
)
.await
@ -1510,7 +1510,7 @@ impl CodexClient {
}
fn initialize(&mut self) -> Result<InitializeResponse> {
self.initialize_with_experimental_api(true)
self.initialize_with_experimental_api(/*experimental_api*/ true)
}
fn initialize_with_experimental_api(

View file

@ -78,7 +78,7 @@ pub(crate) fn typed_request_span(
.or(session.client_version.as_deref()),
);
attach_parent_context(&span, &method, request.id(), None);
attach_parent_context(&span, &method, request.id(), /*parent_trace*/ None);
span
}

View file

@ -1988,7 +1988,7 @@ async fn handle_turn_interrupted(
conversation_id,
event_turn_id,
TurnStatus::Interrupted,
None,
/*error*/ None,
outgoing,
)
.await;
@ -2379,7 +2379,7 @@ fn render_review_output_text(output: &ReviewOutputEvent) -> String {
sections.push(explanation.to_string());
}
if !output.findings.is_empty() {
let findings = format_review_findings_block(&output.findings, None);
let findings = format_review_findings_block(&output.findings, /*selection*/ None);
let trimmed = findings.trim();
if !trimmed.is_empty() {
sections.push(trimmed.to_string());
@ -2577,7 +2577,7 @@ async fn on_command_execution_request_approval_response(
item_id.clone(),
completion_item.command,
completion_item.cwd,
None,
/*process_id*/ None,
completion_item.command_actions,
status,
&outgoing,

View file

@ -446,7 +446,7 @@ impl CodexMessageProcessor {
}
pub(crate) async fn maybe_start_curated_repo_sync_for_latest_config(&self) {
match self.load_latest_config(None).await {
match self.load_latest_config(/*fallback_cwd*/ None).await {
Ok(config) => self
.thread_manager
.plugins_manager()
@ -1623,7 +1623,10 @@ impl CodexMessageProcessor {
}
let cwd = cwd.unwrap_or_else(|| self.config.cwd.clone());
let mut env = create_env(&self.config.permissions.shell_environment_policy, None);
let mut env = create_env(
&self.config.permissions.shell_environment_policy,
/*thread_id*/ None,
);
if let Some(env_overrides) = env_overrides {
for (key, value) in env_overrides {
match value {
@ -1659,8 +1662,8 @@ impl CodexMessageProcessor {
Some(spec) => match spec
.start_proxy(
self.config.permissions.sandbox_policy.get(),
None,
None,
/*policy_decider*/ None,
/*blocked_request_observer*/ None,
managed_network_requirements_enabled,
NetworkProxyAuditMetadata::default(),
)
@ -2092,7 +2095,7 @@ impl CodexMessageProcessor {
otel.name = "app_server.thread_start.resolve_status",
))
.await,
false,
/*has_in_progress_turn*/ false,
);
let response = ThreadStartResponse {
@ -2541,7 +2544,7 @@ impl CodexMessageProcessor {
self.thread_watch_manager
.loaded_status_for_thread(&thread.id)
.await,
false,
/*has_in_progress_turn*/ false,
);
self.outgoing
@ -2592,10 +2595,10 @@ impl CodexMessageProcessor {
Some(state_db_ctx),
rollout_path.as_path(),
self.config.model_provider_id.as_str(),
None,
/*builder*/ None,
&[],
None,
None,
/*archived_only*/ None,
/*new_thread_memory_mode*/ None,
)
.await;
@ -2663,10 +2666,10 @@ impl CodexMessageProcessor {
Some(state_db_ctx),
rollout_path.as_path(),
self.config.model_provider_id.as_str(),
None,
/*builder*/ None,
&[],
None,
None,
/*archived_only*/ None,
/*new_thread_memory_mode*/ None,
)
.await;
@ -2853,7 +2856,7 @@ impl CodexMessageProcessor {
self.thread_watch_manager
.loaded_status_for_thread(&thread.id)
.await,
false,
/*has_in_progress_turn*/ false,
);
self.attach_thread_name(thread_id, &mut thread).await;
let thread_id = thread.id.clone();
@ -3327,8 +3330,13 @@ impl CodexMessageProcessor {
for connection_id in connection_ids {
Self::log_listener_attach_result(
self.ensure_conversation_listener(thread_id, connection_id, false, ApiVersion::V2)
.await,
self.ensure_conversation_listener(
thread_id,
connection_id,
/*raw_events_enabled*/ false,
ApiVersion::V2,
)
.await,
thread_id,
connection_id,
"thread",
@ -3463,7 +3471,7 @@ impl CodexMessageProcessor {
self.ensure_conversation_listener(
thread_id,
request_id.connection_id,
false,
/*raw_events_enabled*/ false,
ApiVersion::V2,
)
.await,
@ -3495,7 +3503,11 @@ impl CodexMessageProcessor {
.loaded_status_for_thread(&thread.id)
.await;
set_thread_status_and_interrupt_stale_turns(&mut thread, thread_status, false);
set_thread_status_and_interrupt_stale_turns(
&mut thread,
thread_status,
/*has_live_in_progress_turn*/ false,
);
let response = ThreadResumeResponse {
thread,
@ -3813,7 +3825,7 @@ impl CodexMessageProcessor {
if let Err(message) = populate_thread_turns(
&mut thread,
ThreadTurnSource::HistoryItems(&history_items),
None,
/*active_turn*/ None,
)
.await
{
@ -3930,7 +3942,7 @@ impl CodexMessageProcessor {
sandbox,
base_instructions,
developer_instructions,
None,
/*personality*/ None,
);
typesafe_overrides.ephemeral = ephemeral.then_some(true);
// Derive a Config using the same logic as new conversation, honoring overrides if provided.
@ -4001,7 +4013,7 @@ impl CodexMessageProcessor {
self.ensure_conversation_listener(
thread_id,
request_id.connection_id,
false,
/*raw_events_enabled*/ false,
ApiVersion::V2,
)
.await,
@ -4035,7 +4047,8 @@ impl CodexMessageProcessor {
} else {
let config_snapshot = forked_thread.config_snapshot().await;
// forked thread names do not inherit the source thread name
let mut thread = build_thread_from_snapshot(thread_id, &config_snapshot, None);
let mut thread =
build_thread_from_snapshot(thread_id, &config_snapshot, /*path*/ None);
let history_items = match read_rollout_items_from_rollout(rollout_path.as_path()).await
{
Ok(items) => items,
@ -4055,7 +4068,7 @@ impl CodexMessageProcessor {
if let Err(message) = populate_thread_turns(
&mut thread,
ThreadTurnSource::HistoryItems(&history_items),
None,
/*active_turn*/ None,
)
.await
{
@ -4069,7 +4082,7 @@ impl CodexMessageProcessor {
&& let Err(message) = populate_thread_turns(
&mut thread,
ThreadTurnSource::RolloutPath(fork_rollout_path.as_path()),
None,
/*active_turn*/ None,
)
.await
{
@ -4085,7 +4098,7 @@ impl CodexMessageProcessor {
self.thread_watch_manager
.loaded_status_for_thread(&thread.id)
.await,
false,
/*has_in_progress_turn*/ false,
);
let response = ThreadForkResponse {
@ -4398,7 +4411,7 @@ impl CodexMessageProcessor {
params: ExperimentalFeatureListParams,
) {
let ExperimentalFeatureListParams { cursor, limit } = params;
let config = match self.load_latest_config(None).await {
let config = match self.load_latest_config(/*fallback_cwd*/ None).await {
Ok(config) => config,
Err(error) => {
self.outgoing.send_error(request_id, error).await;
@ -4513,7 +4526,7 @@ impl CodexMessageProcessor {
}
async fn mcp_server_refresh(&self, request_id: ConnectionRequestId, _params: Option<()>) {
let config = match self.load_latest_config(None).await {
let config = match self.load_latest_config(/*fallback_cwd*/ None).await {
Ok(config) => config,
Err(error) => {
self.outgoing.send_error(request_id, error).await;
@ -4572,7 +4585,7 @@ impl CodexMessageProcessor {
request_id: ConnectionRequestId,
params: McpServerOauthLoginParams,
) {
let config = match self.load_latest_config(None).await {
let config = match self.load_latest_config(/*fallback_cwd*/ None).await {
Ok(config) => config,
Err(error) => {
self.outgoing.send_error(request_id, error).await;
@ -4684,7 +4697,7 @@ impl CodexMessageProcessor {
let request = request_id.clone();
let outgoing = Arc::clone(&self.outgoing);
let config = match self.load_latest_config(None).await {
let config = match self.load_latest_config(/*fallback_cwd*/ None).await {
Ok(config) => config,
Err(error) => {
self.outgoing.send_error(request, error).await;
@ -4856,7 +4869,7 @@ impl CodexMessageProcessor {
async fn finalize_thread_teardown(&mut self, thread_id: ThreadId) {
self.pending_thread_unloads.lock().await.remove(&thread_id);
self.outgoing
.cancel_requests_for_thread(thread_id, None)
.cancel_requests_for_thread(thread_id, /*error*/ None)
.await;
self.thread_state_manager
.remove_thread_state(thread_id)
@ -4919,7 +4932,7 @@ impl CodexMessageProcessor {
// Any pending app-server -> client requests for this thread can no longer be
// answered; cancel their callbacks before shutdown/unload.
self.outgoing
.cancel_requests_for_thread(thread_id, None)
.cancel_requests_for_thread(thread_id, /*error*/ None)
.await;
self.thread_state_manager
.remove_thread_state(thread_id)
@ -5092,7 +5105,7 @@ impl CodexMessageProcessor {
}
async fn apps_list(&self, request_id: ConnectionRequestId, params: AppsListParams) {
let mut config = match self.load_latest_config(None).await {
let mut config = match self.load_latest_config(/*fallback_cwd*/ None).await {
Ok(config) => config,
Err(error) => {
self.outgoing.send_error(request_id, error).await;
@ -5389,7 +5402,7 @@ impl CodexMessageProcessor {
} = params;
let roots = cwds.unwrap_or_default();
let mut config = match self.load_latest_config(None).await {
let mut config = match self.load_latest_config(/*fallback_cwd*/ None).await {
Ok(config) => config,
Err(err) => {
self.outgoing.send_error(request_id, err).await;
@ -5422,7 +5435,7 @@ impl CodexMessageProcessor {
}
}
config = match self.load_latest_config(None).await {
config = match self.load_latest_config(/*fallback_cwd*/ None).await {
Ok(config) => config,
Err(err) => {
self.outgoing.send_error(request_id, err).await;
@ -5692,9 +5705,9 @@ impl CodexMessageProcessor {
Vec::new()
} else {
let (all_connectors_result, accessible_connectors_result) = tokio::join!(
connectors::list_all_connectors_with_options(&config, true),
connectors::list_all_connectors_with_options(&config, /*force_refetch*/ true),
connectors::list_accessible_connectors_from_mcp_tools_with_options_and_status(
&config, true
&config, /*force_refetch*/ true
),
);
@ -6046,7 +6059,7 @@ impl CodexMessageProcessor {
.ensure_conversation_listener(
thread_id,
request_id.connection_id,
false,
/*raw_events_enabled*/ false,
ApiVersion::V2,
)
.await
@ -6327,7 +6340,7 @@ impl CodexMessageProcessor {
usize::MAX,
config,
rollout_path,
false,
/*persist_extended_history*/ false,
self.request_trace_context(request_id).await,
)
.await
@ -6341,7 +6354,7 @@ impl CodexMessageProcessor {
self.ensure_conversation_listener(
thread_id,
request_id.connection_id,
false,
/*raw_events_enabled*/ false,
ApiVersion::V2,
)
.await,
@ -6362,7 +6375,7 @@ impl CodexMessageProcessor {
self.thread_watch_manager
.loaded_status_for_thread(&thread.id)
.await,
false,
/*has_in_progress_turn*/ false,
);
let notif = ThreadStartedNotification { thread };
self.outgoing
@ -7050,7 +7063,7 @@ impl CodexMessageProcessor {
tokio::spawn(async move {
let derived_config = derive_config_for_cwd(
&cli_overrides,
None,
/*request_overrides*/ None,
ConfigOverrides {
cwd: Some(command_cwd.clone()),
..Default::default()

View file

@ -15,15 +15,16 @@ pub(super) async fn load_plugin_app_summaries(
return Vec::new();
}
let connectors = match connectors::list_all_connectors_with_options(config, false).await {
Ok(connectors) => connectors,
Err(err) => {
warn!("failed to load app metadata for plugin/read: {err:#}");
connectors::list_cached_all_connectors(config)
.await
.unwrap_or_default()
}
};
let connectors =
match connectors::list_all_connectors_with_options(config, /*force_refetch*/ false).await {
Ok(connectors) => connectors,
Err(err) => {
warn!("failed to load app metadata for plugin/read: {err:#}");
connectors::list_cached_all_connectors(config)
.await
.unwrap_or_default()
}
};
connectors::connectors_for_plugin_apps(connectors, plugin_apps)
.into_iter()

View file

@ -201,7 +201,9 @@ impl CommandExecManager {
let sessions = Arc::clone(&self.sessions);
tokio::spawn(async move {
let _started_network_proxy = started_network_proxy;
match codex_core::sandboxing::execute_env(exec_request, None).await {
match codex_core::sandboxing::execute_env(exec_request, /*stdout_stream*/ None)
.await
{
Ok(output) => {
outgoing
.send_response(

View file

@ -390,8 +390,8 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle {
Arc::clone(&outbound_initialized),
Arc::clone(&outbound_experimental_api_enabled),
Arc::clone(&outbound_opted_out_notification_methods),
true,
None,
/*allow_legacy_notifications*/ true,
/*disconnect_sender*/ None,
),
);
let mut outbound_handle = tokio::spawn(async move {

View file

@ -266,10 +266,10 @@ fn app_text_range(range: &CoreTextRange) -> AppTextRange {
fn project_config_warning(config: &Config) -> Option<ConfigWarningNotification> {
let mut disabled_folders = Vec::new();
for layer in config
.config_layer_stack
.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, true)
{
for layer in config.config_layer_stack.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ true,
) {
if !matches!(layer.name, ConfigLayerSource::Project { .. })
|| layer.disabled_reason.is_none()
{
@ -418,7 +418,7 @@ pub async fn run_main_with_transport(
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
/*enable_codex_api_key_env*/ false,
config.cli_auth_credentials_store_mode,
);
cloud_requirements_loader(

View file

@ -37,7 +37,7 @@ fn main() -> anyhow::Result<()> {
arg0_paths,
CliConfigOverrides::default(),
loader_overrides,
false,
/*default_analytics_enabled*/ false,
transport,
)
.await?;

View file

@ -332,7 +332,7 @@ impl MessageProcessor {
request_id.clone(),
codex_request,
session,
None,
/*outbound_initialized*/ None,
request_context.clone(),
)
.await;
@ -358,7 +358,8 @@ impl MessageProcessor {
};
let request_span =
crate::app_server_tracing::typed_request_span(&request, connection_id, session);
let request_context = RequestContext::new(request_id.clone(), request_span, None);
let request_context =
RequestContext::new(request_id.clone(), request_span, /*parent_trace*/ None);
tracing::trace!(
?connection_id,
request_id = ?request_id.request_id,

View file

@ -234,7 +234,10 @@ impl OutgoingMessageSender {
&self,
request: ServerRequestPayload,
) -> (RequestId, oneshot::Receiver<ClientRequestResult>) {
self.send_request_to_connections(None, request, None).await
self.send_request_to_connections(
/*connection_ids*/ None, request, /*thread_id*/ None,
)
.await
}
fn next_request_id(&self) -> RequestId {

View file

@ -284,7 +284,7 @@ impl ThreadStateManager {
{
let mut thread_state_guard = thread_state.lock().await;
if experimental_raw_events {
thread_state_guard.set_experimental_raw_events(true);
thread_state_guard.set_experimental_raw_events(/*enabled*/ true);
}
}
Some(thread_state)

View file

@ -91,13 +91,17 @@ impl ThreadWatchManager {
}
pub(crate) async fn upsert_thread(&self, thread: Thread) {
self.mutate_and_publish(move |state| state.upsert_thread(thread.id, true))
.await;
self.mutate_and_publish(move |state| {
state.upsert_thread(thread.id, /*emit_notification*/ true)
})
.await;
}
pub(crate) async fn upsert_thread_silently(&self, thread: Thread) {
self.mutate_and_publish(move |state| state.upsert_thread(thread.id, false))
.await;
self.mutate_and_publish(move |state| {
state.upsert_thread(thread.id, /*emit_notification*/ false)
})
.await;
}
pub(crate) async fn remove_thread(&self, thread_id: &str) {

View file

@ -267,7 +267,8 @@ impl McpProcess {
/// Send an `account/rateLimits/read` JSON-RPC request.
pub async fn send_get_account_rate_limits_request(&mut self) -> anyhow::Result<i64> {
self.send_request("account/rateLimits/read", None).await
self.send_request("account/rateLimits/read", /*params*/ None)
.await
}
/// Send an `account/read` JSON-RPC request.
@ -768,7 +769,7 @@ impl McpProcess {
/// Send an `account/logout` JSON-RPC request.
pub async fn send_logout_account_request(&mut self) -> anyhow::Result<i64> {
self.send_request("account/logout", None).await
self.send_request("account/logout", /*params*/ None).await
}
/// Send an `account/login/start` JSON-RPC request for API key login.

View file

@ -37,7 +37,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
availability_nux: None,
apply_patch_tool_type: None,
web_search_tool_type: Default::default(),
truncation_policy: TruncationPolicyConfig::bytes(10_000),
truncation_policy: TruncationPolicyConfig::bytes(/*limit*/ 10_000),
supports_parallel_tool_calls: false,
supports_image_detail_original: false,
context_window: Some(272_000),

View file

@ -399,7 +399,7 @@ fn compute_replacements(
original_lines,
std::slice::from_ref(ctx_line),
line_index,
false,
/*eof*/ false,
) {
line_index = idx + 1;
} else {
@ -512,7 +512,7 @@ pub fn unified_diff_from_chunks(
path: &Path,
chunks: &[UpdateFileChunk],
) -> std::result::Result<ApplyPatchFileUpdate, ApplyPatchError> {
unified_diff_from_chunks_with_context(path, chunks, 1)
unified_diff_from_chunks_with_context(path, chunks, /*context*/ 1)
}
pub fn unified_diff_from_chunks_with_context(

View file

@ -74,7 +74,7 @@ pub fn can_manage_artifact_runtime() -> bool {
pub(crate) fn resolve_machine_js_runtime() -> Option<JsRuntime> {
resolve_js_runtime_from_candidates(
None,
/*preferred_node_path*/ None,
system_node_runtime(),
system_electron_runtime(),
codex_app_runtime_candidates(),

View file

@ -399,7 +399,7 @@ impl Client {
let plan_type = Some(Self::map_plan_type(payload.plan_type));
let mut snapshots = vec![Self::make_rate_limit_snapshot(
Some("codex".to_string()),
None,
/*limit_name*/ None,
payload.rate_limit.flatten().map(|details| *details),
payload.credits.flatten().map(|details| *details),
plan_type,
@ -410,7 +410,7 @@ impl Client {
Some(details.metered_feature),
Some(details.limit_name),
details.rate_limit.flatten().map(|rate_limit| *rate_limit),
None,
/*credits*/ None,
plan_type,
)
}));

View file

@ -13,7 +13,7 @@ pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
config: &Config,
path: String,
) -> anyhow::Result<T> {
chatgpt_get_request_with_timeout(config, path, None).await
chatgpt_get_request_with_timeout(config, path, /*timeout*/ None).await
}
pub(crate) async fn chatgpt_get_request_with_timeout<T: DeserializeOwned>(

View file

@ -23,8 +23,11 @@ pub async fn init_chatgpt_token_from_auth(
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
let auth_manager =
AuthManager::new(codex_home.to_path_buf(), false, auth_credentials_store_mode);
let auth_manager = AuthManager::new(
codex_home.to_path_buf(),
/*enable_codex_api_key_env*/ false,
auth_credentials_store_mode,
);
if let Some(auth) = auth_manager.auth().await {
let token_data = auth.get_token_data()?;
set_chatgpt_token_data(token_data);

View file

@ -29,7 +29,7 @@ const DIRECTORY_CONNECTORS_TIMEOUT: Duration = Duration::from_secs(60);
async fn apps_enabled(config: &Config) -> bool {
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
/*enable_codex_api_key_env*/ false,
config.cli_auth_credentials_store_mode,
);
config.features.apps_enabled(Some(&auth_manager)).await
@ -45,13 +45,15 @@ pub async fn list_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
let connectors = connectors_result?;
let accessible = accessible_result?;
Ok(with_app_enabled_state(
merge_connectors_with_accessible(connectors, accessible, true),
merge_connectors_with_accessible(
connectors, accessible, /*all_connectors_loaded*/ true,
),
config,
))
}
pub async fn list_all_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
list_all_connectors_with_options(config, false).await
list_all_connectors_with_options(config, /*force_refetch*/ false).await
}
pub async fn list_cached_all_connectors(config: &Config) -> Option<Vec<AppInfo>> {

View file

@ -69,7 +69,7 @@ pub async fn run_command_under_landlock(
config_overrides,
codex_linux_sandbox_exe,
SandboxType::Landlock,
false,
/*log_denials*/ false,
)
.await
}
@ -89,7 +89,7 @@ pub async fn run_command_under_windows(
config_overrides,
codex_linux_sandbox_exe,
SandboxType::Windows,
false,
/*log_denials*/ false,
)
.await
}
@ -131,7 +131,10 @@ async fn run_command_under_sandbox(
let sandbox_policy_cwd = cwd.clone();
let stdio_policy = StdioPolicy::Inherit;
let env = create_env(&config.permissions.shell_environment_policy, None);
let env = create_env(
&config.permissions.shell_environment_policy,
/*thread_id*/ None,
);
// Special-case Windows sandbox: execute and exit the process to emulate inherited stdio.
if let SandboxType::Windows = sandbox_type {
@ -223,8 +226,8 @@ async fn run_command_under_sandbox(
Some(spec) => Some(
spec.start_proxy(
config.permissions.sandbox_policy.get(),
None,
None,
/*policy_decider*/ None,
/*blocked_request_observer*/ None,
managed_network_requirements_enabled,
NetworkProxyAuditMetadata::default(),
)

View file

@ -830,7 +830,7 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> {
&mut apply_cli.config_overrides,
root_config_overrides.clone(),
);
run_apply_command(apply_cli, None).await?;
run_apply_command(apply_cli, /*cwd*/ None).await?;
}
Some(Subcommand::ResponsesApiProxy(args)) => {
reject_remote_mode_for_subcommand(root_remote.as_deref(), "responses-api-proxy")?;
@ -906,7 +906,7 @@ async fn enable_feature_in_config(interactive: &TuiCli, feature: &str) -> anyhow
let codex_home = find_codex_home()?;
ConfigEditsBuilder::new(&codex_home)
.with_profile(interactive.config_profile.as_deref())
.set_feature_enabled(feature, true)
.set_feature_enabled(feature, /*enabled*/ true)
.apply()
.await?;
println!("Enabled feature `{feature}` in config.toml.");
@ -919,7 +919,7 @@ async fn disable_feature_in_config(interactive: &TuiCli, feature: &str) -> anyho
let codex_home = find_codex_home()?;
ConfigEditsBuilder::new(&codex_home)
.with_profile(interactive.config_profile.as_deref())
.set_feature_enabled(feature, false)
.set_feature_enabled(feature, /*enabled*/ false)
.apply()
.await?;
println!("Disabled feature `{feature}` in config.toml.");

View file

@ -321,8 +321,11 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
match oauth_login_support(&transport).await {
McpOAuthLoginSupport::Supported(oauth_config) => {
println!("Detected OAuth support. Starting OAuth flow…");
let resolved_scopes =
resolve_oauth_scopes(None, None, oauth_config.discovered_scopes.clone());
let resolved_scopes = resolve_oauth_scopes(
/*explicit_scopes*/ None,
/*configured_scopes*/ None,
oauth_config.discovered_scopes.clone(),
);
perform_oauth_login_retry_without_scopes(
&name,
&oauth_config.url,
@ -330,7 +333,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
oauth_config.http_headers,
oauth_config.env_http_headers,
&resolved_scopes,
None,
/*oauth_resource*/ None,
config.mcp_oauth_callback_port,
config.mcp_oauth_callback_url.as_deref(),
)
@ -387,7 +390,7 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
.await
.context("failed to load configuration")?;
let mcp_manager = McpManager::new(Arc::new(PluginsManager::new(config.codex_home.clone())));
let mcp_servers = mcp_manager.effective_servers(&config, None);
let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None);
let LoginArgs { name, scopes } = login_args;
@ -438,7 +441,7 @@ async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutAr
.await
.context("failed to load configuration")?;
let mcp_manager = McpManager::new(Arc::new(PluginsManager::new(config.codex_home.clone())));
let mcp_servers = mcp_manager.effective_servers(&config, None);
let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None);
let LogoutArgs { name } = logout_args;
@ -468,7 +471,7 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) ->
.await
.context("failed to load configuration")?;
let mcp_manager = McpManager::new(Arc::new(PluginsManager::new(config.codex_home.clone())));
let mcp_servers = mcp_manager.effective_servers(&config, None);
let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None);
let mut entries: Vec<_> = mcp_servers.iter().collect();
entries.sort_by(|(a, _), (b, _)| a.cmp(b));
@ -717,7 +720,7 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re
.await
.context("failed to load configuration")?;
let mcp_manager = McpManager::new(Arc::new(PluginsManager::new(config.codex_home.clone())));
let mcp_servers = mcp_manager.effective_servers(&config, None);
let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None);
let Some(server) = mcp_servers.get(&get_args.name) else {
bail!("No MCP server named '{name}' found.", name = get_args.name);

View file

@ -286,7 +286,7 @@ impl CloudRequirementsService {
.map_err(|_| {
CloudRequirementsLoadError::new(
CloudRequirementsLoadErrorCode::Timeout,
None,
/*status_code*/ None,
format!(
"timed out waiting for cloud requirements after {}s",
self.timeout.as_secs()
@ -368,7 +368,9 @@ impl CloudRequirementsService {
while attempt <= CLOUD_REQUIREMENTS_MAX_ATTEMPTS {
let contents = match self.fetcher.fetch_requirements(&auth).await {
Ok(contents) => {
emit_fetch_attempt_metric(trigger, attempt, "success", None);
emit_fetch_attempt_metric(
trigger, attempt, "success", /*status_code*/ None,
);
contents
}
Err(FetchAttemptError::Retryable(status)) => {
@ -488,7 +490,7 @@ impl CloudRequirementsService {
);
return Err(CloudRequirementsLoadError::new(
CloudRequirementsLoadErrorCode::Parse,
None,
/*status_code*/ None,
CLOUD_REQUIREMENTS_LOAD_FAILED_MESSAGE,
));
}
@ -501,7 +503,9 @@ impl CloudRequirementsService {
tracing::warn!(error = %err, "Failed to write cloud requirements cache");
}
emit_fetch_final_metric(trigger, "success", "none", attempt, None);
emit_fetch_final_metric(
trigger, "success", "none", attempt, /*status_code*/ None,
);
return Ok(requirements);
}
@ -709,7 +713,7 @@ pub fn cloud_requirements_loader(
tracing::error!(error = %err, "Cloud requirements task failed");
CloudRequirementsLoadError::new(
CloudRequirementsLoadErrorCode::Internal,
None,
/*status_code*/ None,
format!("cloud requirements load failed: {err}"),
)
})?
@ -807,7 +811,7 @@ fn emit_metric(metric_name: &str, tags: Vec<(&str, String)>) {
.iter()
.map(|(key, value)| (*key, value.as_str()))
.collect::<Vec<_>>();
let _ = metrics.counter(metric_name, 1, &tag_refs);
let _ = metrics.counter(metric_name, /*inc*/ 1, &tag_refs);
}
}

View file

@ -94,7 +94,9 @@ impl CloudBackend for HttpClient {
}
async fn apply_task(&self, id: TaskId, diff_override: Option<String>) -> Result<ApplyOutcome> {
self.apply_api().run(id, diff_override, false).await
self.apply_api()
.run(id, diff_override, /*preflight*/ false)
.await
}
async fn apply_task_preflight(
@ -102,7 +104,9 @@ impl CloudBackend for HttpClient {
id: TaskId,
diff_override: Option<String>,
) -> Result<ApplyOutcome> {
self.apply_api().run(id, diff_override, true).await
self.apply_api()
.run(id, diff_override, /*preflight*/ true)
.await
}
async fn create_task(
@ -533,8 +537,8 @@ mod api {
let _ = writeln!(
&mut log,
"stdout_tail=\n{}\nstderr_tail=\n{}",
tail(&r.stdout, 2000),
tail(&r.stderr, 2000)
tail(&r.stdout, /*max*/ 2000),
tail(&r.stderr, /*max*/ 2000)
);
let _ = writeln!(&mut log, "{summary}");
let _ = writeln!(

View file

@ -70,7 +70,10 @@ impl CloudBackend for MockClient {
}
async fn get_task_summary(&self, id: TaskId) -> Result<TaskSummary> {
let tasks = self.list_tasks(None, None, None).await?.tasks;
let tasks = self
.list_tasks(/*env*/ None, /*limit*/ None, /*cursor*/ None)
.await?
.tasks;
tasks
.into_iter()
.find(|t| t.id == id)

View file

@ -125,7 +125,7 @@ pub async fn load_tasks(
// In later milestones, add a small debounce, spinner, and error display.
let tasks = tokio::time::timeout(
Duration::from_secs(5),
backend.list_tasks(env, Some(20), None),
backend.list_tasks(env, Some(20), /*cursor*/ None),
)
.await??;
// Hide review-only tasks from the main list.

View file

@ -171,7 +171,7 @@ async fn run_exec_command(args: crate::cli::ExecCommand) -> anyhow::Result<()> {
&env_id,
&prompt,
&git_ref,
false,
/*qa_mode*/ false,
attempts,
)
.await?;
@ -827,7 +827,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
let backend = Arc::clone(&backend);
let tx = tx.clone();
tokio::spawn(async move {
let res = app::load_tasks(&*backend, None).await;
let res = app::load_tasks(&*backend, /*env*/ None).await;
let _ = tx.send(app::AppEvent::TasksLoaded {
env: None,
result: res,
@ -861,7 +861,10 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
let headers = util::build_chatgpt_headers().await;
// Run autodetect. If it fails, we keep using "All".
let res = crate::env_detect::autodetect_environment_id(&base_url, &headers, None).await;
let res = crate::env_detect::autodetect_environment_id(
&base_url, &headers, /*desired_label*/ None,
)
.await;
let _ = tx.send(app::AppEvent::EnvironmentAutodetected(res));
});
}
@ -1105,7 +1108,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
ov.base_can_apply = true;
ov.apply_selection_to_fields();
} else {
let mut overlay = app::DiffOverlay::new(id.clone(), title, None);
let mut overlay = app::DiffOverlay::new(id.clone(), title, /*attempt_total_hint*/ None);
{
let base = overlay.base_attempt_mut();
base.diff_lines = diff_lines.clone();
@ -1178,7 +1181,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
});
}
} else {
let mut overlay = app::DiffOverlay::new(id.clone(), title, None);
let mut overlay = app::DiffOverlay::new(id.clone(), title, /*attempt_total_hint*/ None);
{
let base = overlay.base_attempt_mut();
base.text_lines = conv.clone();
@ -1216,7 +1219,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
.as_ref()
.map(|d| d.lines().map(str::to_string).collect())
.unwrap_or_default();
let text_lines = conversation_lines(None, &attempt.messages);
let text_lines = conversation_lines(/*prompt*/ None, &attempt.messages);
ov.attempts.push(app::AttemptView {
turn_id: Some(attempt.turn_id.clone()),
status: attempt.status,
@ -1263,7 +1266,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
ov.current_view = app::DetailView::Prompt;
ov.apply_selection_to_fields();
} else {
let mut overlay = app::DiffOverlay::new(id.clone(), title, None);
let mut overlay = app::DiffOverlay::new(id.clone(), title, /*attempt_total_hint*/ None);
{
let base = overlay.base_attempt_mut();
base.text_lines = pretty;
@ -1500,9 +1503,9 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
let backend = Arc::clone(&backend);
let best_of_n = page.best_of_n;
tokio::spawn(async move {
let git_ref = resolve_git_ref(None).await;
let git_ref = resolve_git_ref(/*branch_override*/ None).await;
let result = codex_cloud_tasks_client::CloudBackend::create_task(&*backend, &env, &text, &git_ref, false, best_of_n).await;
let result = codex_cloud_tasks_client::CloudBackend::create_task(&*backend, &env, &text, &git_ref, /*qa_mode*/ false, best_of_n).await;
let evt = match result {
Ok(ok) => app::AppEvent::NewTaskSubmitted(Ok(ok)),
Err(e) => app::AppEvent::NewTaskSubmitted(Err(format!("{e}"))),
@ -1685,11 +1688,11 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
needs_redraw = true;
}
KeyCode::Down | KeyCode::Char('j') => {
if let Some(ov) = &mut app.diff_overlay { ov.sd.scroll_by(1); }
if let Some(ov) = &mut app.diff_overlay { ov.sd.scroll_by(/*delta*/ 1); }
needs_redraw = true;
}
KeyCode::Up | KeyCode::Char('k') => {
if let Some(ov) = &mut app.diff_overlay { ov.sd.scroll_by(-1); }
if let Some(ov) = &mut app.diff_overlay { ov.sd.scroll_by(/*delta*/ -1); }
needs_redraw = true;
}
KeyCode::PageDown | KeyCode::Char(' ') => {
@ -1721,7 +1724,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
KeyCode::PageUp => { if let Some(m) = app.env_modal.as_mut() { let step = 10usize; m.selected = m.selected.saturating_sub(step); } needs_redraw = true; }
KeyCode::Char('n') => {
if app.env_filter.is_none() {
app.new_task = Some(crate::new_task::NewTaskPage::new(None, app.best_of_n));
app.new_task = Some(crate::new_task::NewTaskPage::new(/*env_id*/ None, app.best_of_n));
} else {
app.new_task = Some(crate::new_task::NewTaskPage::new(app.env_filter.clone(), app.best_of_n));
}

View file

@ -30,6 +30,6 @@ impl NewTaskPage {
impl Default for NewTaskPage {
fn default() -> Self {
Self::new(None, 1)
Self::new(/*env_id*/ None, /*best_of_n*/ 1)
}
}

View file

@ -582,7 +582,10 @@ fn style_conversation_lines(
speaker = Some(ConversationSpeaker::User);
in_code = false;
bullet_indent = None;
styled.push(conversation_header_line(ConversationSpeaker::User, None));
styled.push(conversation_header_line(
ConversationSpeaker::User,
/*attempt*/ None,
));
last_src = Some(src_idx);
continue;
}

View file

@ -64,7 +64,7 @@ pub async fn load_auth_manager() -> Option<AuthManager> {
let config = Config::load_with_cli_overrides(Vec::new()).await.ok()?;
Some(AuthManager::new(
config.codex_home,
false,
/*enable_codex_api_key_env*/ false,
config.cli_auth_credentials_store_mode,
))
}

View file

@ -44,9 +44,15 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
) -> Result<(Vec<ModelInfo>, Option<String>), ApiError> {
let resp = self
.session
.execute_with(Method::GET, Self::path(), extra_headers, None, |req| {
Self::append_client_version_query(req, client_version);
})
.execute_with(
Method::GET,
Self::path(),
extra_headers,
/*body*/ None,
|req| {
Self::append_client_version_query(req, client_version);
},
)
.await?;
let header_etag = resp

View file

@ -20,7 +20,7 @@ impl Display for RateLimitError {
/// Parses the default Codex rate-limit header family into a `RateLimitSnapshot`.
pub fn parse_default_rate_limit(headers: &HeaderMap) -> Option<RateLimitSnapshot> {
parse_rate_limit_for_limit(headers, None)
parse_rate_limit_for_limit(headers, /*limit_id*/ None)
}
/// Parses all known rate-limit header families into update records keyed by limit id.

View file

@ -45,7 +45,12 @@ pub fn stream_from_fixture(
let reader = std::io::Cursor::new(content);
let stream = ReaderStream::new(reader).map_err(|err| TransportError::Network(err.to_string()));
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600);
tokio::spawn(process_sse(Box::pin(stream), tx_event, idle_timeout, None));
tokio::spawn(process_sse(
Box::pin(stream),
tx_event,
idle_timeout,
/*telemetry*/ None,
));
Ok(ResponseStream { rx_event })
}

View file

@ -38,7 +38,7 @@ fn derive_for_struct(input: &DeriveInput, data: &DataStruct) -> TokenStream {
let mut registrations = Vec::new();
for field in &named.named {
if let Some(reason) = experimental_reason(&field.attrs) {
let expr = experimental_presence_expr(field, false);
let expr = experimental_presence_expr(field, /*tuple_struct*/ false);
checks.push(quote! {
if #expr {
return Some(#reason);

View file

@ -92,20 +92,23 @@ impl Default for ConfigRequirements {
Self {
approval_policy: ConstrainedWithSource::new(
Constrained::allow_any_from_default(),
None,
/*source*/ None,
),
sandbox_policy: ConstrainedWithSource::new(
Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
None,
/*source*/ None,
),
web_search_mode: ConstrainedWithSource::new(
Constrained::allow_any(WebSearchMode::Cached),
None,
/*source*/ None,
),
feature_requirements: None,
mcp_servers: None,
exec_policy: None,
enforce_residency: ConstrainedWithSource::new(Constrained::allow_any(None), None),
enforce_residency: ConstrainedWithSource::new(
Constrained::allow_any(/*initial_value*/ None),
/*source*/ None,
),
network: None,
}
}
@ -508,7 +511,10 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
})?;
ConstrainedWithSource::new(constrained, Some(requirement_source))
}
None => ConstrainedWithSource::new(Constrained::allow_any_from_default(), None),
None => ConstrainedWithSource::new(
Constrained::allow_any_from_default(),
/*source*/ None,
),
};
// TODO(gt): `ConfigRequirementsToml` should let the author specify the
@ -559,7 +565,10 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
ConstrainedWithSource::new(constrained, Some(requirement_source))
}
None => {
ConstrainedWithSource::new(Constrained::allow_any(default_sandbox_policy), None)
ConstrainedWithSource::new(
Constrained::allow_any(default_sandbox_policy),
/*source*/ None,
)
}
};
let exec_policy = match rules {
@ -612,7 +621,10 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
})?;
ConstrainedWithSource::new(constrained, Some(requirement_source))
}
None => ConstrainedWithSource::new(Constrained::allow_any(WebSearchMode::Cached), None),
None => ConstrainedWithSource::new(
Constrained::allow_any(WebSearchMode::Cached),
/*source*/ None,
),
};
let feature_requirements =
feature_requirements.filter(|requirements| !requirements.value.is_empty());
@ -638,7 +650,10 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
})?;
ConstrainedWithSource::new(constrained, Some(requirement_source))
}
None => ConstrainedWithSource::new(Constrained::allow_any(None), None),
None => ConstrainedWithSource::new(
Constrained::allow_any(/*initial_value*/ None),
/*source*/ None,
),
};
let network = network.map(|sourced_network| {
let Sourced { value, source } = sourced_network;

View file

@ -142,7 +142,10 @@ pub async fn first_layer_config_error<T: DeserializeOwned>(
// per-file error to point users at a specific file and range rather than an
// opaque merged-layer failure.
first_layer_config_error_for_entries::<T, _>(
layers.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false),
layers.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
),
config_toml_file,
)
.await

View file

@ -211,7 +211,10 @@ impl ConfigLayerStack {
pub fn effective_config(&self) -> TomlValue {
let mut merged = TomlValue::Table(toml::map::Map::new());
for layer in self.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false) {
for layer in self.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
) {
merge_toml_values(&mut merged, &layer.config);
}
merged
@ -221,7 +224,10 @@ impl ConfigLayerStack {
let mut origins = HashMap::new();
let mut path = Vec::new();
for layer in self.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false) {
for layer in self.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
) {
record_origins(&layer.config, &layer.metadata(), &mut path, &mut origins);
}
@ -231,7 +237,10 @@ impl ConfigLayerStack {
/// Returns the highest-precedence to lowest-precedence layers, so
/// `ConfigLayerSource::SessionFlags` would be first, if present.
pub fn layers_high_to_low(&self) -> Vec<&ConfigLayerEntry> {
self.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst, false)
self.get_layers(
ConfigLayerStackOrdering::HighestPrecedenceFirst,
/*include_disabled*/ false,
)
}
/// Returns the highest-precedence to lowest-precedence layers, so

View file

@ -186,7 +186,7 @@ impl AgentControl {
initial_history,
self.clone(),
session_source,
false,
/*persist_extended_history*/ false,
inherited_shell_snapshot,
)
.await?
@ -196,8 +196,8 @@ impl AgentControl {
config,
self.clone(),
session_source,
false,
None,
/*persist_extended_history*/ false,
/*metrics_service_name*/ None,
inherited_shell_snapshot,
)
.await?

View file

@ -138,7 +138,11 @@ impl Guards {
active_agents.used_agent_nicknames.clear();
active_agents.nickname_reset_count += 1;
if let Some(metrics) = codex_otel::metrics::global() {
let _ = metrics.counter("codex.multi_agent.nickname_pool_reset", 1, &[]);
let _ = metrics.counter(
"codex.multi_agent.nickname_pool_reset",
/*inc*/ 1,
&[],
);
}
format_agent_nickname(
names.choose(&mut rand::rng())?,
@ -179,7 +183,7 @@ pub(crate) struct SpawnReservation {
impl SpawnReservation {
pub(crate) fn reserve_agent_nickname(&mut self, names: &[&str]) -> Result<String> {
self.reserve_agent_nickname_with_preference(names, None)
self.reserve_agent_nickname_with_preference(names, /*preferred*/ None)
}
pub(crate) fn reserve_agent_nickname_with_preference(
@ -198,7 +202,7 @@ impl SpawnReservation {
}
pub(crate) fn commit(self, thread_id: ThreadId) {
self.commit_with_agent_nickname(thread_id, None);
self.commit_with_agent_nickname(thread_id, /*agent_nickname*/ None);
}
pub(crate) fn commit_with_agent_nickname(

View file

@ -231,7 +231,10 @@ mod reload {
fn existing_layers(config: &Config) -> Vec<ConfigLayerEntry> {
config
.config_layer_stack
.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, true)
.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ true,
)
.into_iter()
.cloned()
.collect()

View file

@ -194,7 +194,11 @@ impl CodexAuth {
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<Option<Self>> {
load_auth(codex_home, false, auth_credentials_store_mode)
load_auth(
codex_home,
/*enable_codex_api_key_env*/ false,
auth_credentials_store_mode,
)
}
pub fn auth_mode(&self) -> AuthMode {
@ -457,7 +461,7 @@ pub fn load_auth_dot_json(
pub fn enforce_login_restrictions(config: &Config) -> std::io::Result<()> {
let Some(auth) = load_auth(
&config.codex_home,
true,
/*enable_codex_api_key_env*/ true,
config.cli_auth_credentials_store_mode,
)?
else {

View file

@ -564,7 +564,7 @@ impl ModelClient {
auth_context.recovery_mode,
auth_context.recovery_phase,
request_route_telemetry.endpoint,
false,
/*connection_reused*/ false,
response_debug.request_id.as_deref(),
response_debug.cf_ray.as_deref(),
response_debug.auth_error.as_deref(),
@ -796,9 +796,11 @@ impl ModelClientSession {
let Some(last_response) = self.get_last_response() else {
return ResponsesWsRequest::ResponseCreate(payload);
};
let Some(incremental_items) =
self.get_incremental_items(request, Some(&last_response), true)
else {
let Some(incremental_items) = self.get_incremental_items(
request,
Some(&last_response),
/*allow_empty_delta*/ true,
) else {
return ResponsesWsRequest::ResponseCreate(payload);
};
@ -846,13 +848,14 @@ impl ModelClientSession {
client_setup.api_provider,
client_setup.api_auth,
Some(Arc::clone(&self.turn_state)),
None,
/*turn_metadata_header*/ None,
auth_context,
RequestRouteTelemetry::for_endpoint(RESPONSES_ENDPOINT),
)
.await?;
self.websocket_session.connection = Some(connection);
self.websocket_session.set_connection_reused(false);
self.websocket_session
.set_connection_reused(/*connection_reused*/ false);
Ok(())
}
/// Returns a websocket connection for this turn.
@ -906,9 +909,11 @@ impl ModelClientSession {
)
.await?;
self.websocket_session.connection = Some(new_conn);
self.websocket_session.set_connection_reused(false);
self.websocket_session
.set_connection_reused(/*connection_reused*/ false);
} else {
self.websocket_session.set_connection_reused(true);
self.websocket_session
.set_connection_reused(/*connection_reused*/ true);
}
self.websocket_session
@ -1202,7 +1207,7 @@ impl ModelClientSession {
summary,
service_tier,
turn_metadata_header,
true,
/*warmup*/ true,
)
.await
{
@ -1255,7 +1260,7 @@ impl ModelClientSession {
summary,
service_tier,
turn_metadata_header,
false,
/*warmup*/ false,
)
.await?
{
@ -1297,14 +1302,15 @@ impl ModelClientSession {
warn!("falling back to HTTP");
session_telemetry.counter(
"codex.transport.fallback_to_http",
1,
/*inc*/ 1,
&[("from_wire_api", "responses_websocket")],
);
self.websocket_session.connection = None;
self.websocket_session.last_request = None;
self.websocket_session.last_response_rx = None;
self.websocket_session.set_connection_reused(false);
self.websocket_session
.set_connection_reused(/*connection_reused*/ false);
}
activated
}
@ -1527,7 +1533,7 @@ async fn handle_unauthorized(
debug.cf_ray.as_deref(),
debug.auth_error.as_deref(),
debug.auth_error_code.as_deref(),
None,
/*recovery_reason*/ None,
step_result.auth_state_changed(),
);
emit_feedback_auth_recovery_tags(
@ -1550,8 +1556,8 @@ async fn handle_unauthorized(
debug.cf_ray.as_deref(),
debug.auth_error.as_deref(),
debug.auth_error_code.as_deref(),
None,
None,
/*recovery_reason*/ None,
/*auth_state_changed*/ None,
);
emit_feedback_auth_recovery_tags(
mode,
@ -1573,8 +1579,8 @@ async fn handle_unauthorized(
debug.cf_ray.as_deref(),
debug.auth_error.as_deref(),
debug.auth_error_code.as_deref(),
None,
None,
/*recovery_reason*/ None,
/*auth_state_changed*/ None,
);
emit_feedback_auth_recovery_tags(
mode,
@ -1607,7 +1613,7 @@ async fn handle_unauthorized(
debug.auth_error.as_deref(),
debug.auth_error_code.as_deref(),
recovery_reason,
None,
/*auth_state_changed*/ None,
);
emit_feedback_auth_recovery_tags(
mode,

View file

@ -630,7 +630,7 @@ impl Codex {
/// Submit the `op` wrapped in a `Submission` with a unique ID.
pub async fn submit(&self, op: Op) -> CodexResult<String> {
self.submit_with_trace(op, None).await
self.submit_with_trace(op, /*trace*/ None).await
}
pub async fn submit_with_trace(
@ -855,9 +855,11 @@ impl TurnContext {
};
config.model_reasoning_effort = reasoning_effort;
let collaboration_mode =
self.collaboration_mode
.with_updates(Some(model.clone()), Some(reasoning_effort), None);
let collaboration_mode = self.collaboration_mode.with_updates(
Some(model.clone()),
Some(reasoning_effort),
/*developer_instructions*/ None,
);
let features = self.features.clone();
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_info: &model_info,
@ -1590,7 +1592,7 @@ impl Session {
config.features.emit_metrics(&session_telemetry);
session_telemetry.counter(
THREAD_STARTED_METRIC,
1,
/*inc*/ 1,
&[(
"is_git",
if get_git_repo_root(&session_configuration.cwd).is_some() {
@ -1722,7 +1724,8 @@ impl Session {
(None, None)
};
let mut hook_shell_argv = default_shell.derive_exec_args("", false);
let mut hook_shell_argv =
default_shell.derive_exec_args("", /*use_login_shell*/ false);
let hook_shell_program = hook_shell_argv.remove(0);
let _ = hook_shell_argv.pop();
let hooks = Hooks::new(HooksConfig {
@ -2072,7 +2075,8 @@ impl Session {
InitialHistory::New => {
// Defer initial context insertion until the first real turn starts so
// turn/start overrides can be merged before we write model-visible context.
self.set_previous_turn_settings(None).await;
self.set_previous_turn_settings(/*previous_turn_settings*/ None)
.await;
}
InitialHistory::Resumed(resumed_history) => {
let rollout_items = resumed_history.history;
@ -2449,7 +2453,7 @@ impl Session {
startup_turn_context.as_ref(),
&[],
&HashSet::new(),
None,
/*skills_outcome*/ None,
&startup_cancellation_token,
)
.await?;
@ -2535,8 +2539,13 @@ impl Session {
let state = self.state.lock().await;
state.session_configuration.clone()
};
self.new_turn_from_configuration(sub_id, session_configuration, None, false)
.await
self.new_turn_from_configuration(
sub_id,
session_configuration,
/*final_output_json_schema*/ None,
/*sandbox_policy_changed*/ false,
)
.await
}
async fn build_settings_update_items(
@ -3284,7 +3293,7 @@ impl Session {
pub(crate) async fn record_model_warning(&self, message: impl Into<String>, ctx: &TurnContext) {
self.services
.session_telemetry
.counter("codex.model_warning", 1, &[]);
.counter("codex.model_warning", /*inc*/ 1, &[]);
let item = ResponseItem::Message {
id: None,
role: "user".to_string(),
@ -4186,7 +4195,7 @@ async fn submission_loop(sess: Arc<Session>, config: Arc<Config>, rx_sub: Receiv
state.session_configuration.collaboration_mode.with_updates(
model.clone(),
effort,
None,
/*developer_instructions*/ None,
)
};
handlers::override_turn_context(
@ -4536,7 +4545,9 @@ mod handlers {
current_context.session_telemetry.user_prompt(&items);
// Attempt to inject input into current task.
if let Err(SteerInputError::NoActiveTurn(items)) = sess.steer_input(items, None).await {
if let Err(SteerInputError::NoActiveTurn(items)) =
sess.steer_input(items, /*expected_turn_id*/ None).await
{
sess.refresh_mcp_servers_if_requested(&current_context)
.await;
let regular_task = sess.take_startup_regular_task().await.unwrap_or_default();
@ -5281,7 +5292,7 @@ async fn spawn_review_thread(
sess.services.shell_zsh_path.as_ref(),
sess.services.main_execve_wrapper_exe.as_ref(),
)
.with_web_search_config(None)
.with_web_search_config(/*web_search_config*/ None)
.with_allow_login_shell(config.permissions.allow_login_shell)
.with_agent_roles(config.agent_roles.clone());
@ -5964,7 +5975,7 @@ pub(crate) async fn run_turn(
}
Err(e) => {
info!("Turn error: {e:#}");
let event = EventMsg::Error(e.to_error_event(None));
let event = EventMsg::Error(e.to_error_event(/*message_prefix*/ None));
sess.send_event(&turn_context, event).await;
// let the user continue the conversation
break;
@ -7031,7 +7042,8 @@ async fn handle_assistant_item_done_in_plan_mode(
{
maybe_complete_plan_item_from_message(sess, turn_context, state, item).await;
if let Some(turn_item) = handle_non_tool_response_item(sess, turn_context, item, true).await
if let Some(turn_item) =
handle_non_tool_response_item(sess, turn_context, item, /*plan_mode*/ true).await
{
emit_turn_item_in_plan_mode(
sess,
@ -7044,7 +7056,7 @@ async fn handle_assistant_item_done_in_plan_mode(
}
record_completed_response_item(sess, turn_context, item).await;
if let Some(agent_message) = last_assistant_message_from_item(item, true) {
if let Some(agent_message) = last_assistant_message_from_item(item, /*plan_mode*/ true) {
*last_agent_message = Some(agent_message);
}
return true;
@ -7415,7 +7427,7 @@ async fn try_run_sampling_request(
pub(super) fn get_last_assistant_message_from_turn(responses: &[ResponseItem]) -> Option<String> {
for item in responses.iter().rev() {
if let Some(message) = last_assistant_message_from_item(item, false) {
if let Some(message) = last_assistant_message_from_item(item, /*plan_mode*/ false) {
return Some(message);
}
}

View file

@ -481,7 +481,7 @@ async fn handle_exec_approval(
parent_session,
&approval_id_for_op,
cancel_token,
None,
/*review_cancel_token*/ None,
)
.await
};
@ -587,7 +587,7 @@ async fn handle_patch_approval(
parent_session,
&approval_id,
cancel_token,
None,
/*review_cancel_token*/ None,
)
.await
};
@ -675,7 +675,7 @@ async fn maybe_auto_review_mcp_request_user_input(
Arc::clone(parent_session),
Arc::clone(parent_ctx),
build_guardian_mcp_tool_review_request(&event.call_id, &invocation, metadata.as_ref()),
None,
/*retry_reason*/ None,
review_cancel.clone(),
);
let decision = await_approval_with_cancel(

View file

@ -173,7 +173,7 @@ impl CodexThread {
if was_zero {
self.codex
.session
.set_out_of_band_elicitation_pause_state(true);
.set_out_of_band_elicitation_pause_state(/*paused*/ true);
}
Ok(*guard)
@ -192,7 +192,7 @@ impl CodexThread {
if now_zero {
self.codex
.session
.set_out_of_band_elicitation_pause_state(false);
.set_out_of_band_elicitation_pause_state(/*paused*/ false);
}
Ok(*guard)

View file

@ -163,7 +163,7 @@ async fn run_compact_task_inner(
continue;
}
sess.set_total_tokens_full(turn_context.as_ref()).await;
let event = EventMsg::Error(e.to_error_event(None));
let event = EventMsg::Error(e.to_error_event(/*message_prefix*/ None));
sess.send_event(&turn_context, event).await;
return Err(e);
}
@ -180,7 +180,7 @@ async fn run_compact_task_inner(
tokio::time::sleep(delay).await;
continue;
} else {
let event = EventMsg::Error(e.to_error_event(None));
let event = EventMsg::Error(e.to_error_event(/*message_prefix*/ None));
sess.send_event(&turn_context, event).await;
return Err(e);
}

View file

@ -101,7 +101,7 @@ async fn run_remote_compact_task_inner_impl(
turn_context.as_ref(),
&prompt_input,
&HashSet::new(),
None,
/*skills_outcome*/ None,
&CancellationToken::new(),
)
.await?;

View file

@ -19,8 +19,10 @@ pub(crate) fn load_agent_roles(
config_layer_stack: &ConfigLayerStack,
startup_warnings: &mut Vec<String>,
) -> std::io::Result<BTreeMap<String, AgentRoleConfig>> {
let layers =
config_layer_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false);
let layers = config_layer_stack.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
);
if layers.is_empty() {
return load_agent_roles_without_layers(cfg);
}
@ -450,13 +452,14 @@ fn discover_agent_roles_in_dir(
if declared_role_files.contains(&agent_file) {
continue;
}
let parsed_file = match read_resolved_agent_role_file(&agent_file, None) {
Ok(parsed_file) => parsed_file,
Err(err) => {
push_agent_role_warning(startup_warnings, err);
continue;
}
};
let parsed_file =
match read_resolved_agent_role_file(&agent_file, /*role_name_hint*/ None) {
Ok(parsed_file) => parsed_file,
Err(err) => {
push_agent_role_warning(startup_warnings, err);
continue;
}
};
let role_name = parsed_file.role_name;
if roles.contains_key(&role_name) {
push_agent_role_warning(

View file

@ -81,11 +81,11 @@ impl ManagedFeatures {
}
pub fn enable(&mut self, feature: Feature) -> ConstraintResult<()> {
self.set_enabled(feature, true)
self.set_enabled(feature, /*enabled*/ true)
}
pub fn disable(&mut self, feature: Feature) -> ConstraintResult<()> {
self.set_enabled(feature, false)
self.set_enabled(feature, /*enabled*/ false)
}
}
@ -321,7 +321,12 @@ pub(crate) fn validate_feature_requirements_in_config_toml(
})
}
validate_profile(cfg, None, &ConfigProfile::default(), feature_requirements)?;
validate_profile(
cfg,
/*profile_name*/ None,
&ConfigProfile::default(),
feature_requirements,
)?;
for (profile_name, profile) in &cfg.profiles {
validate_profile(cfg, Some(profile_name), profile, feature_requirements)?;
}

View file

@ -1831,9 +1831,10 @@ fn resolve_permission_config_syntax(
}
let mut selection = None;
for layer in
config_layer_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false)
{
for layer in config_layer_stack.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
) {
let Ok(layer_selection) = layer.config.clone().try_into::<PermissionSelectionToml>() else {
continue;
};

View file

@ -77,7 +77,7 @@ impl NetworkProxySpec {
}
pub fn proxy_host_and_port(&self) -> String {
host_and_port_from_network_addr(&self.config.network.proxy_url, 3128)
host_and_port_from_network_addr(&self.config.network.proxy_url, /*default_port*/ 3128)
}
pub fn socks_enabled(&self) -> bool {

View file

@ -281,9 +281,11 @@ fn parse_special_path(path: &str) -> Option<FileSystemSpecialPath> {
match path {
":root" => Some(FileSystemSpecialPath::Root),
":minimal" => Some(FileSystemSpecialPath::Minimal),
":project_roots" => Some(FileSystemSpecialPath::project_roots(None)),
":project_roots" => Some(FileSystemSpecialPath::project_roots(/*subpath*/ None)),
":tmpdir" => Some(FileSystemSpecialPath::Tmpdir),
_ if path.starts_with(':') => Some(FileSystemSpecialPath::unknown(path, None)),
_ if path.starts_with(':') => {
Some(FileSystemSpecialPath::unknown(path, /*subpath*/ None))
}
_ => None,
}
}

View file

@ -183,7 +183,10 @@ impl ConfigService {
origins: layers.origins(),
layers: params.include_layers.then(|| {
layers
.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst, true)
.get_layers(
ConfigLayerStackOrdering::HighestPrecedenceFirst,
/*include_disabled*/ true,
)
.iter()
.map(|layer| layer.as_layer())
.collect()

View file

@ -56,12 +56,13 @@ pub(super) async fn load_config_layers_internal(
managed_config_path.unwrap_or_else(|| managed_config_default_path(codex_home)),
)?;
let managed_config = read_config_from_path(&managed_config_path, false)
.await?
.map(|managed_config| MangedConfigFromFile {
managed_config,
file: managed_config_path.clone(),
});
let managed_config =
read_config_from_path(&managed_config_path, /*log_missing_as_info*/ false)
.await?
.map(|managed_config| MangedConfigFromFile {
managed_config,
file: managed_config_path.clone(),
});
#[cfg(target_os = "macos")]
let managed_preferences =

View file

@ -209,7 +209,7 @@ pub async fn load_config_layers_state(
return Err(io_error_from_config_error(
io::ErrorKind::InvalidData,
config_error,
None,
/*source*/ None,
));
}
return Err(err);
@ -853,15 +853,20 @@ async fn load_project_layers(
&dot_codex_abs,
&layer_dir,
TomlValue::Table(toml::map::Map::new()),
true,
/*config_toml_exists*/ true,
));
continue;
}
};
let config =
resolve_relative_paths_in_config_toml(config, dot_codex_abs.as_path())?;
let entry =
project_layer_entry(trust_context, &dot_codex_abs, &layer_dir, config, true);
let entry = project_layer_entry(
trust_context,
&dot_codex_abs,
&layer_dir,
config,
/*config_toml_exists*/ true,
);
layers.push(entry);
}
Err(err) => {
@ -874,7 +879,7 @@ async fn load_project_layers(
&dot_codex_abs,
&layer_dir,
TomlValue::Table(toml::map::Map::new()),
false,
/*config_toml_exists*/ false,
));
} else {
let config_file_display = config_file.as_path().display();

View file

@ -104,9 +104,11 @@ pub async fn list_accessible_connectors_from_mcp_tools(
config: &Config,
) -> anyhow::Result<Vec<AppInfo>> {
Ok(
list_accessible_connectors_from_mcp_tools_with_options_and_status(config, false)
.await?
.connectors,
list_accessible_connectors_from_mcp_tools_with_options_and_status(
config, /*force_refetch*/ false,
)
.await?
.connectors,
)
}
@ -186,7 +188,12 @@ pub async fn list_accessible_connectors_from_mcp_tools_with_options_and_status(
});
}
let mcp_servers = with_codex_apps_mcp(HashMap::new(), true, auth.as_ref(), config);
let mcp_servers = with_codex_apps_mcp(
HashMap::new(),
/*connectors_enabled*/ true,
auth.as_ref(),
config,
);
if mcp_servers.is_empty() {
return Ok(AccessibleConnectorsStatus {
connectors: Vec::new(),
@ -408,7 +415,7 @@ async fn list_directory_connectors_for_tool_suggest_with_auth(
codex_connectors::list_all_connectors_with_options(
cache_key,
is_workspace_account,
false,
/*force_refetch*/ false,
|path| {
let access_token = access_token.clone();
let account_id = account_id.clone();
@ -459,7 +466,7 @@ async fn chatgpt_get_request_with_token<T: DeserializeOwned>(
fn auth_manager_from_config(config: &Config) -> std::sync::Arc<AuthManager> {
AuthManager::shared(
config.codex_home.clone(),
false,
/*enable_codex_api_key_env*/ false,
config.cli_auth_credentials_store_mode,
)
}

View file

@ -55,7 +55,9 @@ impl ContextManager {
pub(crate) fn new() -> Self {
Self {
items: Vec::new(),
token_info: TokenUsageInfo::new_or_append(&None, &None, None),
token_info: TokenUsageInfo::new_or_append(
&None, &None, /*model_context_window*/ None,
),
reference_context_item: None,
}
}

View file

@ -97,7 +97,7 @@ pub fn originator() -> Originator {
}
if std::env::var(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR).is_ok() {
let originator = get_originator_value(None);
let originator = get_originator_value(/*provided*/ None);
if let Ok(mut guard) = ORIGINATOR.write() {
match guard.as_ref() {
Some(originator) => return originator.clone(),
@ -107,7 +107,7 @@ pub fn originator() -> Originator {
return originator;
}
get_originator_value(None)
get_originator_value(/*provided*/ None)
}
pub fn is_first_party_originator(originator_value: &str) -> bool {

View file

@ -82,7 +82,14 @@ impl EnvironmentContext {
} else {
before_network
};
EnvironmentContext::new(cwd, shell.clone(), current_date, timezone, network, None)
EnvironmentContext::new(
cwd,
shell.clone(),
current_date,
timezone,
network,
/*subagents*/ None,
)
}
pub fn from_turn_context(turn_context: &TurnContext, shell: &Shell) -> Self {
@ -92,7 +99,7 @@ impl EnvironmentContext {
turn_context.current_date.clone(),
turn_context.timezone.clone(),
Self::network_from_turn_context(turn_context),
None,
/*subagents*/ None,
)
}
@ -103,7 +110,7 @@ impl EnvironmentContext {
turn_context_item.current_date.clone(),
turn_context_item.timezone.clone(),
Self::network_from_turn_context_item(turn_context_item),
None,
/*subagents*/ None,
)
}

View file

@ -877,12 +877,12 @@ async fn consume_truncated_output(
let stdout_handle = tokio::spawn(read_capped(
BufReader::new(stdout_reader),
stdout_stream.clone(),
false,
/*is_stderr*/ false,
));
let stderr_handle = tokio::spawn(read_capped(
BufReader::new(stderr_reader),
stdout_stream.clone(),
true,
/*is_stderr*/ true,
));
let (exit_status, timed_out) = tokio::select! {

View file

@ -449,7 +449,10 @@ pub async fn load_exec_policy(config_stack: &ConfigLayerStack) -> Result<Policy,
// from each layer, so that higher-precedence layers can override
// rules defined in lower-precedence ones.
let mut policy_paths = Vec::new();
for layer in config_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false) {
for layer in config_stack.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
) {
if let Some(config_folder) = layer.config_folder() {
#[expect(clippy::expect_used)]
let policy_dir = config_folder.join(RULES_DIR_NAME).expect("safe join");

View file

@ -60,7 +60,7 @@ impl ExternalAgentConfigService {
) -> io::Result<Vec<ExternalAgentConfigMigrationItem>> {
let mut items = Vec::new();
if params.include_home {
self.detect_migrations(None, &mut items)?;
self.detect_migrations(/*repo_root*/ None, &mut items)?;
}
for cwd in params.cwds.as_deref().unwrap_or(&[]) {
@ -81,7 +81,7 @@ impl ExternalAgentConfigService {
emit_migration_metric(
EXTERNAL_AGENT_CONFIG_IMPORT_METRIC,
ExternalAgentConfigMigrationItemType::Config,
None,
/*skills_count*/ None,
);
}
ExternalAgentConfigMigrationItemType::Skills => {
@ -97,7 +97,7 @@ impl ExternalAgentConfigService {
emit_migration_metric(
EXTERNAL_AGENT_CONFIG_IMPORT_METRIC,
ExternalAgentConfigMigrationItemType::AgentsMd,
None,
/*skills_count*/ None,
);
}
ExternalAgentConfigMigrationItemType::McpServerConfig => {}
@ -153,7 +153,7 @@ impl ExternalAgentConfigService {
emit_migration_metric(
EXTERNAL_AGENT_CONFIG_DETECT_METRIC,
ExternalAgentConfigMigrationItemType::Config,
None,
/*skills_count*/ None,
);
}
}
@ -210,7 +210,7 @@ impl ExternalAgentConfigService {
emit_migration_metric(
EXTERNAL_AGENT_CONFIG_DETECT_METRIC,
ExternalAgentConfigMigrationItemType::AgentsMd,
None,
/*skills_count*/ None,
);
}
@ -684,7 +684,7 @@ fn emit_migration_metric(
.iter()
.map(|(key, value)| (*key, value.as_str()))
.collect::<Vec<_>>();
let _ = metrics.counter(metric_name, 1, &tag_refs);
let _ = metrics.counter(metric_name, /*inc*/ 1, &tag_refs);
}
#[cfg(test)]

View file

@ -341,7 +341,7 @@ impl Features {
if self.enabled(feature.id) != feature.default_enabled {
otel.counter(
"codex.feature.state",
1,
/*inc*/ 1,
&[
("feature", feature.key),
("value", &self.enabled(feature.id).to_string()),

View file

@ -198,7 +198,7 @@ pub(crate) fn guardian_approval_request_to_json(
*sandbox_permissions,
additional_permissions.as_ref(),
justification.as_ref(),
None,
/*tty*/ None,
),
GuardianApprovalRequest::ExecCommand {
id: _,

View file

@ -221,7 +221,7 @@ pub(crate) async fn review_approval_request(
Arc::clone(turn),
request,
retry_reason,
None,
/*external_cancel*/ None,
)
.await
}

View file

@ -266,7 +266,7 @@ impl GuardianReviewSessionManager {
params.spawn_config.clone(),
next_reuse_key.clone(),
spawn_cancel_token.clone(),
None,
/*initial_history*/ None,
)),
)
.await
@ -297,7 +297,12 @@ impl GuardianReviewSessionManager {
if trunk.reuse_key != next_reuse_key {
return self
.run_ephemeral_review(params, next_reuse_key, deadline, None)
.run_ephemeral_review(
params,
next_reuse_key,
deadline,
/*initial_history*/ None,
)
.await;
}

View file

@ -43,7 +43,7 @@ where
network_sandbox_policy,
sandbox_policy_cwd,
use_legacy_landlock,
allow_network_for_proxy(false),
allow_network_for_proxy(/*enforce_managed_network*/ false),
);
let arg0 = Some("codex-linux-sandbox");
spawn_child_async(SpawnChildRequest {

View file

@ -252,7 +252,7 @@ fn effective_mcp_servers(
pub async fn collect_mcp_snapshot(config: &Config) -> McpListToolsResponseEvent {
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
/*enable_codex_api_key_env*/ false,
config.cli_auth_credentials_store_mode,
);
let auth = auth_manager.auth().await;

View file

@ -239,7 +239,7 @@ pub(crate) async fn maybe_install_mcp_dependencies(
.await;
let resolved_scopes = resolve_oauth_scopes(
None,
/*explicit_scopes*/ None,
server_config.scopes.clone(),
oauth_config.discovered_scopes.clone(),
);

View file

@ -1585,7 +1585,9 @@ async fn list_tools_for_client_uncached(
client: &Arc<RmcpClient>,
timeout: Option<Duration>,
) -> Result<Vec<ToolInfo>> {
let resp = client.list_tools_with_connector_ids(None, timeout).await?;
let resp = client
.list_tools_with_connector_ids(/*params*/ None, timeout)
.await?;
let tools = resp
.tools
.into_iter()

View file

@ -108,13 +108,15 @@ pub(crate) async fn handle_mcp_tool_call(
&call_id,
invocation,
"MCP tool call blocked by app configuration".to_string(),
false,
/*already_started*/ false,
)
.await;
let status = if result.is_ok() { "ok" } else { "error" };
turn_context
.session_telemetry
.counter("codex.mcp.call", 1, &[("status", status)]);
turn_context.session_telemetry.counter(
"codex.mcp.call",
/*inc*/ 1,
&[("status", status)],
);
return CallToolResult::from_result(result);
}
let request_meta = build_mcp_tool_call_request_meta(&server, metadata.as_ref());
@ -190,7 +192,7 @@ pub(crate) async fn handle_mcp_tool_call(
&call_id,
invocation,
message,
true,
/*already_started*/ true,
)
.await
}
@ -202,7 +204,7 @@ pub(crate) async fn handle_mcp_tool_call(
&call_id,
invocation,
message,
true,
/*already_started*/ true,
)
.await
}
@ -213,16 +215,18 @@ pub(crate) async fn handle_mcp_tool_call(
&call_id,
invocation,
message,
true,
/*already_started*/ true,
)
.await
}
};
let status = if result.is_ok() { "ok" } else { "error" };
turn_context
.session_telemetry
.counter("codex.mcp.call", 1, &[("status", status)]);
turn_context.session_telemetry.counter(
"codex.mcp.call",
/*inc*/ 1,
&[("status", status)],
);
return CallToolResult::from_result(result);
}
@ -263,7 +267,7 @@ pub(crate) async fn handle_mcp_tool_call(
let status = if result.is_ok() { "ok" } else { "error" };
turn_context
.session_telemetry
.counter("codex.mcp.call", 1, &[("status", status)]);
.counter("codex.mcp.call", /*inc*/ 1, &[("status", status)]);
CallToolResult::from_result(result)
}

View file

@ -97,7 +97,7 @@ pub(in crate::memories) async fn run(session: &Arc<Session>, config: &Config) {
if claimed_candidates.is_empty() {
session.services.session_telemetry.counter(
metrics::MEMORY_PHASE_ONE_JOBS,
1,
/*inc*/ 1,
&[("status", "skipped_no_candidates")],
);
return;
@ -211,7 +211,7 @@ async fn claim_startup_jobs(
warn!("state db claim_stage1_jobs_for_startup failed during memories startup: {err}");
session.services.session_telemetry.counter(
metrics::MEMORY_PHASE_ONE_JOBS,
1,
/*inc*/ 1,
&[("status", "failed_claim")],
);
None

View file

@ -61,7 +61,7 @@ pub(super) async fn run(session: &Arc<Session>, config: Arc<Config>) {
Err(e) => {
session.services.session_telemetry.counter(
metrics::MEMORY_PHASE_TWO_JOBS,
1,
/*inc*/ 1,
&[("status", e)],
);
return;
@ -198,7 +198,7 @@ mod job {
} => {
session_telemetry.counter(
metrics::MEMORY_PHASE_TWO_JOBS,
1,
/*inc*/ 1,
&[("status", "claimed")],
);
(ownership_token, input_watermark)
@ -218,7 +218,7 @@ mod job {
) {
session.services.session_telemetry.counter(
metrics::MEMORY_PHASE_TWO_JOBS,
1,
/*inc*/ 1,
&[("status", reason)],
);
if matches!(
@ -250,7 +250,7 @@ mod job {
) {
session.services.session_telemetry.counter(
metrics::MEMORY_PHASE_TWO_JOBS,
1,
/*inc*/ 1,
&[("status", reason)],
);
let _ = db
@ -462,7 +462,7 @@ fn emit_metrics(session: &Arc<Session>, counters: Counters) {
otel.counter(
metrics::MEMORY_PHASE_TWO_JOBS,
1,
/*inc*/ 1,
&[("status", "agent_spawned")],
);
}

View file

@ -41,7 +41,7 @@ pub(crate) async fn emit_metric_for_tool_read(invocation: &ToolInvocation, succe
for kind in kinds {
invocation.turn.session_telemetry.counter(
MEMORIES_USAGE_METRIC,
1,
/*inc*/ 1,
&[
("kind", kind.as_tag()),
("tool", invocation.tool_name.as_str()),

View file

@ -182,7 +182,7 @@ impl ModelsManager {
auth_manager,
model_catalog,
collaboration_modes_config,
ModelProviderInfo::create_openai_provider(/* base_url */ None),
ModelProviderInfo::create_openai_provider(/*base_url*/ None),
)
}
@ -523,7 +523,7 @@ impl ModelsManager {
Self::new_with_provider(
codex_home,
auth_manager,
None,
/*model_catalog*/ None,
CollaborationModesConfig::default(),
provider,
)

View file

@ -80,7 +80,7 @@ pub(crate) fn model_info_from_slug(slug: &str) -> ModelInfo {
default_verbosity: None,
apply_patch_tool_type: None,
web_search_tool_type: WebSearchToolType::Text,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
truncation_policy: TruncationPolicyConfig::bytes(/*limit*/ 10_000),
supports_parallel_tool_calls: false,
supports_image_detail_original: false,
context_window: Some(272_000),

View file

@ -46,7 +46,7 @@ async fn build_config_state_with_mtimes() -> Result<(ConfigState, Vec<LayerMtime
let overrides = LoaderOverrides::default();
let config_layer_stack = load_config_layers_state(
&codex_home,
None,
/*cwd*/ None,
&cli_overrides,
overrides,
CloudRequirementsLoader::default(),
@ -78,7 +78,10 @@ async fn build_config_state_with_mtimes() -> Result<(ConfigState, Vec<LayerMtime
fn collect_layer_mtimes(stack: &ConfigLayerStack) -> Vec<LayerMtime> {
stack
.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false)
.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
)
.iter()
.filter_map(|layer| {
let path = match &layer.name {
@ -113,7 +116,10 @@ fn network_constraints_from_trusted_layers(
layers: &ConfigLayerStack,
) -> Result<NetworkProxyConstraints> {
let mut constraints = NetworkProxyConstraints::default();
for layer in layers.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false) {
for layer in layers.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
) {
if is_user_controlled_layer(&layer.name) {
continue;
}
@ -196,7 +202,10 @@ fn config_from_layers(
exec_policy: &codex_execpolicy::Policy,
) -> Result<NetworkProxyConfig> {
let mut config = NetworkProxyConfig::default();
for layer in layers.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false) {
for layer in layers.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
) {
let parsed = network_tables_from_toml(&layer.config)?;
apply_network_tables(&mut config, parsed)?;
}

View file

@ -34,7 +34,7 @@ pub async fn maybe_migrate_personality(
}
let config_profile = config_toml
.get_config_profile(None)
.get_config_profile(/*override_profile*/ None)
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?;
if config_toml.personality.is_some() || config_profile.personality.is_some() {
create_marker(&marker_path).await?;
@ -70,12 +70,12 @@ async fn has_recorded_sessions(codex_home: &Path, default_provider: &str) -> io:
&& let Some(ids) = state_db::list_thread_ids_db(
Some(state_db_ctx.as_ref()),
codex_home,
1,
None,
/*page_size*/ 1,
/*cursor*/ None,
ThreadSortKey::CreatedAt,
allowed_sources,
None,
false,
/*model_providers*/ None,
/*archived_only*/ false,
"personality_migration",
)
.await
@ -86,8 +86,8 @@ async fn has_recorded_sessions(codex_home: &Path, default_provider: &str) -> io:
let sessions = get_threads_in_root(
codex_home.join(SESSIONS_SUBDIR),
1,
None,
/*page_size*/ 1,
/*cursor*/ None,
ThreadSortKey::CreatedAt,
ThreadListConfig {
allowed_sources,
@ -103,8 +103,8 @@ async fn has_recorded_sessions(codex_home: &Path, default_provider: &str) -> io:
let archived_sessions = get_threads_in_root(
codex_home.join(ARCHIVED_SESSIONS_SUBDIR),
1,
None,
/*page_size*/ 1,
/*cursor*/ None,
ThreadSortKey::CreatedAt,
ThreadListConfig {
allowed_sources,

View file

@ -434,7 +434,11 @@ impl PluginsManager {
}
pub fn plugins_for_config(&self, config: &Config) -> PluginLoadOutcome {
self.plugins_for_layer_stack(&config.cwd, &config.config_layer_stack, false)
self.plugins_for_layer_stack(
&config.cwd,
&config.config_layer_stack,
/*force_reload*/ false,
)
}
pub fn plugins_for_layer_stack(

View file

@ -190,10 +190,10 @@ pub fn discover_project_doc_paths(config: &Config) -> std::io::Result<Vec<PathBu
}
let mut merged = TomlValue::Table(toml::map::Map::new());
for layer in config
.config_layer_stack
.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false)
{
for layer in config.config_layer_stack.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
) {
if matches!(layer.name, ConfigLayerSource::Project { .. }) {
continue;
}

View file

@ -105,12 +105,12 @@ async fn load_recent_threads(sess: &Session) -> Vec<ThreadMetadata> {
match state_db
.list_threads(
MAX_RECENT_THREADS,
None,
/*anchor*/ None,
SortKey::UpdatedAt,
&[],
None,
false,
None,
/*model_providers*/ None,
/*archived_only*/ false,
/*search_term*/ None,
)
.await
{
@ -235,7 +235,7 @@ fn render_tree(root: &Path) -> Option<Vec<String>> {
}
let mut lines = Vec::new();
collect_tree_lines(root, 0, &mut lines);
collect_tree_lines(root, /*depth*/ 0, &mut lines);
(!lines.is_empty()).then_some(lines)
}

View file

@ -68,7 +68,7 @@ pub fn render_review_output_text(output: &ReviewOutputEvent) -> String {
sections.push(explanation.to_string());
}
if !output.findings.is_empty() {
let findings = format_review_findings_block(&output.findings, None);
let findings = format_review_findings_block(&output.findings, /*selection*/ None);
let trimmed = findings.trim();
if !trimmed.is_empty() {
sections.push(trimmed.to_string());

View file

@ -1223,7 +1223,7 @@ async fn find_thread_path_by_id_str_in_subdir(
..Default::default()
};
let results = file_search::run(id_str, vec![root], options, None)
let results = file_search::run(id_str, vec![root], options, /*cancel_flag*/ None)
.map_err(|e| io::Error::other(format!("file search failed: {e}")))?;
let found = results.matches.into_iter().next().map(|m| m.full_path());

View file

@ -180,7 +180,7 @@ impl RolloutRecorder {
allowed_sources,
model_providers,
default_provider,
false,
/*archived*/ false,
search_term,
)
.await
@ -206,7 +206,7 @@ impl RolloutRecorder {
allowed_sources,
model_providers,
default_provider,
true,
/*archived*/ true,
search_term,
)
.await
@ -320,8 +320,8 @@ impl RolloutRecorder {
sort_key,
allowed_sources,
model_providers,
false,
None,
/*archived*/ false,
/*search_term*/ None,
)
.await
else {
@ -889,7 +889,7 @@ async fn write_and_reconcile_items(
state_builder,
items,
default_provider,
None,
/*new_thread_memory_mode*/ None,
)
.await;
Ok(())

View file

@ -728,7 +728,13 @@ pub async fn execute_env(
stdout_stream: Option<StdoutStream>,
) -> crate::error::Result<ExecToolCallOutput> {
let effective_policy = exec_request.sandbox_policy.clone();
execute_exec_request(exec_request, &effective_policy, stdout_stream, None).await
execute_exec_request(
exec_request,
&effective_policy,
stdout_stream,
/*after_spawn*/ None,
)
.await
}
pub async fn execute_exec_request_with_after_spawn(

View file

@ -45,8 +45,13 @@ pub async fn spawn_command_under_seatbelt(
network: Option<&NetworkProxy>,
mut env: HashMap<String, String>,
) -> std::io::Result<Child> {
let args =
create_seatbelt_command_args(command, sandbox_policy, sandbox_policy_cwd, false, network);
let args = create_seatbelt_command_args(
command,
sandbox_policy,
sandbox_policy_cwd,
/*enforce_managed_network*/ false,
network,
);
let arg0 = None;
env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string());
spawn_child_async(SpawnChildRequest {
@ -338,7 +343,7 @@ pub(crate) fn create_seatbelt_command_args(
sandbox_policy_cwd,
enforce_managed_network,
network,
None,
/*extensions*/ None,
)
}

View file

@ -291,20 +291,20 @@ pub fn default_user_shell() -> Shell {
fn default_user_shell_from_path(user_shell_path: Option<PathBuf>) -> Shell {
if cfg!(windows) {
get_shell(ShellType::PowerShell, None).unwrap_or(ultimate_fallback_shell())
get_shell(ShellType::PowerShell, /*path*/ None).unwrap_or(ultimate_fallback_shell())
} else {
let user_default_shell = user_shell_path
.and_then(|shell| detect_shell_type(&shell))
.and_then(|shell_type| get_shell(shell_type, None));
.and_then(|shell_type| get_shell(shell_type, /*path*/ None));
let shell_with_fallback = if cfg!(target_os = "macos") {
user_default_shell
.or_else(|| get_shell(ShellType::Zsh, None))
.or_else(|| get_shell(ShellType::Bash, None))
.or_else(|| get_shell(ShellType::Zsh, /*path*/ None))
.or_else(|| get_shell(ShellType::Bash, /*path*/ None))
} else {
user_default_shell
.or_else(|| get_shell(ShellType::Bash, None))
.or_else(|| get_shell(ShellType::Zsh, None))
.or_else(|| get_shell(ShellType::Bash, /*path*/ None))
.or_else(|| get_shell(ShellType::Zsh, /*path*/ None))
};
shell_with_fallback.unwrap_or(ultimate_fallback_shell())

View file

@ -102,7 +102,7 @@ impl ShellSnapshot {
if let Some(failure_reason) = snapshot.as_ref().err() {
counter_tags.push(("failure_reason", *failure_reason));
}
session_telemetry.counter("codex.shell_snapshot", 1, &counter_tags);
session_telemetry.counter("codex.shell_snapshot", /*inc*/ 1, &counter_tags);
let _ = shell_snapshot_tx.send(snapshot.ok());
}
.instrument(snapshot_span),
@ -199,7 +199,7 @@ async fn write_shell_snapshot(
if shell_type == ShellType::PowerShell || shell_type == ShellType::Cmd {
bail!("Shell snapshot not supported yet for {shell_type:?}");
}
let shell = get_shell(shell_type.clone(), None)
let shell = get_shell(shell_type.clone(), /*path*/ None)
.with_context(|| format!("No available shell for {shell_type:?}"))?;
let raw_snapshot = capture_snapshot(&shell, cwd).await?;
@ -243,13 +243,26 @@ fn strip_snapshot_preamble(snapshot: &str) -> Result<String> {
async fn validate_snapshot(shell: &Shell, snapshot_path: &Path, cwd: &Path) -> Result<()> {
let snapshot_path_display = snapshot_path.display();
let script = format!("set -e; . \"{snapshot_path_display}\"");
run_script_with_timeout(shell, &script, SNAPSHOT_TIMEOUT, false, cwd)
.await
.map(|_| ())
run_script_with_timeout(
shell,
&script,
SNAPSHOT_TIMEOUT,
/*use_login_shell*/ false,
cwd,
)
.await
.map(|_| ())
}
async fn run_shell_script(shell: &Shell, script: &str, cwd: &Path) -> Result<String> {
run_script_with_timeout(shell, script, SNAPSHOT_TIMEOUT, true, cwd).await
run_script_with_timeout(
shell,
script,
SNAPSHOT_TIMEOUT,
/*use_login_shell*/ true,
cwd,
)
.await
}
async fn run_script_with_timeout(

View file

@ -81,7 +81,7 @@ fn emit_skill_injected_metric(
otel.counter(
"codex.skill.injected",
1,
/*inc*/ 1,
&[("status", status), ("skill", skill.name.as_str())],
);
}

View file

@ -96,7 +96,7 @@ pub(crate) async fn maybe_emit_implicit_skill_invocation(
turn_context.session_telemetry.counter(
"codex.skill.injected",
1,
/*inc*/ 1,
&[
("status", "ok"),
("skill", skill_name.as_str()),

View file

@ -247,9 +247,10 @@ fn skill_roots_from_layer_stack_inner(
) -> Vec<SkillRoot> {
let mut roots = Vec::new();
for layer in
config_layer_stack.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst, true)
{
for layer in config_layer_stack.get_layers(
ConfigLayerStackOrdering::HighestPrecedenceFirst,
/*include_disabled*/ true,
) {
let Some(config_folder) = layer.config_folder() else {
continue;
};
@ -321,9 +322,10 @@ fn repo_agents_skill_roots(config_layer_stack: &ConfigLayerStack, cwd: &Path) ->
fn project_root_markers_from_stack(config_layer_stack: &ConfigLayerStack) -> Vec<String> {
let mut merged = TomlValue::Table(toml::map::Map::new());
for layer in
config_layer_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, false)
{
for layer in config_layer_stack.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ false,
) {
if matches!(layer.name, ConfigLayerSource::Project { .. }) {
continue;
}

View file

@ -250,9 +250,10 @@ fn disabled_paths_from_stack(
config_layer_stack: &crate::config_loader::ConfigLayerStack,
) -> HashSet<PathBuf> {
let mut configs = HashMap::new();
for layer in
config_layer_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, true)
{
for layer in config_layer_stack.get_layers(
ConfigLayerStackOrdering::LowestPrecedenceFirst,
/*include_disabled*/ true,
) {
if !matches!(
layer.name,
ConfigLayerSource::User { .. } | ConfigLayerSource::SessionFlags

View file

@ -350,7 +350,7 @@ pub async fn reconcile_rollout(
items,
"reconcile_rollout",
new_thread_memory_mode,
None,
/*updated_at_override*/ None,
)
.await;
return;
@ -472,10 +472,10 @@ pub async fn read_repair_rollout_path(
Some(ctx),
rollout_path,
default_provider.as_str(),
None,
/*builder*/ None,
&[],
archived_only,
None,
/*new_thread_memory_mode*/ None,
)
.await;
}

View file

@ -32,14 +32,14 @@ impl SessionTask for CompactTask {
let _ = if crate::compact::should_use_remote_compact_task(&ctx.provider) {
let _ = session.services.session_telemetry.counter(
"codex.task.compact",
1,
/*inc*/ 1,
&[("type", "remote")],
);
crate::compact_remote::run_remote_compact_task(session.clone(), ctx).await
} else {
let _ = session.services.session_telemetry.counter(
"codex.task.compact",
1,
/*inc*/ 1,
&[("type", "local")],
);
crate::compact::run_compact_task(session.clone(), ctx, input).await

View file

@ -68,7 +68,11 @@ fn emit_turn_network_proxy_metric(
} else {
"false"
};
session_telemetry.counter(TURN_NETWORK_PROXY_METRIC, 1, &[("active", active), tmp_mem]);
session_telemetry.counter(
TURN_NETWORK_PROXY_METRIC,
/*inc*/ 1,
&[("active", active), tmp_mem],
);
}
/// Thin wrapper that exposes the parts of [`Session`] task runners need.

Some files were not shown because too many files have changed in this diff Show more