chore(deps): Update some packages related to ring and aws-lc (#2991)

After `aws-lc-rs 0.15` it seems the illumos issue is resolved, so
reviving the `metrics` et al. update PR, specifically:
- Update `metrics` to 0.24 and `metrics-exporter-prometheus` to 0.17
- Drop the `ring` feature from `rustls`
- Update `reqwest` to 0.12 (dropping `rustls 0.21` from the lock file)

There still seem to be `ring` dependencies, but not sure if these can be
dropped

---------

Co-authored-by: Helmut K. C. Tessarek <tessarek@evermeet.cx>
This commit is contained in:
Cristian Le
2025-11-18 05:04:24 +01:00
committed by GitHub
parent 09230fd08d
commit e33b3de253
10 changed files with 254 additions and 400 deletions

614
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -51,7 +51,7 @@ version = "0.3"
features = ["ansi", "fmt", "registry", "env-filter"]
[workspace.dependencies.reqwest]
version = "0.11"
version = "0.12"
features = ["json", "rustls-tls-native-roots"]
default-features = false

View File

@@ -20,6 +20,6 @@ serde = { workspace = true }
sqlx = { workspace = true }
async-trait = { workspace = true }
uuid = { workspace = true }
metrics = "0.21.1"
metrics = "0.24"
futures-util = "0.3"
rand.workspace = true

View File

@@ -20,5 +20,5 @@ serde = { workspace = true }
sqlx = { workspace = true, features = ["sqlite", "regexp"] }
async-trait = { workspace = true }
uuid = { workspace = true }
metrics = "0.21.1"
metrics = "0.24"
futures-util = "0.3"

View File

@@ -24,14 +24,14 @@ rand = { workspace = true }
tokio = { workspace = true }
async-trait = { workspace = true }
axum = "0.7"
axum-server = { version = "0.7", features = ["tls-rustls-no-provider"] }
axum-server = { version = "0.7", features = ["tls-rustls"] }
fs-err = { workspace = true }
tower = { workspace = true }
tower-http = { version = "0.6", features = ["trace"] }
reqwest = { workspace = true }
rustls = { version = "0.23", features = ["ring"], default-features = false }
rustls = { version = "0.23"}
argon2 = "0.5"
semver = { workspace = true }
metrics-exporter-prometheus = "0.12.1"
metrics = "0.21.1"
metrics-exporter-prometheus = "0.17"
metrics = "0.24"
postmark = {version= "0.11", features=["reqwest", "reqwest-rustls-tls"]}

View File

@@ -65,7 +65,7 @@ pub async fn list<DB: Database>(
if req.sync_ts.unix_timestamp_nanos() < 0 || req.history_ts.unix_timestamp_nanos() < 0 {
error!("client asked for history from < epoch 0");
counter!("atuin_history_epoch_before_zero", 1);
counter!("atuin_history_epoch_before_zero").increment(1);
return Err(
ErrorResponse::reply("asked for history from before epoch 0")
@@ -95,7 +95,7 @@ pub async fn list<DB: Database>(
user.id
);
counter!("atuin_history_returned", history.len() as u64);
counter!("atuin_history_returned").increment(history.len() as u64);
Ok(Json(SyncHistoryResponse { history }))
}
@@ -131,7 +131,7 @@ pub async fn add<DB: Database>(
let State(AppState { database, settings }) = state;
debug!("request to add {} history items", req.len());
counter!("atuin_history_uploaded", req.len() as u64);
counter!("atuin_history_uploaded").increment(req.len() as u64);
let mut history: Vec<NewHistory> = req
.into_iter()
@@ -151,7 +151,7 @@ pub async fn add<DB: Database>(
// Don't return an error here. We want to insert as much of the
// history list as we can, so log the error and continue going.
if !keep {
counter!("atuin_history_too_long", 1);
counter!("atuin_history_too_long").increment(1);
tracing::warn!(
"history too long, got length {}, max {}",

View File

@@ -146,7 +146,7 @@ pub async fn register<DB: Database>(
.await;
}
counter!("atuin_users_registered", 1);
counter!("atuin_users_registered").increment(1);
match db.add_session(&new_session).await {
Ok(_) => Ok(Json(RegisterResponse { session: token })),
@@ -173,7 +173,7 @@ pub async fn delete<DB: Database>(
.with_status(StatusCode::INTERNAL_SERVER_ERROR));
};
counter!("atuin_users_deleted", 1);
counter!("atuin_users_deleted").increment(1);
Ok(Json(DeleteUserResponse {}))
}

View File

@@ -25,14 +25,14 @@ pub async fn post<DB: Database>(
"request to add records"
);
counter!("atuin_record_uploaded", records.len() as u64);
counter!("atuin_record_uploaded").increment(records.len() as u64);
let keep = records
.iter()
.all(|r| r.data.data.len() <= settings.max_record_size || settings.max_record_size == 0);
if !keep {
counter!("atuin_record_too_large", 1);
counter!("atuin_record_too_large").increment(1);
return Err(
ErrorResponse::reply("could not add records; record too large")
@@ -108,7 +108,7 @@ pub async fn next<DB: Database>(
}
};
counter!("atuin_record_downloaded", records.len() as u64);
counter!("atuin_record_downloaded").increment(records.len() as u64);
Ok(Json(records))
}

View File

@@ -24,14 +24,14 @@ pub async fn delete<DB: Database>(
}) = state;
if let Err(e) = database.delete_store(&user).await {
counter!("atuin_store_delete_failed", 1);
counter!("atuin_store_delete_failed").increment(1);
error!("failed to delete store {e:?}");
return Err(ErrorResponse::reply("failed to delete store")
.with_status(StatusCode::INTERNAL_SERVER_ERROR));
}
counter!("atuin_store_deleted", 1);
counter!("atuin_store_deleted").increment(1);
Ok(())
}

View File

@@ -48,8 +48,8 @@ pub async fn track_metrics(req: Request, next: Next) -> impl IntoResponse {
("status", status),
];
metrics::increment_counter!("http_requests_total", &labels);
metrics::histogram!("http_requests_duration_seconds", latency, &labels);
metrics::counter!("http_requests_total", &labels).increment(1);
metrics::histogram!("http_requests_duration_seconds", &labels).record(latency);
response
}