Skip to content

Commit

Permalink
Return rate limit stats
Browse files Browse the repository at this point in the history
  • Loading branch information
jelmer committed Sep 21, 2024
1 parent c0e5325 commit 4130dc9
Show file tree
Hide file tree
Showing 3 changed files with 59 additions and 14 deletions.
11 changes: 4 additions & 7 deletions publish/src/bin/janitor-publish.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ struct Args {

/// Limit number of pushes per cycle.
#[clap(long)]
push_limit: Option<i32>,
push_limit: Option<usize>,

/// Require a binary diff when publishing merge requests.
#[clap(long)]
Expand Down Expand Up @@ -104,7 +104,7 @@ async fn main() -> Result<(), i32> {
Box::new(NonRateLimiter) as Box<dyn RateLimiter>
});

let forge_rate_limiter = Arc::new(Mutex::new(HashMap::new()));
let forge_rate_limiter = Mutex::new(HashMap::new());

let vcs_managers = Box::new(janitor::vcs::get_vcs_managers_from_config(config));
let vcs_managers: &'static _ = Box::leak(vcs_managers);
Expand Down Expand Up @@ -150,6 +150,8 @@ async fn main() -> Result<(), i32> {
let state = Arc::new(janitor_publish::AppState {
conn: db.clone(),
bucket_rate_limiter,
forge_rate_limiter,
push_limit: args.push_limit,
});

if args.once {
Expand All @@ -159,7 +161,6 @@ async fn main() -> Result<(), i32> {
config,
publish_worker.clone(),
vcs_managers,
args.push_limit,
args.require_binary_diff,
)
.await
Expand All @@ -184,11 +185,9 @@ async fn main() -> Result<(), i32> {
redis_async_connection.clone(),
config,
publish_worker.clone(),
forge_rate_limiter.clone(),
vcs_managers,
chrono::Duration::seconds(args.interval),
!args.no_auto_publish,
args.push_limit,
args.modify_mp_limit,
args.require_binary_diff,
));
Expand All @@ -207,11 +206,9 @@ async fn main() -> Result<(), i32> {
let app = janitor_publish::web::app(
state.clone(),
publish_worker.clone(),
forge_rate_limiter.clone(),
vcs_managers,
args.require_binary_diff,
args.modify_mp_limit,
args.push_limit,
redis_async_connection.clone(),
config,
);
Expand Down
5 changes: 2 additions & 3 deletions publish/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -578,11 +578,9 @@ pub async fn process_queue_loop(
redis: Option<redis::aio::ConnectionManager>,
config: &janitor::config::Config,
publish_worker: Arc<Mutex<PublishWorker>>,
forge_rate_limiter: Arc<Mutex<HashMap<Forge, chrono::DateTime<Utc>>>>,
vcs_managers: &HashMap<VcsType, Box<dyn VcsManager>>,
interval: chrono::Duration,
auto_publish: bool,
push_limit: Option<i32>,
modify_mp_limit: Option<i32>,
require_binary_diff: bool,
) {
Expand All @@ -595,7 +593,6 @@ pub async fn publish_pending_ready(
config: &janitor::config::Config,
publish_worker: Arc<Mutex<PublishWorker>>,
vcs_managers: &HashMap<VcsType, Box<dyn VcsManager>>,
push_limit: Option<i32>,
require_binary_diff: bool,
) -> Result<(), PublishError> {
todo!();
Expand Down Expand Up @@ -677,4 +674,6 @@ mod tests {
/// Shared, thread-safe state handed to the publish web handlers and workers.
pub struct AppState {
    /// PostgreSQL connection pool (cloned cheaply; sqlx pools are Arc-backed).
    pub conn: sqlx::PgPool,
    /// Per-bucket rate limiter behind a mutex so handlers can both read stats
    /// and mutate limiter state through a shared reference.
    pub bucket_rate_limiter: Mutex<Box<dyn rate_limiter::RateLimiter>>,
    /// Per-forge back-off deadlines: timestamp until which each forge should
    /// not be contacted again (populated when a forge rate-limits us).
    pub forge_rate_limiter: Mutex<HashMap<Forge, chrono::DateTime<Utc>>>,
    /// Optional cap on the number of pushes performed per publish cycle;
    /// `None` means unlimited.
    pub push_limit: Option<usize>,
}
57 changes: 53 additions & 4 deletions publish/src/web.rs
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,59 @@ async fn get_rate_limit() {
unimplemented!()
}

async fn get_all_rate_limits() {
unimplemented!()
/// Rate-limit snapshot for a single bucket, as reported by the
/// bucket rate limiter and serialized in the rate-limit stats endpoint.
#[derive(serde::Serialize, serde::Deserialize)]
struct BucketRateLimit {
    // Currently-open count reported by the limiter's stats; None when unknown.
    open: Option<usize>,
    // Configured maximum for this bucket; None when the limiter has no cap.
    max_open: Option<usize>,
    // Headroom left (max_open - open); None when no cap is configured.
    remaining: Option<usize>,
}

/// Aggregate rate-limit report returned by the "all rate limits" endpoint:
/// per-bucket limiter stats, per-forge back-off deadlines, and the global
/// push limit.
#[derive(serde::Serialize, serde::Deserialize)]
struct RateLimitsInfo {
    // Keyed by bucket name.
    per_bucket: HashMap<String, BucketRateLimit>,
    // Keyed by forge name; value is the time until which that forge is rate-limited.
    per_forge: HashMap<String, chrono::DateTime<chrono::Utc>>,
    // Optional global cap on pushes per cycle; None means unlimited.
    push_limit: Option<usize>,
}

/// HTTP handler: report current rate-limit state as JSON.
///
/// Returns a `RateLimitsInfo` with per-bucket limiter statistics, per-forge
/// back-off deadlines, and the configured global push limit.
async fn get_all_rate_limits(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    // Take the bucket limiter lock once and reuse the guard for both
    // get_stats() and every get_max_open() call, instead of re-locking
    // on each loop iteration.
    let per_bucket = {
        let limiter = state.bucket_rate_limiter.lock().unwrap();
        match limiter.get_stats() {
            Some(stats) => stats
                .per_bucket
                .iter()
                .map(|(bucket, current_open)| {
                    let max_open = limiter.get_max_open(bucket);
                    (
                        bucket.clone(),
                        BucketRateLimit {
                            open: Some(*current_open),
                            max_open,
                            // saturating_sub: plain `-` on usize would panic in
                            // debug builds (and wrap in release) if the open
                            // count ever exceeds the configured maximum.
                            remaining: max_open
                                .map(|max_open| max_open.saturating_sub(*current_open)),
                        },
                    )
                })
                .collect(),
            None => HashMap::new(),
        }
    };

    // RateLimitsInfo is Serialize, so Json can encode it directly; no need
    // for a serde_json::to_value round-trip with an unwrap panic path.
    Json(RateLimitsInfo {
        per_bucket,
        per_forge: state
            .forge_rate_limiter
            .lock()
            .unwrap()
            .iter()
            .map(|(f, t)| (f.forge_name().to_string(), *t))
            .collect(),
        push_limit: state.push_limit,
    })
}

#[derive(serde::Serialize, serde::Deserialize)]
Expand Down Expand Up @@ -388,11 +439,9 @@ WHERE run.id = $1
pub fn app(
state: Arc<AppState>,
worker: Arc<Mutex<crate::PublishWorker>>,
forge_rate_limiter: Arc<Mutex<HashMap<Forge, chrono::DateTime<chrono::Utc>>>>,
vcs_managers: &HashMap<VcsType, Box<dyn VcsManager>>,
require_binary_diff: bool,
modify_mp_limit: Option<i32>,
push_limit: Option<i32>,
redis: Option<redis::aio::ConnectionManager>,
config: &janitor::config::Config,
) -> Router {
Expand Down

0 comments on commit 4130dc9

Please sign in to comment.