Skip to content

Commit 90fa069

Browse files
Clara Rull and meta-codesync[bot]
authored and committed
Delete dewey_res.bzl
Summary: Delete tools/build_defs/android/dewey_res.bzl which provides the dewey_custom_res macro. Dewey backing data was removed on April 1. The only two remaining consumers (oxygen zip and verification tests) are being cleaned up in D99099121 and D99099398. This diff removes the macro definition itself. Part of the Dewey deprecation cleanup. Reviewed By: YousefSalama Differential Revision: D99820098 fbshipit-source-id: bf58ea7fb287d84ba725e1a9903165c4feed2898
1 parent d03800c commit 90fa069

File tree

3 files changed

+100
-96
lines changed

3 files changed

+100
-96
lines changed

eden/mononoke/features/async_requests/worker/src/main.rs

Lines changed: 4 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -122,7 +122,8 @@ impl RepoShardedProcess for WorkerProcess {
122122
.add_repo(repo_name)
123123
.await
124124
.with_context(|| format!("Failure in setting up repo {}", repo_name))?;
125-
let repos = vec![repo.repo_identity.id()];
125+
let repo_id = repo.repo_identity.id();
126+
let repos = vec![repo_id];
126127
info!("Completed setup for repos {:?}", repos);
127128

128129
let queue = Arc::new(AsyncMethodRequestQueue::new(
@@ -138,6 +139,7 @@ impl RepoShardedProcess for WorkerProcess {
138139
self.mononoke.clone(),
139140
self.megarepo.clone(),
140141
self.will_exit.clone(),
142+
Some(repo_id),
141143
)
142144
.await?;
143145
Ok(Arc::new(executor))
@@ -311,6 +313,7 @@ fn run_worker_queue(
311313
mononoke.clone(),
312314
megarepo.clone(),
313315
will_exit.clone(),
316+
None, // non-sharded: no specific repo_id
314317
))?
315318
};
316319
runtime.spawn(async move { executor.execute().await });

eden/mononoke/features/async_requests/worker/src/stats.rs

Lines changed: 86 additions & 91 deletions
Original file line number · Diff line number · Diff line change
@@ -5,15 +5,13 @@
55
* GNU General Public License version 2.
66
*/
77

8-
use std::collections::HashMap;
98
use std::time::Duration;
109

1110
use async_requests::AsyncMethodRequestQueue;
1211
use async_requests::types::RequestStatus;
1312
use context::CoreContext;
1413
use mononoke_api::RepositoryId;
1514
use mononoke_types::Timestamp;
16-
use requests_table::QueueStatsEntry;
1715
use stats::define_stats;
1816
use stats::prelude::*;
1917
use tracing::warn;
@@ -37,9 +35,19 @@ define_stats! {
3735
queue_age_by_repo_and_status: dynamic_singleton_counter("queue.{}.age_s.{}", (repo_id: String, status: String)),
3836
}
3937

38+
/// Report queue stats for this shard's repo only.
39+
///
40+
/// Each shard reports metrics only for its own repo_id. This avoids
41+
/// cross-shard racing on shared counters and prevents stale values
42+
/// when a shard is reassigned: the counter simply goes absent (no
43+
/// data) rather than being stuck at a stale non-zero value. A dead
44+
/// shard detector should alert on absence of data.
45+
///
46+
/// For the non-sharded code path, `repo_id` is `None` and only the
47+
/// global (non-per-repo) counters are reported.
4048
pub(crate) async fn stats_loop(
4149
ctx: &CoreContext,
42-
repo_ids: Vec<RepositoryId>,
50+
repo_id: Option<RepositoryId>,
4351
queue: &AsyncMethodRequestQueue,
4452
) {
4553
loop {
@@ -49,8 +57,10 @@ pub(crate) async fn stats_loop(
4957
Ok(res) => {
5058
process_queue_length_by_status(ctx, &res);
5159
process_queue_age_by_status(ctx, now, &res);
52-
process_queue_length_by_repo_and_status(ctx, &repo_ids, &res);
53-
process_queue_age_by_repo_and_status(ctx, &repo_ids, now, &res);
60+
if let Some(repo_id) = &repo_id {
61+
process_queue_length_by_repo(ctx, repo_id, &res);
62+
process_queue_age_by_repo(ctx, repo_id, now, &res);
63+
}
5464
}
5565
Err(err) => {
5666
STATS::stats_error.add_value(1);
@@ -62,51 +72,22 @@ pub(crate) async fn stats_loop(
6272
}
6373
}
6474

65-
// Keep track of the stats we have already logged. Any missing ones, we will log a 0.
66-
struct Seen {
67-
inner: HashMap<QueueStatsEntry, bool>,
68-
}
69-
70-
impl Seen {
71-
fn new(repo_ids: &Vec<RepositoryId>) -> Self {
72-
let mut seen = HashMap::new();
73-
for status in STATUSES {
74-
for repo_id in repo_ids {
75-
seen.insert(
76-
QueueStatsEntry {
77-
repo_id: Some(repo_id.clone()),
78-
status,
79-
},
80-
false,
81-
);
82-
}
83-
}
84-
Self { inner: seen }
85-
}
86-
87-
fn mark(&mut self, repo_id: Option<RepositoryId>, status: RequestStatus) {
88-
self.inner.insert(QueueStatsEntry { repo_id, status }, true);
89-
}
90-
91-
fn get_missing(&self) -> Vec<QueueStatsEntry> {
92-
self.inner
93-
.iter()
94-
.filter_map(|(entry, seen)| if !*seen { Some(entry) } else { None })
95-
.cloned()
96-
.collect()
97-
}
98-
}
99-
10075
fn process_queue_length_by_status(ctx: &CoreContext, res: &requests_table::QueueStats) {
101-
let mut seen = Seen::new(&vec![]);
76+
// Report whatever the DB returns for global stats, then zero out
77+
// any of the 4 statuses that were absent from the result.
78+
let mut seen = [false; 4];
10279
let stats = &res.queue_length_by_status;
10380
for (status, count) in stats {
104-
seen.mark(None, *status);
81+
if let Some(idx) = status_index(status) {
82+
seen[idx] = true;
83+
}
10584
STATS::queue_length_by_status.set_value(ctx.fb, *count as i64, (status.to_string(),));
10685
}
10786

108-
for entry in seen.get_missing() {
109-
STATS::queue_length_by_status.set_value(ctx.fb, 0, (entry.status.to_string(),));
87+
for (idx, was_seen) in seen.iter().enumerate() {
88+
if !was_seen {
89+
STATS::queue_length_by_status.set_value(ctx.fb, 0, (STATUSES[idx].to_string(),));
90+
}
11091
}
11192
}
11293

@@ -115,79 +96,93 @@ fn process_queue_age_by_status(
11596
now: Timestamp,
11697
res: &requests_table::QueueStats,
11798
) {
118-
let mut seen = Seen::new(&vec![]);
99+
let mut seen = [false; 4];
119100
let stats = &res.queue_age_by_status;
120101
for (status, ts) in stats {
121-
seen.mark(None, *status);
102+
if let Some(idx) = status_index(status) {
103+
seen[idx] = true;
104+
}
122105
let diff = std::cmp::max(now.timestamp_seconds() - ts.timestamp_seconds(), 0);
123106
STATS::queue_age_by_status.set_value(ctx.fb, diff, (status.to_string(),));
124107
}
125108

126-
for entry in seen.get_missing() {
127-
STATS::queue_age_by_status.set_value(ctx.fb, 0, (entry.status.to_string(),));
109+
for (idx, was_seen) in seen.iter().enumerate() {
110+
if !was_seen {
111+
STATS::queue_age_by_status.set_value(ctx.fb, 0, (STATUSES[idx].to_string(),));
112+
}
128113
}
129114
}
130115

131-
fn process_queue_length_by_repo_and_status(
116+
/// Report per-repo queue length for this shard's repo only.
117+
/// If the DB returns no data for a status, we report 0.
118+
fn process_queue_length_by_repo(
132119
ctx: &CoreContext,
133-
repo_ids: &Vec<RepositoryId>,
120+
repo_id: &RepositoryId,
134121
res: &requests_table::QueueStats,
135122
) {
136-
let mut seen = Seen::new(repo_ids);
123+
let mut seen = [false; 4];
124+
let repo_id_str = repo_id.to_string();
137125
let stats = &res.queue_length_by_repo_and_status;
138126
for (entry, count) in stats {
139-
seen.mark(entry.repo_id, entry.status);
140-
STATS::queue_length_by_repo_and_status.set_value(
141-
ctx.fb,
142-
*count as i64,
143-
(
144-
entry.repo_id.unwrap_or(RepositoryId::new(0)).to_string(),
145-
entry.status.to_string(),
146-
),
147-
);
127+
if entry.repo_id.as_ref() == Some(repo_id) {
128+
if let Some(idx) = status_index(&entry.status) {
129+
seen[idx] = true;
130+
}
131+
STATS::queue_length_by_repo_and_status.set_value(
132+
ctx.fb,
133+
*count as i64,
134+
(repo_id_str.clone(), entry.status.to_string()),
135+
);
136+
}
148137
}
149138

150-
for entry in seen.get_missing() {
151-
STATS::queue_length_by_repo_and_status.set_value(
152-
ctx.fb,
153-
0,
154-
(
155-
entry.repo_id.unwrap_or(RepositoryId::new(0)).to_string(),
156-
entry.status.to_string(),
157-
),
158-
);
139+
for (idx, was_seen) in seen.iter().enumerate() {
140+
if !was_seen {
141+
STATS::queue_length_by_repo_and_status.set_value(
142+
ctx.fb,
143+
0,
144+
(repo_id_str.clone(), STATUSES[idx].to_string()),
145+
);
146+
}
159147
}
160148
}
161149

162-
fn process_queue_age_by_repo_and_status(
150+
/// Report per-repo queue age for this shard's repo only.
151+
/// If the DB returns no data for a status, we report 0.
152+
fn process_queue_age_by_repo(
163153
ctx: &CoreContext,
164-
repo_ids: &Vec<RepositoryId>,
154+
repo_id: &RepositoryId,
165155
now: Timestamp,
166156
res: &requests_table::QueueStats,
167157
) {
168-
let mut seen = Seen::new(repo_ids);
158+
let mut seen = [false; 4];
159+
let repo_id_str = repo_id.to_string();
169160
let stats = &res.queue_age_by_repo_and_status;
170161
for (entry, ts) in stats {
171-
seen.mark(entry.repo_id, entry.status);
172-
let diff = std::cmp::max(now.timestamp_seconds() - ts.timestamp_seconds(), 0);
173-
STATS::queue_age_by_repo_and_status.set_value(
174-
ctx.fb,
175-
diff,
176-
(
177-
entry.repo_id.unwrap_or(RepositoryId::new(0)).to_string(),
178-
entry.status.to_string(),
179-
),
180-
);
162+
if entry.repo_id.as_ref() == Some(repo_id) {
163+
if let Some(idx) = status_index(&entry.status) {
164+
seen[idx] = true;
165+
}
166+
let diff = std::cmp::max(now.timestamp_seconds() - ts.timestamp_seconds(), 0);
167+
STATS::queue_age_by_repo_and_status.set_value(
168+
ctx.fb,
169+
diff,
170+
(repo_id_str.clone(), entry.status.to_string()),
171+
);
172+
}
181173
}
182174

183-
for entry in seen.get_missing() {
184-
STATS::queue_age_by_repo_and_status.set_value(
185-
ctx.fb,
186-
0,
187-
(
188-
entry.repo_id.unwrap_or(RepositoryId::new(0)).to_string(),
189-
entry.status.to_string(),
190-
),
191-
);
175+
for (idx, was_seen) in seen.iter().enumerate() {
176+
if !was_seen {
177+
STATS::queue_age_by_repo_and_status.set_value(
178+
ctx.fb,
179+
0,
180+
(repo_id_str.clone(), STATUSES[idx].to_string()),
181+
);
182+
}
192183
}
193184
}
185+
186+
fn status_index(status: &RequestStatus) -> Option<usize> {
187+
STATUSES.iter().position(|s| s == status)
188+
}

eden/mononoke/features/async_requests/worker/src/worker.rs

Lines changed: 10 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -42,8 +42,8 @@ use futures_stats::TimedFutureExt;
4242
use hostname::get_hostname;
4343
use megarepo_api::MegarepoApi;
4444
use mononoke_api::Mononoke;
45-
use mononoke_api::MononokeRepo;
4645
use mononoke_api::Repo;
46+
use mononoke_api::RepositoryId;
4747
use mononoke_macros::mononoke;
4848
use mononoke_types::Timestamp;
4949
use stats::define_stats;
@@ -86,6 +86,8 @@ pub struct AsyncMethodRequestWorker {
8686
will_exit: Arc<AtomicBool>,
8787
limit: Option<usize>,
8888
concurrency_limit: usize,
89+
/// The repo this shard is responsible for. `None` for non-sharded workers.
90+
repo_id: Option<RepositoryId>,
8991
}
9092

9193
impl AsyncMethodRequestWorker {
@@ -96,6 +98,7 @@ impl AsyncMethodRequestWorker {
9698
mononoke: Arc<Mononoke<Repo>>,
9799
megarepo: Arc<MegarepoApi<Repo>>,
98100
will_exit: Arc<AtomicBool>,
101+
repo_id: Option<RepositoryId>,
99102
) -> Result<Self, Error> {
100103
let name = {
101104
let tw_job_cluster = std::env::var("TW_JOB_CLUSTER");
@@ -121,6 +124,7 @@ impl AsyncMethodRequestWorker {
121124
will_exit,
122125
limit: args.request_limit,
123126
concurrency_limit: args.jobs,
127+
repo_id,
124128
})
125129
}
126130
}
@@ -133,11 +137,13 @@ impl RepoShardedProcessExecutor for AsyncMethodRequestWorker {
133137
/// will_exit atomic bool is a flag to prevent the worker from grabbing new
134138
/// items from the queue and gracefully terminate.
135139
async fn execute(&self) -> Result<()> {
136-
// Start the stats logger loop
140+
// Start the stats logger loop.
141+
// Each shard reports metrics only for its own repo to avoid
142+
// cross-shard counter races and stale values on shard reassignment.
137143
let (stats, stats_abort_handle) = abortable({
138144
cloned!(self.ctx, self.queue);
139-
let repo_ids = self.mononoke.known_repo_ids().clone();
140-
async move { stats_loop(&ctx, repo_ids, &queue).await }
145+
let repo_id = self.repo_id;
146+
async move { stats_loop(&ctx, repo_id, &queue).await }
141147
});
142148
let _stats = mononoke::spawn_task(stats);
143149

0 commit comments

Comments (0)