-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrateLimiter.js
More file actions
189 lines (162 loc) · 7.09 KB
/
rateLimiter.js
File metadata and controls
189 lines (162 loc) · 7.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
'use strict';
// =============================================================================
// rateLimiter.js — Telegram API rate limiter (Upgraded)
// =============================================================================
//
// Enforces:
// - Global: ~35–40ms between any two Telegram API calls (~25–28/sec)
// - Per-chat: 1100ms between consecutive messages to the same chat
// - Timeout: API calls that hang >15s are aborted safely
//
// Improvements:
// - Jitter smoothing to avoid burst clustering under heavy load
// - Global timestamp updated AFTER call start for smoother throughput
// - Stronger Promise isolation so one failure never breaks the chain
// - More accurate per-chat cleanup
// - Better diagnostics for /health/full
// - throttleWithConfig() for testall/audit use with optional wait callback
//
// Fully compatible with Node >=20 and the Runewager architecture.
// =============================================================================
// ─── Tuning constants ─────────────────────────────────────────────────────────
const GLOBAL_GAP_MS = 35; // minimum spacing between any two API calls (~28/sec; Telegram caps at 30/sec)
const CHAT_GAP_MS = 1100; // minimum spacing between messages to one chat (1 msg/sec)
const API_CALL_TIMEOUT_MS = 15000; // hard ceiling on a single Telegram API call
const JITTER_MS = 5; // random 0..4ms smear so queued calls don't align into bursts
// ─── State ────────────────────────────────────────────────────────────────────
const _chatQueues = new Map(); // chatId → tail Promise of that chat's serial queue
let _globalTail = Promise.resolve(); // tail of the global serial chain
let _globalLast = 0; // epoch ms at which the most recent API call started
// ─── Helpers ──────────────────────────────────────────────────────────────────
// Promise that fulfills after `ms` milliseconds.
const _delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
// Random integer in [0, JITTER_MS).
const _jitter = () => Math.trunc(Math.random() * JITTER_MS);
// ─── Global rate slot ─────────────────────────────────────────────────────────
/**
* Guarantees GLOBAL_GAP_MS spacing between API calls globally.
* Adds jitter to avoid burst alignment.
*
* @param {() => Promise<any>} fn
* @returns {Promise<any>}
*/
/**
 * Guarantees GLOBAL_GAP_MS spacing between API calls globally.
 * Adds jitter to avoid burst alignment, and aborts calls that hang
 * longer than API_CALL_TIMEOUT_MS.
 *
 * Fix vs previous version: the timeout racer used _delay(), whose
 * setTimeout was never cancelled — every call (even fast, successful
 * ones) left a 15s pending timer behind, accumulating stale handles
 * under load and delaying process shutdown. The timer is now held
 * explicitly and cleared in `finally` as soon as the race settles.
 *
 * @param {() => Promise<any>} fn - Async Telegram API call.
 * @returns {Promise<any>} Resolves/rejects with fn's outcome, or rejects
 *   with a timeout Error after API_CALL_TIMEOUT_MS.
 */
function _globalSlot(fn) {
  const resultP = _globalTail.then(async () => {
    const now = Date.now();
    const wait = Math.max(0, _globalLast + GLOBAL_GAP_MS - now);
    if (wait > 0) await _delay(wait + _jitter());
    // Update AFTER spacing, BEFORE call
    _globalLast = Date.now();

    let timeoutHandle;
    const timeoutP = new Promise((_, reject) => {
      timeoutHandle = setTimeout(() => {
        reject(new Error(`rateLimiter: API call timed out after ${API_CALL_TIMEOUT_MS}ms`));
      }, API_CALL_TIMEOUT_MS);
    });
    try {
      return await Promise.race([Promise.resolve().then(fn), timeoutP]);
    } finally {
      clearTimeout(timeoutHandle); // don't leak the timer once the race settles
    }
  });
  // Extend the chain with a settled-either-way copy so one failure
  // never breaks the global chain for subsequent callers.
  _globalTail = resultP.then(() => {}, () => {});
  return resultP;
}
// ─── Public API: per-chat queue ───────────────────────────────────────────────
/**
* Enqueue fn for a specific chatId.
*
* - Per-chat serialization
* - Per-chat spacing (CHAT_GAP_MS)
* - Global throttle
*
* @param {string|number|null} chatId - Telegram chat/user ID (null → global queue)
* @param {() => Promise<any>} fn - Async Telegram API call
* @returns {Promise<any>}
*/
/**
 * Enqueue fn for a specific chatId.
 *
 * Serializes calls per chat, enforces CHAT_GAP_MS between consecutive
 * calls to the same chat, and routes every call through the global
 * throttle. A null/undefined chatId falls into a shared '_global' queue.
 *
 * @param {string|number|null} chatId - Telegram chat/user ID (null → global queue)
 * @param {() => Promise<any>} fn - Async Telegram API call
 * @returns {Promise<any>} Outcome of fn (queue delays are invisible to the caller).
 */
function enqueue(chatId, fn) {
  const key = chatId == null ? '_global' : String(chatId);
  const prevTail = _chatQueues.get(key) ?? Promise.resolve();

  let settleOk;
  let settleErr;
  const resultP = new Promise((res, rej) => {
    settleOk = res;
    settleErr = rej;
  });

  const newTail = prevTail
    // Forward fn's outcome to the caller without letting a rejection
    // propagate down this chat's chain.
    .then(() => _globalSlot(fn).then(settleOk, settleErr))
    // Enforce the per-chat gap before the next call, success or failure.
    .then(() => _delay(CHAT_GAP_MS))
    .catch(() => {
      // Never break per-chat chain
    });

  _chatQueues.set(key, newTail);

  // Drop the queue entry once drained — but only if no newer tail replaced it.
  newTail
    .then(() => {
      if (_chatQueues.get(key) === newTail) _chatQueues.delete(key);
    })
    .catch(() => {});

  return resultP;
}
// ─── Public API: global throttle only ─────────────────────────────────────────
/**
* Global throttle only — no per-chat serialization.
* Use for answerCallbackQuery, editMessageText, etc.
*
* @param {() => Promise<any>} fn
* @returns {Promise<any>}
*/
function globalThrottle(fn) {
  // Thin passthrough: global spacing + timeout only, no per-chat queueing.
  return _globalSlot(fn);
}
// ─── Diagnostics ─────────────────────────────────────────────────────────────
/**
* Return a snapshot of rate-limiter internals for observability.
* @returns {{ pendingQueues: number, globalLastMs: number }}
*/
/**
 * Snapshot of rate-limiter internals for observability (/health/full).
 *
 * @returns {{ pendingQueues: number, globalLastMs: number }}
 *   pendingQueues — chats with an active (not-yet-drained) queue;
 *   globalLastMs  — epoch ms when the most recent API call started.
 */
function stats() {
  const snapshot = {
    pendingQueues: _chatQueues.size,
    globalLastMs: _globalLast,
  };
  return snapshot;
}
/**
* Return current tuning constants for observability / testall reporting.
* @returns {{ globalGapMs: number, chatGapMs: number, apiCallTimeoutMs: number, jitterMs: number }}
*/
/**
 * Current tuning constants, exposed for observability / testall reporting.
 *
 * @returns {{ globalGapMs: number, chatGapMs: number, apiCallTimeoutMs: number, jitterMs: number }}
 */
function settings() {
  const tuning = {
    globalGapMs: GLOBAL_GAP_MS,
    chatGapMs: CHAT_GAP_MS,
    apiCallTimeoutMs: API_CALL_TIMEOUT_MS,
    jitterMs: JITTER_MS,
  };
  return tuning;
}
/**
* Route fn through the rate limiter with config-driven chatId + globalOnly selection.
* Optionally calls onWait(predictedWaitMs, config) before scheduling.
*
* Used by /testall and scripts/testall_runtime.js so the audit simulation
* respects the same rate limits as live traffic.
*
* @param {{ chatId?: string|number|null, globalOnly?: boolean, onWait?: Function }} opts
* @param {() => Promise<any>} fn
* @returns {Promise<any>}
*/
/**
 * Route fn through the rate limiter with config-driven chatId + globalOnly
 * selection. If scheduling would wait on the global gap, optionally notifies
 * onWait(predictedWaitMs, config) first (best-effort; its errors are ignored).
 *
 * Used by /testall and scripts/testall_runtime.js so the audit simulation
 * respects the same rate limits as live traffic.
 *
 * @param {{ chatId?: string|number|null, globalOnly?: boolean, onWait?: Function }} opts
 * @param {() => Promise<any>} fn
 * @returns {Promise<any>}
 */
function throttleWithConfig({ chatId = null, globalOnly = false, onWait = null } = {}, fn) {
  const cfg = settings();
  const waitMs = Math.max(0, _globalLast + cfg.globalGapMs - Date.now());
  if (waitMs > 0 && typeof onWait === 'function') {
    try {
      onWait(waitMs, cfg);
    } catch (_) {
      // onWait is a passive observer — its failures must never block scheduling
    }
  }
  return globalOnly ? globalThrottle(fn) : enqueue(chatId, fn);
}
module.exports = { enqueue, globalThrottle, stats, settings, throttleWithConfig };