The iRacing API enforces rate limits to ensure fair usage and system stability. The SDK detects rate limit responses and throws an `IRacingError` with `isRateLimited = true`, but does not automatically retry or throttle requests.

Instead of building your own rate limiter, you can provide a store that the SDK’s underlying `HttpClient` will use to throttle requests automatically:
```typescript
import { IRacingDataClient } from 'iracing-data-client';
import { MemoryRateLimitStore } from '@http-client-toolkit/store-memory'; // example

const iracing = new IRacingDataClient({
  auth: { /* ... */ },
  stores: {
    rateLimit: new MemoryRateLimitStore({ maxRequests: 100, windowMs: 60_000 }),
  },
});

// Requests are now throttled automatically; no wrapper needed
const data = await iracing.member.info();
```

The `stores` option is completely optional. When omitted, the SDK behaves exactly as before.
Check the `@http-client-toolkit` organisation for available store packages, or implement the `RateLimitStore` interface yourself.
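If you do roll your own store, the core of most implementations is a sliding-window counter. The sketch below is illustrative only: it is not the actual `RateLimitStore` interface (check the package typings for the real method names and signatures). The `SlidingWindowOptions` type and `acquire()` method are assumptions made for this example; you would adapt the bookkeeping to whatever shape the interface requires.

```typescript
// Illustrative only: the real RateLimitStore interface may differ.
// This shows the sliding-window bookkeeping such a store typically performs.
interface SlidingWindowOptions {
  maxRequests: number; // requests allowed per window
  windowMs: number;    // window length in milliseconds
}

class SlidingWindowStore {
  private timestamps: number[] = [];

  constructor(private options: SlidingWindowOptions) {}

  // Resolves once a request slot is free in the current window.
  async acquire(): Promise<void> {
    const now = Date.now();

    // Discard timestamps that have aged out of the window
    this.timestamps = this.timestamps.filter(
      (t) => now - t < this.options.windowMs
    );

    if (this.timestamps.length >= this.options.maxRequests) {
      // Wait until the oldest request leaves the window, then re-check
      const waitMs = this.options.windowMs - (now - this.timestamps[0]);
      await new Promise((r) => setTimeout(r, waitMs));
      return this.acquire();
    }

    this.timestamps.push(now);
  }
}
```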
The SDK throws an `IRacingError` when the API returns a 429 status:

```typescript
import { IRacingDataClient, IRacingError } from 'iracing-data-client';

try {
  const data = await iracing.track.get();
} catch (error) {
  if (error instanceof IRacingError && error.isRateLimited) {
    console.log('Rate limit exceeded!');
    console.log('Status:', error.status); // 429
  }
}
```

Gradually increase wait time between retries:
```typescript
async function withExponentialBackoff<T>(
  fn: () => Promise<T>,
  maxRetries = 5,
  baseDelay = 1000
): Promise<T> {
  let lastError: Error | undefined;

  for (let i = 0; i < maxRetries; i++) {
    try {
      return await fn();
    } catch (error) {
      lastError = error as Error;

      if (error instanceof IRacingError && error.isRateLimited) {
        // Double the delay on each attempt and add jitter to avoid bursts of retries
        const delay = baseDelay * Math.pow(2, i);
        const jitter = Math.random() * 1000;

        console.log(`Rate limited. Retry ${i + 1}/${maxRetries} in ${delay}ms`);
        await new Promise((r) => setTimeout(r, delay + jitter));
      } else {
        throw error;
      }
    }
  }

  throw lastError!;
}

// Usage
const data = await withExponentialBackoff(() => iracing.track.get());
```

Proactive concurrency control will be built into the SDK. Until then, you can implement your own:
```typescript
class RateLimiter {
  private queue: Array<() => void> = [];
  private running = 0;

  constructor(
    private maxConcurrent = 2,
    private minTime = 100 // ms between requests
  ) {}

  async execute<T>(fn: () => Promise<T>): Promise<T> {
    await this.waitForSlot();

    this.running++;
    const startTime = Date.now();

    try {
      return await fn();
    } finally {
      const elapsed = Date.now() - startTime;
      if (elapsed < this.minTime) {
        await new Promise((r) => setTimeout(r, this.minTime - elapsed));
      }

      this.running--;
      this.processQueue();
    }
  }

  private waitForSlot(): Promise<void> {
    if (this.running < this.maxConcurrent) {
      return Promise.resolve();
    }

    return new Promise((resolve) => {
      this.queue.push(resolve);
    });
  }

  private processQueue() {
    if (this.queue.length > 0 && this.running < this.maxConcurrent) {
      const next = this.queue.shift();
      next?.();
    }
  }
}

// Usage
const limiter = new RateLimiter(2, 100);

const results = await Promise.all([
  limiter.execute(() => iracing.car.get()),
  limiter.execute(() => iracing.track.get()),
  limiter.execute(() => iracing.series.get()),
]);
```

A token bucket algorithm will be available as a built-in option. Here’s a reference implementation:
```typescript
class TokenBucket {
  private tokens: number;
  private lastRefill: number;

  constructor(
    private capacity = 10,
    private refillRate = 1, // tokens per second
    private refillInterval = 1000 // ms
  ) {
    this.tokens = capacity;
    this.lastRefill = Date.now();
  }

  async acquire(tokens = 1): Promise<void> {
    await this.refill();

    while (this.tokens < tokens) {
      await new Promise((r) => setTimeout(r, this.refillInterval));
      await this.refill();
    }

    this.tokens -= tokens;
  }

  private async refill() {
    const now = Date.now();
    const elapsed = now - this.lastRefill;
    const tokensToAdd = Math.floor((elapsed / 1000) * this.refillRate);

    if (tokensToAdd > 0) {
      this.tokens = Math.min(this.capacity, this.tokens + tokensToAdd);
      this.lastRefill = now;
    }
  }

  getAvailableTokens(): number {
    return this.tokens;
  }
}

// Usage
const bucket = new TokenBucket(10, 2); // 10 tokens, 2/sec refill

async function makeRequest() {
  await bucket.acquire(1);
  return iracing.track.get();
}
```

Reduce API calls by batching requests:
```typescript
// Instead of multiple individual requests
const member1 = await iracing.member.get({ custIds: [123456] });
const member2 = await iracing.member.get({ custIds: [789012] });

// Use a single batched request (up to 50 IDs)
const allMembers = await iracing.member.get({
  custIds: [123456, 789012, 345678],
});
```

For larger workloads, process items in chunks and pause between them:

```typescript
async function processInChunks<T, R>(
  items: T[],
  processor: (item: T) => Promise<R>,
  chunkSize = 5,
  delay = 1000
): Promise<R[]> {
  const results: R[] = [];

  for (let i = 0; i < items.length; i += chunkSize) {
    const chunk = items.slice(i, i + chunkSize);
    const chunkResults = await Promise.all(chunk.map(processor));
    results.push(...chunkResults);

    if (i + chunkSize < items.length) {
      await new Promise((r) => setTimeout(r, delay));
    }
  }

  return results;
}
```
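For completeness, a usage sketch in the same style as the other helpers on this page. The customer IDs are placeholders; each chunk item is itself a batch of up to 50 IDs, matching the batched `member.get` call shown above.

```typescript
// Usage (placeholder IDs): split a long ID list into batches of 50,
// then run up to 5 batched calls per chunk with a 1s pause between chunks
const custIds: number[] = [123456, 789012, 345678 /* ... */];

const idBatches: number[][] = [];
for (let i = 0; i < custIds.length; i += 50) {
  idBatches.push(custIds.slice(i, i + 50));
}

const responses = await processInChunks(
  idBatches,
  (ids) => iracing.member.get({ custIds: ids }),
  5,
  1000
);
```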
A built-in request queue with rate limiting is planned. Until then, implement your own:

```typescript
class RequestQueue {
  private queue: Array<{
    fn: () => Promise<any>;
    resolve: (value: any) => void;
    reject: (error: any) => void;
  }> = [];
  private processing = false;
  private requestCount = 0;
  private windowStart = Date.now();

  constructor(
    private maxRequests = 100,
    private windowMs = 60000 // 1 minute
  ) {}

  async add<T>(fn: () => Promise<T>): Promise<T> {
    return new Promise((resolve, reject) => {
      this.queue.push({ fn, resolve, reject });
      this.process();
    });
  }

  private async process() {
    if (this.processing) return;
    this.processing = true;

    while (this.queue.length > 0) {
      if (this.shouldWait()) {
        await this.waitForReset();
      }

      const item = this.queue.shift();
      if (!item) break;

      try {
        const result = await item.fn();
        item.resolve(result);
        this.requestCount++;
      } catch (error) {
        // If rate limited, requeue the item and wait; otherwise fail it
        if (error instanceof IRacingError && error.isRateLimited) {
          this.queue.unshift(item);
          await this.waitForReset();
        } else {
          item.reject(error);
        }
      }

      // Small delay between requests
      await new Promise((r) => setTimeout(r, 50));
    }

    this.processing = false;
  }

  private shouldWait(): boolean {
    const now = Date.now();

    if (now - this.windowStart > this.windowMs) {
      this.windowStart = now;
      this.requestCount = 0;
      return false;
    }

    return this.requestCount >= this.maxRequests;
  }

  private async waitForReset() {
    const waitTime = this.windowMs - (Date.now() - this.windowStart);
    console.log(`Rate limit reached. Waiting ${waitTime}ms`);
    await new Promise((r) => setTimeout(r, waitTime));
    this.windowStart = Date.now();
    this.requestCount = 0;
  }
}

// Usage
const queue = new RequestQueue(100, 60000);

const data = await queue.add(() => iracing.track.get());
```

Track and log rate limit usage:
```typescript
class RateLimitMonitor {
  private attempts = 0;
  private rateLimitHits = 0;
  private successfulRequests = 0;
  private startTime = Date.now();

  async execute<T>(fn: () => Promise<T>): Promise<T> {
    this.attempts++;

    try {
      const result = await fn();
      this.successfulRequests++;
      return result;
    } catch (error) {
      if (error instanceof IRacingError && error.isRateLimited) {
        this.rateLimitHits++;
        console.warn(`Rate limit hit! Total: ${this.rateLimitHits}`);
      }
      throw error;
    }
  }

  getStats() {
    const elapsed = (Date.now() - this.startTime) / 1000;
    const requestsPerSecond = this.successfulRequests / elapsed;

    return {
      attempts: this.attempts,
      successful: this.successfulRequests,
      rateLimitHits: this.rateLimitHits,
      failureRate: `${((this.rateLimitHits / this.attempts) * 100).toFixed(2)}%`,
      requestsPerSecond: requestsPerSecond.toFixed(2),
      uptime: `${elapsed.toFixed(0)}s`,
    };
  }
}

// Usage
const monitor = new RateLimitMonitor();

const data = await monitor.execute(() => iracing.track.get());

// Check stats periodically
setInterval(() => {
  console.log('Rate Limit Stats:', monitor.getStats());
}, 30000);
```
In summary:

- **Cache Aggressively**: Use caching to reduce unnecessary API calls
- **Batch Requests**: Combine multiple requests when the API supports it
- **Add Delays**: Include small delays between requests to avoid bursts
- **Handle Gracefully**: Implement proper retry logic with backoff (see the combined sketch below)
- **Don't Hammer**: Avoid rapid successive requests to the same endpoint
- **Don't Ignore Limits**: Respect rate limit errors and wait before retrying
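These practices compose. As a closing sketch that uses only the helpers defined earlier on this page, wrap each call in the concurrency limiter and retry any rate-limited failures with exponential backoff:

```typescript
// Space requests out with RateLimiter and retry 429s with withExponentialBackoff
const limiter = new RateLimiter(2, 100);

function safeRequest<T>(fn: () => Promise<T>): Promise<T> {
  return withExponentialBackoff(() => limiter.execute(fn));
}

const [cars, tracks, series] = await Promise.all([
  safeRequest(() => iracing.car.get()),
  safeRequest(() => iracing.track.get()),
  safeRequest(() => iracing.series.get()),
]);
```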