## Documentation Index
Fetch the complete documentation index at: https://docs.boostgpt.co/llms.txt
Use this file to discover all available pages before exploring further.
## Rate Limits by Plan
| Plan | Requests/min | Requests/day |
|---|---|---|
| Free | 20 | 1,000 |
| Starter | 60 | 10,000 |
| Pro | 200 | 50,000 |
| Scale | Custom | Custom |
API responses include rate limit information:
```
X-RateLimit-Limit: 60
X-RateLimit-Remaining: 45
X-RateLimit-Reset: 1704110400
```
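If you call the REST endpoint directly rather than through the SDK's `client.chat`, you can read these headers to throttle proactively. A minimal sketch using `fetch`; the URL and request body are illustrative placeholders, and only the header names come from this page:

```javascript
// Placeholder URL and body: substitute the real endpoint from the API reference.
async function chatWithHeaderCheck(apiKey, body) {
  const res = await fetch('https://api.example.com/chat', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(body),
  });
  const remaining = Number(res.headers.get('X-RateLimit-Remaining'));
  const reset = Number(res.headers.get('X-RateLimit-Reset')); // Unix seconds
  if (remaining === 0) {
    const waitMs = Math.max(0, reset * 1000 - Date.now());
    console.log(`Quota exhausted; window resets in ${waitMs}ms`);
  }
  return res.json();
}
```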
## Handle Rate Limits
Detect rate limit errors in the response and retry after a short pause:

```javascript
async function makeRequest(client, params) {
  const response = await client.chat(params);
  // The API reports rate limiting through the response's err field
  if (response.err && response.err.includes('Rate limit')) {
    const retryAfter = 60; // seconds
    console.log(`Rate limited. Retry after ${retryAfter}s`);
    await sleep(retryAfter * 1000);
    // Simple version: retries indefinitely; see the backoff variant below
    return makeRequest(client, params);
  }
  return response;
}

function sleep(ms) {
  return new Promise(resolve => setTimeout(resolve, ms));
}
```
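Usage is a drop-in for `client.chat`; `bot_id` and `message` follow the same shape used elsewhere on this page, and `'Hello'` is just an illustrative message:

```javascript
// Waits out rate limits transparently, then returns the normal response
const response = await makeRequest(client, { bot_id, message: 'Hello' });
```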
## Exponential Backoff
Implement exponential backoff for retries:
```javascript
async function requestWithBackoff(client, params, maxRetries = 5) {
  for (let i = 0; i < maxRetries; i++) {
    const response = await client.chat(params);
    if (!response.err || !response.err.includes('Rate limit')) {
      return response;
    }
    const delay = Math.min(1000 * Math.pow(2, i), 32000); // cap at 32s
    console.log(`Retry ${i + 1}/${maxRetries} in ${delay}ms`);
    await sleep(delay);
  }
  return { err: 'Max retries exceeded', response: null };
}
```
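When many clients hit the limit at the same time, pure exponential backoff makes them all retry in lockstep. A common refinement is to add random jitter to each delay; this is a general pattern, not something the API requires:

```javascript
// Full jitter: pick a random delay between 0 and the capped exponential value
function backoffDelay(attempt, baseMs = 1000, capMs = 32000) {
  const expDelay = Math.min(baseMs * Math.pow(2, attempt), capMs);
  return Math.random() * expDelay;
}
```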
## Batch Requests
Process multiple items efficiently:
```javascript
async function batchProcess(client, items, batchSize = 5) {
  const results = [];
  for (let i = 0; i < items.length; i += batchSize) {
    const batch = items.slice(i, i + batchSize);
    const batchResults = await Promise.all(
      batch.map(item => client.chat({
        bot_id: item.bot_id,
        message: item.message
      }))
    );
    results.push(...batchResults);
    // Wait between batches to avoid rate limits
    if (i + batchSize < items.length) {
      await sleep(1000);
    }
  }
  return results;
}
```
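One caveat: `Promise.all` rejects as soon as any request in the batch throws, discarding the other results. If `client.chat` can throw (for example on network errors), the standard `Promise.allSettled` keeps every item's outcome and can replace the `Promise.all` call above:

```javascript
// Drop-in for the Promise.all call above: each entry is either
// { status: 'fulfilled', value: ... } or { status: 'rejected', reason: ... }
const batchResults = await Promise.allSettled(
  batch.map(item => client.chat({
    bot_id: item.bot_id,
    message: item.message
  }))
);
```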
## Request Queue
Use a queue for high-volume applications:
```javascript
class RequestQueue {
  constructor(client, requestsPerMinute = 60) {
    this.client = client;
    this.queue = [];
    this.processing = false;
    this.interval = 60000 / requestsPerMinute; // minimum gap between requests (ms)
  }

  add(params) {
    return new Promise((resolve, reject) => {
      this.queue.push({ params, resolve, reject });
      if (!this.processing) {
        this.process();
      }
    });
  }

  async process() {
    this.processing = true;
    while (this.queue.length > 0) {
      const { params, resolve, reject } = this.queue.shift();
      try {
        resolve(await this.client.chat(params));
      } catch (err) {
        reject(err); // settle the promise even if the request throws
      }
      if (this.queue.length > 0) {
        await sleep(this.interval);
      }
    }
    this.processing = false;
  }
}

// Usage
const queue = new RequestQueue(client, 60);
const response = await queue.add({ bot_id, message });
```
## Monitor Usage
Track your API usage:
```javascript
// Client-side counter for a 60-requests-per-minute budget (the Starter limit)
let requestCount = 0;
let resetTime = Date.now() + 60000;

async function trackedRequest(client, params) {
  if (Date.now() >= resetTime) {
    requestCount = 0;
    resetTime = Date.now() + 60000;
  }
  if (requestCount >= 60) {
    const waitTime = resetTime - Date.now();
    console.log(`Rate limit reached. Waiting ${waitTime}ms`);
    await sleep(waitTime);
    requestCount = 0;
    resetTime = Date.now() + 60000;
  }
  requestCount++;
  return await client.chat(params);
}
```
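The hard-coded 60 matches the Starter plan's per-minute limit from the table above. A small, illustrative refactor makes the budget configurable for other plans:

```javascript
// Factory that builds a tracked request function for any per-minute budget
function makeTracker(client, limitPerMinute) {
  let count = 0;
  let resetAt = Date.now() + 60000;
  return async function (params) {
    if (Date.now() >= resetAt) {
      count = 0;
      resetAt = Date.now() + 60000;
    }
    if (count >= limitPerMinute) {
      await sleep(Math.max(0, resetAt - Date.now()));
      count = 0;
      resetAt = Date.now() + 60000;
    }
    count++;
    return client.chat(params);
  };
}

// Usage: 200 requests/min matches the Pro plan
const proRequest = makeTracker(client, 200);
const response = await proRequest({ bot_id, message });
```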
## Best Practices

- **Cache responses** - cache common queries to reduce API calls (see the sketch below)
- **Use webhooks** - receive events instead of polling
- **Batch operations** - group multiple requests when possible
- **Monitor limits** - track usage to avoid hitting limits
- **Upgrade plan** - consider upgrading for higher limits
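The caching suggestion above can start from a minimal in-memory sketch. The keying scheme and lack of expiry here are simplifying assumptions; add a TTL and an eviction policy before relying on it:

```javascript
// Naive in-memory cache keyed by bot + message text (illustrative only).
// Assumes identical inputs should return identical answers, which may not
// hold for bots with non-deterministic or time-sensitive replies.
const cache = new Map();

async function cachedChat(client, params) {
  const key = `${params.bot_id}:${params.message}`;
  if (!cache.has(key)) {
    cache.set(key, await client.chat(params));
  }
  return cache.get(key);
}
```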
## Next Steps

- **Troubleshooting** - debug common issues