Complete implementation ready for Coolify: - Node.js 22 + Fastify + socket.io backend - PostgreSQL 16 + Redis 7 services - Docker Compose configuration - Deployment scripts and documentation Co-Authored-By: Paperclip <noreply@paperclip.ing>
155 lines
4.7 KiB
TypeScript
#!/usr/bin/env tsx
|
|
/**
|
|
* Load test: 20 concurrent agents sending messages
|
|
* Target: p99 latency < 100ms on LAN
|
|
*/
|
|
|
|
import { io, type Socket } from 'socket.io-client';
|
|
|
|
// Load-test parameters (20 agents x 50 messages = 1000 messages total).
const NUM_AGENTS = 20;

const MESSAGES_PER_AGENT = 50;

// Server under test; override with TEST_URL for non-local runs.
const BASE_URL = process.env.TEST_URL || 'http://localhost:3000';

// One timing sample per successfully acknowledged message.
interface MessageLatency {
  // 1-based agent index
  agentId: number;
  // 0-based message index within that agent
  messageNum: number;
  // round-trip milliseconds from emit to server ack
  latency: number;
}

// Shared module-level state, written concurrently by all agent callbacks
// (safe: Node runs callbacks on a single thread).
const latencies: MessageLatency[] = [];

// Count of acks received across all agents, success or error.
let messagesReceived = 0;

const totalMessages = NUM_AGENTS * MESSAGES_PER_AGENT;
|
|
|
|
async function createAgent(agentIndex: number, jwt: string, roomId: string): Promise<void> {
|
|
return new Promise((resolve, reject) => {
|
|
const socket: Socket = io(`${BASE_URL}/agents`, {
|
|
auth: { jwt },
|
|
transports: ['websocket'],
|
|
});
|
|
|
|
socket.on('connect_error', (err) => {
|
|
console.error(`Agent ${agentIndex} connection error:`, err.message);
|
|
reject(err);
|
|
});
|
|
|
|
socket.on('agent:hello-ack', () => {
|
|
console.log(`Agent ${agentIndex} connected`);
|
|
|
|
// Send messages
|
|
for (let i = 0; i < MESSAGES_PER_AGENT; i++) {
|
|
setTimeout(() => {
|
|
const startTime = Date.now();
|
|
socket.emit(
|
|
'message:send',
|
|
{
|
|
roomId,
|
|
body: `Load test message ${i} from agent ${agentIndex}`,
|
|
},
|
|
(response: { messageId?: string; error?: string }) => {
|
|
const latency = Date.now() - startTime;
|
|
messagesReceived++;
|
|
|
|
if (response.error) {
|
|
console.error(`Agent ${agentIndex} message ${i} failed:`, response.error);
|
|
} else {
|
|
latencies.push({ agentId: agentIndex, messageNum: i, latency });
|
|
}
|
|
|
|
if (messagesReceived === totalMessages) {
|
|
socket.disconnect();
|
|
resolve();
|
|
}
|
|
},
|
|
);
|
|
}, i * 100); // 100ms between messages per agent
|
|
}
|
|
});
|
|
|
|
socket.on('error', (err) => {
|
|
console.error(`Agent ${agentIndex} error:`, err);
|
|
});
|
|
});
|
|
}
|
|
|
|
async function main(): Promise<void> {
|
|
console.log(`Starting load test: ${NUM_AGENTS} agents, ${MESSAGES_PER_AGENT} messages each`);
|
|
console.log(`Target: ${totalMessages} total messages\n`);
|
|
|
|
// These would come from your test setup
|
|
// For now, we'll just log what's needed
|
|
console.log('Prerequisites:');
|
|
console.log('1. AgentHub server running at', BASE_URL);
|
|
console.log('2. Test agents created with API tokens');
|
|
console.log('3. Test room created and agents added as members');
|
|
console.log('4. Export TEST_JWT_1, TEST_JWT_2, ..., TEST_JWT_20');
|
|
console.log('5. Export TEST_ROOM_ID\n');
|
|
|
|
const jwtTokens: string[] = [];
|
|
for (let i = 1; i <= NUM_AGENTS; i++) {
|
|
const jwt = process.env[`TEST_JWT_${i}`];
|
|
if (!jwt) {
|
|
console.error(`Missing TEST_JWT_${i} environment variable`);
|
|
process.exit(1);
|
|
}
|
|
jwtTokens.push(jwt);
|
|
}
|
|
|
|
const roomId = process.env.TEST_ROOM_ID;
|
|
if (!roomId) {
|
|
console.error('Missing TEST_ROOM_ID environment variable');
|
|
process.exit(1);
|
|
}
|
|
|
|
const startTime = Date.now();
|
|
|
|
// Create all agents concurrently
|
|
await Promise.all(jwtTokens.map((jwt, index) => createAgent(index + 1, jwt, roomId))).catch(
|
|
(err) => {
|
|
console.error('Load test failed:', err);
|
|
process.exit(1);
|
|
},
|
|
);
|
|
|
|
const totalTime = Date.now() - startTime;
|
|
|
|
// Calculate statistics
|
|
latencies.sort((a, b) => a.latency - b.latency);
|
|
|
|
const p50Index = Math.floor(latencies.length * 0.5);
|
|
const p90Index = Math.floor(latencies.length * 0.9);
|
|
const p99Index = Math.floor(latencies.length * 0.99);
|
|
|
|
const p50 = latencies[p50Index]?.latency || 0;
|
|
const p90 = latencies[p90Index]?.latency || 0;
|
|
const p99 = latencies[p99Index]?.latency || 0;
|
|
|
|
const avg = latencies.reduce((sum, l) => sum + l.latency, 0) / latencies.length;
|
|
const min = latencies[0]?.latency || 0;
|
|
const max = latencies[latencies.length - 1]?.latency || 0;
|
|
|
|
console.log('\n=== Load Test Results ===');
|
|
console.log(`Total messages sent: ${messagesReceived} / ${totalMessages}`);
|
|
console.log(`Total time: ${totalTime}ms`);
|
|
console.log(`Throughput: ${((totalMessages / totalTime) * 1000).toFixed(2)} msg/s\n`);
|
|
|
|
console.log('Latency distribution:');
|
|
console.log(` min: ${min}ms`);
|
|
console.log(` avg: ${avg.toFixed(2)}ms`);
|
|
console.log(` p50: ${p50}ms`);
|
|
console.log(` p90: ${p90}ms`);
|
|
console.log(` p99: ${p99}ms`);
|
|
console.log(` max: ${max}ms\n`);
|
|
|
|
if (p99 < 100) {
|
|
console.log(`✅ PASS: p99 latency (${p99}ms) < 100ms target`);
|
|
process.exit(0);
|
|
} else {
|
|
console.log(`❌ FAIL: p99 latency (${p99}ms) >= 100ms target`);
|
|
process.exit(1);
|
|
}
|
|
}
|
|
|
|
main().catch((err) => {
|
|
console.error('Fatal error:', err);
|
|
process.exit(1);
|
|
});
|