Files
Admin_dash/app/api/status/route.ts
Shivam Patel e47a719d79 Fix 24h bar timezone mismatch and add time-interval uptime calculation
Bars were all grey in the 24h view because SQL bucket keys (UTC) never
matched frontend keys (local timezone). Added toUTCKey() helper to
generate UTC keys for all ranges. Replaced check-counting uptime with
time-interval method for 24h/7d: time between consecutive up checks
counts as uptime, intervals involving down checks count as downtime,
uncovered time is unknown.
2026-02-09 04:15:34 -05:00

145 lines
5.3 KiB
TypeScript

import { NextResponse } from 'next/server';
import { getDb } from '@/lib/db';
// Opt out of Next.js static caching: status data must be re-queried on every request.
export const dynamic = 'force-dynamic';
// Values accepted for the ?range= query parameter.
type TimeRange = '24h' | '7d' | '30d' | '365d';
// Per-range query settings:
// - sqlOffset: SQLite datetime() modifier selecting the lookback window
// - strftime:  format producing the bucket key each check is grouped under
// - bars:      bar count for this range (not read in this file — presumably
//              consumed by the frontend renderer; confirm against dashboard code)
const RANGE_CONFIG: Record<TimeRange, { sqlOffset: string; strftime: string; bars: number }> = {
'24h': { sqlOffset: '-24 hours', strftime: '%Y-%m-%d %H:00', bars: 24 },
'7d': { sqlOffset: '-7 days', strftime: '%Y-%m-%d', bars: 7 },
'30d': { sqlOffset: '-30 days', strftime: '%Y-%m-%d', bars: 30 },
'365d': { sqlOffset: '-365 days', strftime: '%Y-%m', bars: 12 },
};
// One row of uptime_logs as consumed by calculateTimeUptime.
// timestamp is a SQLite datetime string; the SQL queries compare it against
// datetime('now'), which is UTC — so stored values are presumably UTC.
interface RawLog {
status: string;
timestamp: string;
}
/**
 * Parse a SQLite timestamp as UTC epoch milliseconds.
 *
 * SQLite's datetime() emits 'YYYY-MM-DD HH:MM:SS' with no zone designator,
 * and `new Date()` parses that form in the server's LOCAL timezone, skewing
 * every interval by the UTC offset. The stored values are UTC (the SQL
 * queries compare them against datetime('now'), which is UTC), so convert to
 * ISO form and pin the zone to 'Z'. Strings that already carry an explicit
 * zone designator are passed through unchanged.
 */
function parseUtcMs(ts: string): number {
  if (/(?:Z|[+-]\d{2}:?\d{2})$/i.test(ts)) return new Date(ts).getTime();
  return new Date(ts.replace(' ', 'T') + 'Z').getTime();
}

/**
 * Time-interval uptime: time between consecutive "up" checks = uptime.
 * Intervals involving any "down" check = downtime.
 * Remaining uncovered time = unknown (it counts against the percentage).
 *
 * @param logs - checks sorted by timestamp ascending (as queried)
 * @param periodStartMs - period start, epoch ms
 * @param periodEndMs - period end, epoch ms
 * @returns uptime / totalPeriod as an integer percentage; 0 when the period
 *   is empty or fewer than two checks exist (no interval can be formed)
 */
function calculateTimeUptime(
  logs: RawLog[],
  periodStartMs: number,
  periodEndMs: number,
): number {
  const totalMs = periodEndMs - periodStartMs;
  if (totalMs <= 0 || logs.length < 2) return 0;
  let uptimeMs = 0;
  for (let i = 0; i < logs.length - 1; i++) {
    const t1 = parseUtcMs(logs[i].timestamp);
    const t2 = parseUtcMs(logs[i + 1].timestamp);
    // Skip rows with unparseable timestamps: previously a single NaN here
    // poisoned the running sum and made the whole function return NaN.
    if (Number.isNaN(t1) || Number.isNaN(t2)) continue;
    // Clamp each interval to the requested period.
    const start = Math.max(t1, periodStartMs);
    const end = Math.min(t2, periodEndMs);
    if (start >= end) continue; // interval lies entirely outside the period
    if (logs[i].status === 'up' && logs[i + 1].status === 'up') {
      uptimeMs += end - start;
    }
  }
  // totalMs > 0 is guaranteed by the guard above, so divide directly.
  return Math.round((uptimeMs / totalMs) * 100);
}
/**
 * GET /api/status?range=24h|7d|30d|365d
 *
 * Returns one entry per monitored service: live status/latency, uptime
 * percentages (time-interval method for 24h/7d, check-counting for lifetime),
 * 24h average latency, and bucketed history for the bar display.
 */
export async function GET(request: Request) {
  try {
    const db = await getDb();
    const { searchParams } = new URL(request.url);
    const range = (searchParams.get('range') || '24h') as TimeRange;
    // Unknown ?range values silently fall back to the 24h config.
    const config = RANGE_CONFIG[range] || RANGE_CONFIG['24h'];
    const nowMs = Date.now();
    const MS_24H = 24 * 3600_000;
    const MS_7D = 7 * 24 * 3600_000;

    // Live status: most recent log row per service.
    const live = await db.all(`
SELECT service_name as name, url, status, latency
FROM uptime_logs
WHERE id IN (SELECT MAX(id) FROM uptime_logs GROUP BY service_name)
`);

    // Raw logs for time-interval uptime (8-day window covers 24h + 7d with buffer).
    const rawLogs = await db.all(`
SELECT service_name, status, timestamp
FROM uptime_logs
WHERE timestamp > datetime('now', '-8 days')
ORDER BY service_name, timestamp ASC
`);
    // Group raw logs per service, preserving the query's ASC timestamp order.
    const rawByService = new Map<string, RawLog[]>();
    for (const row of rawLogs) {
      let list = rawByService.get(row.service_name);
      if (!list) {
        list = [];
        rawByService.set(row.service_name, list);
      }
      list.push({ status: row.status, timestamp: row.timestamp });
    }

    // Lifetime stats (check-counting — efficient for large datasets).
    const statsLifetime = await db.all(`
SELECT service_name,
count(*) as total,
sum(case when status = 'up' then 1 else 0 end) as up_count
FROM uptime_logs
GROUP BY service_name
`);
    const lifetimeByService = new Map<string, { total: number; up_count: number }>(
      statsLifetime.map(s => [s.service_name, s]),
    );

    // Average latency (24h, successful checks only).
    const avgLatencyRows = await db.all(`
SELECT service_name,
ROUND(AVG(latency)) as avg_latency
FROM uptime_logs
WHERE timestamp > datetime('now', '-24 hours')
AND status = 'up' AND latency > 0
GROUP BY service_name
`);
    const avgLatencyByService = new Map<string, number>(
      avgLatencyRows.map(s => [s.service_name, s.avg_latency]),
    );

    // Bucketed history for the bar display. The interpolated values come from
    // the fixed RANGE_CONFIG table above, so no user input reaches the SQL.
    const history = await db.all(`
SELECT service_name,
strftime('${config.strftime}', timestamp) as bucket,
count(*) as total,
sum(case when status = 'up' then 1 else 0 end) as up_count
FROM uptime_logs
WHERE timestamp > datetime('now', '${config.sqlOffset}')
GROUP BY service_name, bucket
ORDER BY bucket ASC
`);
    const historyByService = new Map<string, Array<{ bucket: string; up_count: number; total: number }>>();
    for (const row of history) {
      let buckets = historyByService.get(row.service_name);
      if (!buckets) {
        buckets = [];
        historyByService.set(row.service_name, buckets);
      }
      buckets.push({ bucket: row.bucket, up_count: row.up_count, total: row.total });
    }

    // Merge per-service results. Map lookups keep this pass linear in the
    // number of services instead of scanning each stats array per service.
    const results = live.map(l => {
      const logs = rawByService.get(l.name) || [];
      const sLife = lifetimeByService.get(l.name);
      return {
        ...l,
        uptime24h: calculateTimeUptime(logs, nowMs - MS_24H, nowMs),
        uptime7d: calculateTimeUptime(logs, nowMs - MS_7D, nowMs),
        uptimeLifetime: sLife ? Math.round((sLife.up_count / sLife.total) * 100) : 0,
        avgLatency24h: avgLatencyByService.get(l.name) ?? 0,
        history: historyByService.get(l.name) || [],
      };
    });
    return NextResponse.json(results);
  } catch (error) {
    // Fail soft: log server-side and return [] so the dashboard renders an
    // empty state. NOTE(review): this hides failures behind HTTP 200 —
    // consider a 500 response if the frontend can distinguish it.
    console.error('Uptime stats error:', error);
    return NextResponse.json([]);
  }
}