diff --git a/frontend/Dockerfile b/frontend/Dockerfile index 5e9d1d2..ea8e5cd 100644 --- a/frontend/Dockerfile +++ b/frontend/Dockerfile @@ -2,7 +2,7 @@ FROM python:3.10-slim # Install Python dependencies -RUN pip install Flask requests tabulate +RUN pip install Flask requests tabulate qrcode WORKDIR /app/static RUN mkdir fonts diff --git a/frontend/main.py b/frontend/main.py index 6e1f2ec..1ee5ff1 100644 --- a/frontend/main.py +++ b/frontend/main.py @@ -2,7 +2,11 @@ import os import requests import subprocess import json -from flask import Flask, render_template_string, jsonify +import zlib +import base64 +import qrcode +from io import StringIO +from flask import Flask, render_template_string, jsonify, request from socket import gethostbyname_ex from datetime import datetime from tabulate import tabulate # For ASCII tables @@ -13,6 +17,7 @@ app = Flask(__name__) # Config REPORTER_SERVICE = os.environ.get("REPORTER_SERVICE", "ntp-reporter-svc.default.svc.cluster.local") +BASE_URL = os.environ.get("BASE_URL", "https://time.dws.rip") # Tracking table config TRACKING_METRICS_ORDER = [ @@ -31,6 +36,36 @@ SOURCES_COLUMNS_ORDER = [ # Define widths for sources table columns SOURCES_COL_WIDTHS = [24, 10, 32, 10, 7, 7, 8, 15, 10] +# Metric Definitions +TRACKING_METRICS_DEFS = { + "Reference ID": "Identifier of current time source (IP or refclock ID)", + "Ref Source IP": "IP address of the reference time source", + "Stratum": "Distance from primary time source (lower is better, 1-16)", + "Ref time (UTC)": "Last time the reference was updated", + "System time": "Offset between system clock and reference time (seconds)", + "Last offset": "Offset of last clock update (seconds)", + "RMS offset": "Root mean square of recent offset values (long-term average)", + "Frequency": "Rate of system clock drift (ppm - parts per million)", + "Residual freq": "Residual frequency error not yet corrected", + "Skew": "Estimated error bound of frequency (accuracy metric)", + "Root delay": "Total 
network delay to stratum-1 server (seconds)", + "Root dispersion": "Total dispersion accumulated to stratum-1 server", + "Update interval": "Time between clock updates (seconds)", + "Leap status": "Leap second indicator (Normal, Insert, Delete, or Not synced)" +} + +SOURCES_METRICS_DEFS = { + "DWS PEER": "Node identifier for this NTP daemon instance", + "ModeState": "Source mode (^=server, ==peer) & state (*=current sync)", + "Name/IP address": "Hostname or IP address of the NTP source", + "Stratum": "Stratum level of the source (1=primary reference)", + "Poll": "Polling interval to source (log2 seconds, e.g., 6 = 64s)", + "Reach": "Reachability register (377 octal = all 8 recent polls OK)", + "LastRx": "Time since last successful response from source", + "Last sample": "Offset measurement from last valid sample (seconds)", + "Std Dev": "Standard deviation of offset (jitter measurement)" +} + # # HTML Template - Radically simplified for TUI output # @@ -99,10 +134,6 @@ HTML_TEMPLATE = """
-$> ./dws_ntp_report
-**INFO**: INITIALIZING DWS NTP MONITORING SYSTEM
-**INFO**: COLLECTING DWS NTP POOL INFORMATION
-
{{ report_header }}
SECTION 1: CURRENT TIME SYNCHRONIZATION
@@ -116,7 +147,6 @@ CLOCK OFFSET: ---
SECTION 2: NODE TRACKING STATUS METRICS
════════════════════════════════════════════════════════════════════════════════
-**INFO**: COLLECTING TRACKING STATUS METRICS FROM ALL NODES
{{ tracking_table_ascii }}
@@ -125,24 +155,35 @@ CLOCK OFFSET: ---
SECTION 3: UPSTREAM NTP SOURCES
════════════════════════════════════════════════════════════════════════════════
-**INFO**: COLLECTING UPSTREAM SOURCES METRICS FROM ALL NODES
{{ sources_table_ascii }}
════════════════════════════════════════════════════════════════════════════════
-SECTION 4: DEVELOPER INFORMATION
+SECTION 4: METRIC DEFINITIONS & DEVELOPER INFORMATION
════════════════════════════════════════════════════════════════════════════════
+TRACKING METRICS GLOSSARY:
+{{ tracking_glossary }}
+
+SOURCES METRICS GLOSSARY:
+{{ sources_glossary }}
+
+USAGE INFORMATION:
USE DWS AS YOUR NTP POOL BY SETTING time.dws.rip AS YOUR NTP SOURCE
-**INFO**: DWS LLC // "IT'S YOUR INTERNET, TAKE IT BACK" // https://dws.rip
-**INFO**: DWS LLC // UNITED STATES OF AMERICA // 2025
+DWS LLC // "IT'S YOUR INTERNET, TAKE IT BACK" // https://dws.rip
+DWS LLC // UNITED STATES OF AMERICA // 2025
+
+REPORT SNAPSHOT:
+Scan the QR code below to access this exact report state:
+
+{{ report_qr_code }}
════════════════════════════════════════════════════════════════════════════════
-**INFO**: REPORT GENERATION COMPLETE {{ gen_time_utc }}
-**INFO**: END OF REPORT
+REPORT GENERATION COMPLETE {{ gen_time_utc }}
+END OF REPORT
════════════════════════════════════════════════════════════════════════════════
@@ -151,6 +192,7 @@ USE DWS AS YOUR NTP POOL BY SETTING time.dws.rip AS YOUR NTP SOURCE
const clockDateSpan = document.getElementById('clock-date');
const clockStatusSpan = document.getElementById('clock-status');
const clockOffsetSpan = document.getElementById('clock-offset');
+ const isHistorical = {{ 'true' if is_historical else 'false' }};
let serverTimeOffsetMs = null; // Offset between client and DWS server in milliseconds
let clockUpdateInterval = null;
let syncInterval = null;
@@ -159,17 +201,17 @@ USE DWS AS YOUR NTP POOL BY SETTING time.dws.rip AS YOUR NTP SOURCE
if (serverTimeOffsetMs === null) return;
// Apply offset to client's current time to get DWS server time
const now = new Date(new Date().getTime() + serverTimeOffsetMs);
-
+
// Format time and date exactly as requested
const hours = String(now.getUTCHours()).padStart(2, '0');
const minutes = String(now.getUTCMinutes()).padStart(2, '0');
const seconds = String(now.getUTCSeconds()).padStart(2, '0');
const timeString = `${hours}:${minutes}:${seconds}`;
-
- const dateString = now.toLocaleDateString('en-US', {
- weekday: 'long', year: 'numeric', month: 'long', day: 'numeric', timeZone: 'UTC'
+
+ const dateString = now.toLocaleDateString('en-US', {
+ weekday: 'long', year: 'numeric', month: 'long', day: 'numeric', timeZone: 'UTC'
}) + " (UTC)";
-
+
clockTimeSpan.textContent = timeString;
clockDateSpan.textContent = dateString;
}
@@ -185,7 +227,7 @@ USE DWS AS YOUR NTP POOL BY SETTING time.dws.rip AS YOUR NTP SOURCE
const serverTime = new Date(timeData.time_utc).getTime();
const clientTime = new Date().getTime();
serverTimeOffsetMs = serverTime - clientTime; // Recalculate offset
-
+
// Fetch latest fragments to try and get *a* representative offset
// This is slightly tricky as we don't know which node served this page
// We'll just grab the first node's offset for now
@@ -204,18 +246,18 @@ USE DWS AS YOUR NTP POOL BY SETTING time.dws.rip AS YOUR NTP SOURCE
clockStatusSpan.textContent = "Synced";
// Display the offset calculated between *client* and DWS server
- clockOffsetSpan.textContent = `${serverTimeOffsetMs * -1}ms (vs your clock)`;
-
+ clockOffsetSpan.textContent = `${serverTimeOffsetMs * -1}ms (vs your clock)`;
+
// Start interval timer ONLY if it's not already running
if (!clockUpdateInterval) {
updateClock(); // Update immediately
clockUpdateInterval = setInterval(updateClock, 1000);
}
-
+
} catch (error) {
console.error('Error syncing time/offset:', error);
clockStatusSpan.textContent = `Sync Error`;
- clockOffsetSpan.textContent = `---`;
+ clockOffsetSpan.textContent = `---`;
serverTimeOffsetMs = 0; // Fallback to local time
if (!clockUpdateInterval) {
updateClock();
@@ -223,11 +265,44 @@ USE DWS AS YOUR NTP POOL BY SETTING time.dws.rip AS YOUR NTP SOURCE
}
}
}
-
- // Start the sync process when the page loads & repeat
+
+ function displayHistoricalTime(timestampStr) {
+ // Parse the ISO timestamp and display it statically
+ try {
+ const timestamp = new Date(timestampStr);
+
+ const hours = String(timestamp.getUTCHours()).padStart(2, '0');
+ const minutes = String(timestamp.getUTCMinutes()).padStart(2, '0');
+ const seconds = String(timestamp.getUTCSeconds()).padStart(2, '0');
+ const timeString = `${hours}:${minutes}:${seconds}`;
+
+ const dateString = timestamp.toLocaleDateString('en-US', {
+ weekday: 'long', year: 'numeric', month: 'long', day: 'numeric', timeZone: 'UTC'
+ }) + " (UTC)";
+
+ clockTimeSpan.textContent = timeString;
+ clockDateSpan.textContent = dateString;
+ clockStatusSpan.textContent = "Historical Snapshot";
+ clockOffsetSpan.textContent = "N/A";
+ } catch (error) {
+ console.error('Error parsing historical timestamp:', error);
+ clockTimeSpan.textContent = timestampStr;
+ clockDateSpan.textContent = "Historical";
+ clockStatusSpan.textContent = "Snapshot";
+ clockOffsetSpan.textContent = "N/A";
+ }
+ }
+
+ // Start the sync process when the page loads & repeat (only for live reports)
document.addEventListener('DOMContentLoaded', () => {
- syncClockAndOffset();
- syncInterval = setInterval(syncClockAndOffset, 60 * 1000); // Resync every 60 seconds
+ if (isHistorical) {
+ // For historical reports, display static timestamp
+ displayHistoricalTime("{{ gen_time_utc }}");
+ } else {
+ // For live reports, sync clock in real-time
+ syncClockAndOffset();
+ syncInterval = setInterval(syncClockAndOffset, 60 * 1000); // Resync every 60 seconds
+ }
});
@@ -243,7 +318,7 @@ def report_header(report_id, timestamp):
"""Generate a vintage-style report header."""
header = []
header.append(form_feed_separator(80))
- header.append(f"REPORT ID: {report_id}".ljust(40) + f"GENERATED: {timestamp}".rjust(40))
+ header.append(f"GENERATED: {timestamp}".center(80))
header.append(form_feed_separator(80))
return "\n".join(header)
@@ -274,6 +349,65 @@ def format_float(value_str, precision=3):
except:
return value_str # Return original if not a float
+# --- Report ID Encoding/Decoding ---
+def encode_state(fragments):
+ """Encode NTP state into a URL-safe base64 compressed report ID."""
+ try:
+ # Create minimal state representation
+ state = {
+ "timestamp": datetime.utcnow().isoformat() + "Z",
+ "fragments": fragments
+ }
+ # Convert to JSON, compress with zlib, then base64 encode
+ json_str = json.dumps(state, separators=(',', ':')) # Compact JSON
+ compressed = zlib.compress(json_str.encode('utf-8'), level=9)
+ encoded = base64.urlsafe_b64encode(compressed).decode('ascii')
+ return encoded
+ except Exception as e:
+ print(f"Error encoding state: {e}")
+ return None
+
+def decode_state(report_id):
+ """Decode a report ID back into NTP state fragments."""
+ try:
+ # Base64 decode, decompress, parse JSON
+ compressed = base64.urlsafe_b64decode(report_id.encode('ascii'))
+ json_str = zlib.decompress(compressed).decode('utf-8')
+ state = json.loads(json_str)
+ return state.get("fragments", []), state.get("timestamp")
+ except Exception as e:
+ print(f"Error decoding state: {e}")
+ return None, None
+
+def generate_qr_code_ascii(url):
+ """Generate ASCII QR code for the given URL."""
+ try:
+ qr = qrcode.QRCode(
+ version=1, # Small version, will auto-increase if needed
+ error_correction=qrcode.constants.ERROR_CORRECT_L,
+ box_size=1,
+ border=2,
+ )
+ qr.add_data(url)
+ qr.make(fit=True)
+
+ # Get the QR code matrix
+ matrix = qr.get_matrix()
+
+ # Convert to ASCII art using block characters
+ ascii_qr = []
+ for row in matrix:
+ line = ""
+ for cell in row:
+ # Use full block for black, space for white
+ line += "██" if cell else " "
+ ascii_qr.append(line)
+
+ return "\n".join(ascii_qr)
+ except Exception as e:
+ print(f"Error generating QR code: {e}")
+ return f"[QR Code generation failed: {e}]"
+
@app.route('/api/time')
def get_server_time():
return jsonify({"time_utc": datetime.utcnow().isoformat() + "Z"})
@@ -301,27 +435,26 @@ def format_value(value, max_len=25):
return s_val[:max_len-3] + "..."
return s_val
-@app.route('/')
-def homepage():
- fragments = []
+def format_glossary(metrics_defs):
+ """Format metric definitions as terminal-style glossary."""
+ lines = []
+ for metric, definition in metrics_defs.items():
+ lines.append(f" {metric.ljust(20)} - {definition}")
+ return "\n".join(lines)
+
+def render_report(fragments, report_id, gen_time, is_historical=False):
+ """Render NTP report from fragments data."""
error_msg = "No errors."
meta_offset_ms = "N/A"
meta_leap_status = "Unknown"
- ips = get_reporter_ips(REPORTER_SERVICE)
- if not ips: error_msg = f"Could not resolve IPs for service '{REPORTER_SERVICE}'."
- # 1. Fetch fragments
- for ip in ips:
- try:
- res = requests.get(f"http://{ip}:9898/fragment.json", timeout=2)
- if res.status_code == 200: fragments.append(res.json())
- else: print(f"Failed fetch from {ip}: Status {res.status_code}")
- except Exception as e: print(f"Failed connect to {ip}: {e}"); error_msg = str(e)
-
- fragments.sort(key=lambda x: x.get("node_id", "z"))
nodes_list = [f.get("node_id", "unknown") for f in fragments]
- # 2. Generate ASCII Tracking Table with tabulate
+ # Generate QR code for the report URL
+ report_url = f"{BASE_URL}/report/{report_id}"
+ report_qr_code = generate_qr_code_ascii(report_url)
+
+ # Generate ASCII Tracking Table with tabulate
tracking_rows = []
for metric in TRACKING_METRICS_ORDER:
row = [metric]
@@ -354,7 +487,7 @@ def homepage():
total_offset_seconds = 0.0
valid_offset_count = 0
leap_statuses = set()
- # 3. Generate Meta Description Summary (using the first node's data)
+ # Generate Meta Description Summary
if fragments:
for frag in fragments:
tracking = frag.get("tracking", {})
@@ -363,7 +496,7 @@ def homepage():
leap = tracking.get("Leap status")
if leap:
leap_statuses.add(leap)
-
+
# Collect Offset
offset_str = tracking.get("Last offset", 0.1)
try:
@@ -376,14 +509,14 @@ def homepage():
if valid_offset_count > 0:
avg_offset_seconds = total_offset_seconds / valid_offset_count
meta_offset_ms = f"~{(avg_offset_seconds * 1000):.1f}ms" # Use ~ for average
-
+
if len(leap_statuses) == 1:
meta_leap_status = leap_statuses.pop()
elif len(leap_statuses) > 1:
meta_leap_status = "Mixed"
# else remains "Unknown" if no valid status found
- # 3. Generate ASCII Sources Table with tabulate
+ # Generate ASCII Sources Table with tabulate
if not fragments:
sources_table_ascii = "ERROR: Could not fetch data from any reporter pods."
else:
@@ -426,21 +559,81 @@ def homepage():
numalign="right"
)
- gen_time = subprocess.run(["date", "-u", "+%Y-%m-%dT%H:%M:%SZ"], capture_output=True, text=True).stdout.strip()
-
# Generate report header
- report_id = str(uuid.uuid4())[:8].upper()
report_header_text = report_header(report_id, gen_time)
+ # Format glossaries
+ tracking_glossary = format_glossary(TRACKING_METRICS_DEFS)
+ sources_glossary = format_glossary(SOURCES_METRICS_DEFS)
+
return render_template_string(
HTML_TEMPLATE,
gen_time_utc=gen_time,
report_header=report_header_text,
tracking_table_ascii=tracking_table_ascii,
sources_table_ascii=sources_table_ascii,
+ tracking_glossary=tracking_glossary,
+ sources_glossary=sources_glossary,
+ report_qr_code=report_qr_code,
+ is_historical=is_historical,
meta_description=f"DWS NTP Pool: {meta_leap_status}. Avg Offset: {meta_offset_ms}.",
error=error_msg
)
+@app.route('/')
+def homepage():
+ """Live NTP status - fetches current data from all nodes."""
+ fragments = []
+ ips = get_reporter_ips(REPORTER_SERVICE)
+
+ # Fetch fragments from all reporter pods
+ for ip in ips:
+ try:
+ res = requests.get(f"http://{ip}:9898/fragment.json", timeout=2)
+ if res.status_code == 200:
+ fragments.append(res.json())
+ else:
+ print(f"Failed fetch from {ip}: Status {res.status_code}")
+ except Exception as e:
+ print(f"Failed connect to {ip}: {e}")
+
+ fragments.sort(key=lambda x: x.get("node_id", "z"))
+
+ # Generate timestamp and report ID
+ gen_time = subprocess.run(["date", "-u", "+%Y-%m-%dT%H:%M:%SZ"], capture_output=True, text=True).stdout.strip()
+
+ # Create encoded report ID from current state
+ report_id = encode_state(fragments)
+ if not report_id:
+ report_id = str(uuid.uuid4())[:8].upper() # Fallback to random ID
+
+ return render_report(fragments, report_id, gen_time)
+
+@app.route('/report/<report_id>')
+def view_report(report_id):
+    """Historical NTP report - reconstructs report state from the encoded ID."""
+    fragments, timestamp = decode_state(report_id)
+
+    if fragments is None:
+        return render_template_string("""
+<pre>
+ERROR: INVALID REPORT ID
+
+The report ID you provided could not be decoded.
+Please check the URL and try again.
+
+<a href="/">Return to live status</a>
+</pre>
+        """), 400
+
+    # Use the timestamp from the encoded state, or fallback to "HISTORICAL"
+    gen_time = timestamp if timestamp else "HISTORICAL"
+
+    return render_report(fragments, report_id, gen_time, is_historical=True)
+
 if __name__ == '__main__':
     app.run(host='0.0.0.0', port=8080)