performance improvements, charts improvements, minor UI improvements

This commit is contained in:
Антон
2026-01-09 17:50:45 +03:00
parent 53a3a99309
commit f64f49a7a6
7 changed files with 337 additions and 179 deletions

View File

@@ -243,11 +243,51 @@ class OpenVPNAPI:
pass
try:
# 4. Формирование запроса
# В агрегированных таблицах нет полей rate_mbps, возвращаем 0
# 4. Request Formation
is_aggregated = target_table != 'usage_history'
if is_aggregated:
# High resolution handling for 1h and 3h ranges
is_high_res = False
interval = 0
points_count = 0
if target_table == 'usage_history':
if duration_hours <= 1.1:
is_high_res = True
interval = 30
points_count = 120
elif duration_hours <= 3.1:
is_high_res = True
interval = 60
points_count = 180
elif duration_hours <= 6.1:
is_high_res = True
interval = 120 # 2 minutes
points_count = 180
elif duration_hours <= 12.1:
is_high_res = True
interval = 300 # 5 minutes
points_count = 144
elif duration_hours <= 24.1:
is_high_res = True
interval = 900 # 15 minutes
points_count = 96
if is_high_res:
query = f'''
SELECT
datetime((strftime('%s', uh.timestamp) / {interval}) * {interval}, 'unixepoch') as timestamp,
SUM(uh.bytes_received) as bytes_received,
SUM(uh.bytes_sent) as bytes_sent,
MAX(uh.bytes_received_rate_mbps) as bytes_received_rate_mbps,
MAX(uh.bytes_sent_rate_mbps) as bytes_sent_rate_mbps
FROM usage_history uh
JOIN clients c ON uh.client_id = c.id
WHERE c.common_name = ? AND uh.timestamp BETWEEN ? AND ?
GROUP BY datetime((strftime('%s', uh.timestamp) / {interval}) * {interval}, 'unixepoch')
ORDER BY timestamp ASC
'''
elif is_aggregated:
query = f'''
SELECT
t.timestamp,
@@ -280,13 +320,44 @@ class OpenVPNAPI:
cursor.execute(query, (common_name, s_str, e_str))
columns = [column[0] for column in cursor.description]
data = [dict(zip(columns, row)) for row in cursor.fetchall()]
db_data_list = [dict(zip(columns, row)) for row in cursor.fetchall()]
final_data = db_data_list
if is_high_res:
# Zero-filling
final_data = []
db_data_map = {row['timestamp']: row for row in db_data_list}
# Align to nearest interval
ts_end = end_date.timestamp()
ts_aligned = ts_end - (ts_end % interval)
aligned_end = datetime.utcfromtimestamp(ts_aligned)
# Generate points
start_generated = aligned_end - timedelta(seconds=points_count * interval)
current = start_generated
for _ in range(points_count):
current += timedelta(seconds=interval)
ts_str = current.strftime('%Y-%m-%d %H:%M:%S')
if ts_str in db_data_map:
final_data.append(db_data_map[ts_str])
else:
final_data.append({
'timestamp': ts_str,
'bytes_received': 0,
'bytes_sent': 0,
'bytes_received_rate_mbps': 0,
'bytes_sent_rate_mbps': 0
})
return {
'data': data,
'data': final_data,
'meta': {
'resolution_used': target_table,
'record_count': len(data),
'resolution_used': target_table + ('_hires' if is_high_res else ''),
'record_count': len(final_data),
'start': s_str,
'end': e_str
}
@@ -342,64 +413,105 @@ class OpenVPNAPI:
'traffic_distribution': {'rx': 0, 'tx': 0}
}
# 1. Определяем таблицу и временную метку
target_table = 'usage_history'
# 1. Configuration
hours = 24
interval_seconds = 900 # 15 min default
target_table = 'usage_history'
if range_arg == '7d':
target_table = 'stats_hourly'
hours = 168 # 7 * 24
hours = 168
interval_seconds = 6300 # 105 min -> 96 points
target_table = 'stats_hourly'
elif range_arg == '30d':
target_table = 'stats_6h' # или stats_daily
hours = 720 # 30 * 24
hours = 720
interval_seconds = 27000 # 450 min -> 96 points
target_table = 'stats_hourly' # Fallback to hourly/raw as needed
# Fallback logic for table existence
try:
# Проверка наличия таблицы
try:
cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{target_table}'")
if not cursor.fetchone():
target_table = 'usage_history'
except:
pass
cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{target_table}'")
if not cursor.fetchone():
target_table = 'usage_history' # Fallback to raw if aggregated missing
except:
target_table = 'usage_history'
# 2. Глобальная история (График)
# Для агрегированных таблиц поля rate могут отсутствовать, заменяем нулями
try:
# 2. Global History (Chart)
if target_table == 'usage_history':
rate_cols = "SUM(bytes_received_rate_mbps) as total_rx_rate, SUM(bytes_sent_rate_mbps) as total_tx_rate,"
else:
rate_cols = "0 as total_rx_rate, 0 as total_tx_rate,"
# Aggregation Query
# Group by interval_seconds
query_hist = f'''
SELECT
timestamp,
SUM(bytes_received) as total_rx,
SUM(bytes_sent) as total_tx,
{rate_cols}
COUNT(DISTINCT client_id) as active_count
FROM {target_table}
WHERE timestamp >= datetime('now', '-{hours} hours')
GROUP BY timestamp
datetime((strftime('%s', timestamp) / {interval_seconds}) * {interval_seconds}, 'unixepoch') as timestamp,
SUM(total_rx) as total_rx,
SUM(total_tx) as total_tx,
MAX(total_rx_rate) as total_rx_rate,
MAX(total_tx_rate) as total_tx_rate,
MAX(active_count) as active_count
FROM (
SELECT
timestamp,
SUM(bytes_received) as total_rx,
SUM(bytes_sent) as total_tx,
{rate_cols}
COUNT(DISTINCT client_id) as active_count
FROM {target_table}
WHERE timestamp >= datetime('now', '-{hours} hours')
GROUP BY timestamp
) sub
GROUP BY datetime((strftime('%s', timestamp) / {interval_seconds}) * {interval_seconds}, 'unixepoch')
ORDER BY timestamp ASC
'''
cursor.execute(query_hist)
rows = cursor.fetchall()
if rows:
columns = [col[0] for col in cursor.description]
analytics['global_history_24h'] = [dict(zip(columns, row)) for row in rows]
# Максимум клиентов
max_clients = 0
for row in analytics['global_history_24h']:
if row['active_count'] > max_clients:
max_clients = row['active_count']
analytics['max_concurrent_24h'] = max_clients
# 3. Топ-3 самых активных клиентов (за выбранный период)
# Внимание: для топа всегда берем данные, но запрос может быть тяжелым на usage_history за месяц.
# Лучше использовать агрегаты, если период большой.
columns = [col[0] for col in cursor.description]
db_data = {row[0]: dict(zip(columns, row)) for row in rows}
# Используем ту же таблицу, что и для истории, чтобы согласовать данные
# Post-processing: Zero Fill
analytics['global_history_24h'] = []
now = datetime.utcnow()
# Round down to nearest interval
ts_now = now.timestamp()
ts_aligned = ts_now - (ts_now % interval_seconds)
now_aligned = datetime.utcfromtimestamp(ts_aligned)
# We want exactly 96 points ending at now_aligned
# Start time = now_aligned - (96 * interval)
start_time = now_aligned - timedelta(seconds=96 * interval_seconds)
current = start_time
# Generate exactly 96 points
for _ in range(96):
current += timedelta(seconds=interval_seconds)
ts_str = current.strftime('%Y-%m-%d %H:%M:%S')
if ts_str in db_data:
analytics['global_history_24h'].append(db_data[ts_str])
else:
analytics['global_history_24h'].append({
'timestamp': ts_str,
'total_rx': 0,
'total_tx': 0,
'total_rx_rate': 0,
'total_tx_rate': 0,
'active_count': 0
})
# Max Clients metric
max_clients = 0
for row in analytics['global_history_24h']:
if row.get('active_count', 0) > max_clients:
max_clients = row['active_count']
analytics['max_concurrent_24h'] = max_clients
# 3. Top Clients & 4. Traffic Distribution (Keep existing logic)
# Use same target table
query_top = f'''
SELECT
c.common_name,
@@ -417,7 +529,6 @@ class OpenVPNAPI:
top_cols = [col[0] for col in cursor.description]
analytics['top_clients_24h'] = [dict(zip(top_cols, row)) for row in cursor.fetchall()]
# 4. Распределение трафика
query_dist = f'''
SELECT
SUM(bytes_received) as rx,

View File

@@ -1,90 +1,142 @@
# OpenVPN Monitor API v2 Documentation
# OpenVPN Monitor API v3 Documentation
Этот API предоставляет доступ к данным мониторинга OpenVPN, включая статус клиентов в реальном времени и исторические данные, хранящиеся в Time Series Database (TSDB).
This API provides access to OpenVPN monitoring data, including real-time client status and historical data stored in a Time Series Database (TSDB). It features optimized aggregation for fast visualization.
**Base URL:** `http://<your-server-ip>:5001/api/v1`
---
## 1. Статистика по клиенту (Детальная + История)
## 1. Global Analytics (Dashboard)
Основной эндпоинт для построения графиков и отчетов. Поддерживает динамическую агрегацию данных (умный выбор детализации).
Provides aggregated trend data for the entire server, optimized for visualization with exactly **96 data points** regardless of the time range.
### `GET /stats/<common_name>`
### `GET /analytics`
#### Параметры запроса (Query Parameters)
| Параметр | Тип | По умолчанию | Описание |
#### Query Parameters
| Parameter | Type | Default | Description |
| :--- | :--- | :--- | :--- |
| `range` | string | `24h` | Период выборки. Поддерживаются форматы: `24h` (часы), `7d` (дни), `30d`, `1y` (годы). |
| `resolution` | string | `auto` | Принудительная детализация данных. <br>**Значения:**<br>`auto` — автоматический выбор (см. логику ниже)<br>`raw` — сырые данные (каждые 10-30 сек)<br>`5min` — 5 минут<br>`hourly` — 1 час<br>`6h` — 6 часов<br>`daily` — 1 день |
| `range` | string | `24h` | Time range. Supported: `24h`, `7d`, `30d`. |
#### Логика `resolution=auto`
API автоматически выбирает таблицу источника данных в зависимости от длительности диапазона:
* **≤ 24 часов:** `usage_history` (Сырые данные)
* **≤ 7 дней:** `stats_hourly` (Агрегация по часам)
* **≤ 3 месяцев:** `stats_6h` (Агрегация по 6 часов)
* **> 3 месяцев:** `stats_daily` (Агрегация по дням)
#### Пример запроса
```http
GET /api/v1/stats/user-alice?range=7d
```
#### Пример ответа
#### Behavior
* **24h**: Returns 15-minute intervals.
* **7d**: Returns 105-minute intervals (1h 45m).
* **30d**: Returns 450-minute intervals (7h 30m).
* **Zero-Filling**: Missing data periods are automatically filled with zeros to ensure graph continuity.
#### Example Response
```json
{
"success": true,
"timestamp": "2026-01-08 14:30:00",
"timestamp": "2026-01-09 12:00:00",
"range": "24h",
"data": {
"common_name": "user-alice",
"status": "Active",
"real_address": "192.168.1.50:54321",
"last_activity": "N/A",
"current_rates": {
"recv_mbps": 1.5,
"sent_mbps": 0.2
},
"totals": {
"received_mb": 500.25,
"sent_mb": 120.10
},
"meta": {
"resolution_used": "stats_hourly",
"start": "2026-01-01 14:30:00",
"end": "2026-01-08 14:30:00",
"record_count": 168
},
"history": [
"global_history_24h": [
{
"timestamp": "2026-01-01 15:00:00",
"bytes_received": 1048576,
"bytes_sent": 524288,
"bytes_received_rate_mbps": 0,
"bytes_sent_rate_mbps": 0
"timestamp": "2026-01-09 11:45:00",
"total_rx": 102400,
"total_tx": 51200,
"active_count": 5
},
...
]
],
"max_concurrent_24h": 12,
"top_clients_24h": [ ... ],
"traffic_distribution": { "rx": 1000, "tx": 500 }
}
}
```
> **Примечание:** Поля `*_rate_mbps` в массиве `history` возвращают `0` для агрегированных данных (hourly, daily), так как агрегация хранит только суммарный объем трафика.
---
## 2. Текущая статистика (Все клиенты)
## 2. Client Statistics (Detail + History)
Возвращает мгновенный снимок состояния всех известных клиентов.
Main endpoint for individual client reports. Supports **Dynamic Aggregation** to optimize payload size (~98% reduction for 24h view).
### `GET /stats/<common_name>`
#### Query Parameters
| Parameter | Type | Default | Description |
| :--- | :--- | :--- | :--- |
| `range` | string | `24h` | Time range. Formats: `1h`, `3h`, `6h`, `12h`, `24h`, `7d`, `30d`. |
| `resolution` | string | `auto` | Force resolution (optional): `raw`, `5min`, `hourly`, `auto`. |
#### Dynamic Aggregation Logic (`resolution=auto`)
The API automatically selects the aggregation interval based on the requested range to balance detail and performance:
| Range | Resolution | Points | Source Table |
| :--- | :--- | :--- | :--- |
| **1h** | **30 sec** | 120 | `usage_history` (Raw) |
| **3h** | **1 min** | 180 | `usage_history` (Raw) |
| **6h** | **2 min** | 180 | `usage_history` (Raw) |
| **12h** | **5 min** | 144 | `usage_history` (Raw) |
| **24h** | **15 min** | 96 | `usage_history` (Raw) |
| **7d** | **1 Hour** | 168 | `stats_hourly` |
| **30d** | **6 Hours** | 120 | `stats_6h` |
*All short-term ranges (≤24h) include automatic **Zero-Filling**.*
#### Examples: Long-Term Aggregated Data
To explicitly request data from long-term storage tables (skipping raw data), use the `resolution` parameter or specific ranges.
**1. Last 7 Days (Hourly Resolution)**
Uses `stats_hourly` table. Reduced precision for weekly trends.
```http
GET /api/v1/stats/user-alice?range=7d
```
*or explicit resolution:*
```http
GET /api/v1/stats/user-alice?range=7d&resolution=hourly
```
**2. Last 30 Days (6-Hour Resolution)**
Uses `stats_6h` table. Ideal for monthly volume analysis.
```http
GET /api/v1/stats/user-alice?range=30d
```
**3. Last 1 Year (Daily Resolution)**
Uses `stats_daily` table. Extremely lightweight for annual reporting.
```http
GET /api/v1/stats/user-alice?range=1y&resolution=daily
```
#### Example Response
```json
{
"success": true,
"data": {
"common_name": "user-alice",
"status": "Active",
"current_rates": { "recv_mbps": 1.5, "sent_mbps": 0.2 },
"totals": { "received_mb": 500.25, "sent_mb": 120.10 },
"history": [
{
"timestamp": "2026-01-09 11:30:00",
"bytes_received": 5000,
"bytes_sent": 2000,
"bytes_received_rate_mbps": 0.5,
"bytes_sent_rate_mbps": 0.1
},
...
],
"meta": {
"resolution_used": "usage_history_hires",
"record_count": 120
}
}
}
```
---
## 3. Current Statistics (All Clients)
Returns a snapshot of all known clients.
### `GET /stats`
#### Пример ответа
#### Example Response
```json
{
"success": true,
@@ -93,110 +145,63 @@ GET /api/v1/stats/user-alice?range=7d
{
"common_name": "user-alice",
"status": "Active",
"real_address": "192.168.1.50:54321",
"current_recv_rate_mbps": 1.5,
"current_sent_rate_mbps": 0.2,
"total_received_mb": 500.25,
"total_sent_mb": 120.10,
"last_activity": "N/A"
"total_received_mb": 500.2
},
{
"common_name": "user-bob",
"status": "Disconnected",
"real_address": null,
"current_recv_rate_mbps": 0,
"current_sent_rate_mbps": 0,
"total_received_mb": 1500.00,
"total_sent_mb": 300.00,
"last_activity": "2026-01-08 10:00:00"
}
...
]
}
```
---
## 3. Системная статистика
## 4. System Statistics
Сводная информация по всему серверу OpenVPN.
Aggregated summary metrics for the entire OpenVPN server.
### `GET /stats/system`
#### Пример ответа
#### Example Response
```json
{
"success": true,
"data": {
"total_clients": 15,
"active_clients": 3,
"total_bytes_received": 10737418240,
"total_bytes_sent": 5368709120,
"total_received_gb": 10.0,
"total_sent_gb": 5.0
}
}
```
---
## 4. Сертификаты
## 5. Certificates
Информация о сроках действия SSL сертификатов пользователей.
SSL Certificate expiration tracking.
### `GET /certificates`
#### Пример ответа
#### Example Response
```json
{
"success": true,
"data": [
{
"file": "user-alice.crt",
"common_name": "user-alice",
"days_remaining": "360 days",
"is_expired": false,
"not_after": "Jan 8 12:00:00 2027 GMT"
"is_expired": false
}
]
}
```
---
## 5. Вспомогательные методы
### Список клиентов (Упрощенный)
Используется для заполнения выпадающих списков в интерфейсе.
## 6. Utility Methods
### `GET /clients`
```json
{
"success": true,
"data": [
{"common_name": "user-alice", "status": "Active"},
{"common_name": "user-bob", "status": "Disconnected"}
]
}
```
### Проверка здоровья (Health Check)
Проверяет доступность базы данных.
Simple list of clients (Common Name + Status) for UI dropdowns.
### `GET /health`
```json
{
"success": true,
"status": "healthy"
}
```
Database connectivity check. Returns `{"status": "healthy"}`.

View File

@@ -0,0 +1,45 @@
# OpenVPN Data Gatherer Analysis
This report details the internal mechanics of `openvpn_gatherer_v3.py`, responsible for collecting, processing, and storing OpenVPN usage metrics.
## 1. Raw Data Collection (`run_monitoring_cycle`)
The gatherer runs a continuous loop (default interval: **10 seconds**).
### A. Log Parsing
- **Source**: `/var/log/openvpn/openvpn-status.log` (Status File v2, CSV format).
- **Target Fields**: `Common Name`, `Real Address`, `Bytes Sent`, `Bytes Received`.
- **Filtering**: Only lines starting with `CLIENT_LIST`.
### B. Delta Calculation (`update_client_status_and_bytes`)
The log provides *lifetime* counters for a session. The gatherer calculates the traffic *delta* (increment) since the last check.
- **Logic**: `Increment = Current Value - Last Saved Value`.
- **Reset Detection**: If `Current Value < Last Saved Value`, it assumes a session/server restart and counts the entire `Current Value` as the increment.
### C. Rate Calculation
- **Speed**: Calculated as `Increment * 8 / (Interval * 1_000_000)` to get **Mbps**.
- **Storage**: Raw samples (10s resolution) including speed and volume are stored in the `usage_history` table.
## 2. Data Aggregation (TSDB)
To support long-term statistics without storing billions of rows, the `TimeSeriesAggregator` performs real-time rollups into 5 aggregated tables using an `UPSERT` strategy (insert a new bucket row, or add the increment to the existing bucket's sum).
| Table | Resolution | Timestamp Alignment | Retention (Default) |
|-------|------------|---------------------|---------------------|
| `usage_history` | **10 sec** | Exact time | 7 Days |
| `stats_5min` | **5 min** | 00:00, 00:05... | 14 Days |
| `stats_15min` | **15 min** | 00:00, 00:15... | 28 Days |
| `stats_hourly` | **1 Hour** | XX:00:00 | 90 Days |
| `stats_6h` | **6 Hours** | 00:00, 06:00, 12:00... | 180 Days |
| `stats_daily` | **1 Day** | 00:00:00 | 365 Days |
**Logic**: Every 10s cycle, the calculated `Increment` is added to the sum of *all* relevant overlapping buckets. A single 5MB download contributes immediately to the current 5min, 15min, Hourly, 6h, and Daily counters simultaneously.
## 3. Data Retention
A cleanup job runs once every 24 hours (on day change).
- It executes `DELETE FROM table WHERE timestamp < cutoff_date`.
- Thresholds are configurable in `config.ini` under `[retention]`.
## Summary
The system employs a "Write-Optimized" approach. Instead of calculating heavy aggregates on-read (which would be slow), it pre-calculates them on-write. This ensures instant dashboard loading times even with years of historical data.

View File

@@ -30,11 +30,14 @@
<button class="btn-header" @click="toggleTheme" title="Toggle Theme">
<i class="fas" :class="isDark ? 'fa-sun' : 'fa-moon'" id="themeIcon"></i>
</button>
<button class="btn-header" @click="refreshPage" title="Refresh">
<i class="fas fa-sync-alt" id="refreshIcon"></i>
</button>
</div>
</div>
</div>
<router-view></router-view>
<router-view :key="$route.fullPath + '-' + refreshKey"></router-view>
</div>
</div>
</template>
@@ -46,6 +49,7 @@ import { useAppConfig } from './composables/useAppConfig';
const { loadConfig, isLoaded } = useAppConfig();
const timezoneAbbr = ref(new Date().toLocaleTimeString('en-us',{timeZoneName:'short'}).split(' ')[2] || 'UTC');
const isDark = ref(false);
const refreshKey = ref(0);
const toggleTheme = () => {
isDark.value = !isDark.value;
@@ -54,6 +58,10 @@ const toggleTheme = () => {
localStorage.setItem('theme', theme);
};
const refreshPage = () => {
refreshKey.value++;
};
onMounted(async () => {
await loadConfig();

View File

@@ -18,20 +18,9 @@
@change="loadHistory">
<option value="1h">Last 1 Hour (30s agg)</option>
<option value="3h">Last 3 Hours (1m agg)</option>
<option value="6h">Last 6 Hours (1m agg)</option>
<option value="12h">Last 12 Hours (1m agg)</option>
<option value="24h">Last 24 Hours (1m agg)</option>
<option disabled></option>
<option value="1d">Last 1 Day (15m agg)</option>
<option value="2d">Last 2 Days (15m agg)</option>
<option value="3d">Last 3 Days (15m agg)</option>
<option disabled></option>
<option value="4d">Last 4 Days (1h agg)</option>
<option value="5d">Last 5 Days (1h agg)</option>
<option value="6d">Last 6 Days (1h agg)</option>
<option value="24h">Last 24 Hours (15m agg)</option>
<option value="7d">Last 7 Days (1h agg)</option>
<option value="14d">Last 14 Days (1h agg)</option>
<option value="30d">Last 1 Month (1h agg)</option>
<option value="30d">Last 30 Days (6h agg)</option>
</select>
</div>

View File

@@ -178,7 +178,7 @@ const expiringCertsList = ref([]);
let cachedHistory = null;
// Helpers
const MAX_CHART_POINTS = 48;
const MAX_CHART_POINTS = 96;
const loadAnalytics = async () => {
loading.analytics = true;