Initial commit

This commit is contained in:
Антон
2026-01-09 01:05:50 +03:00
commit c9af0a5bb1
28 changed files with 3934 additions and 0 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

32
APP/config.ini Normal file
View File

@@ -0,0 +1,32 @@
[api]
host = 0.0.0.0
port = 5000
debug = false
[openvpn_monitor]
log_path = /etc/openvpn/openvpn-status.log
db_path = /opt/ovpmon/openvpn_monitor.db
check_interval = 10
data_retention_days = 90
cleanup_interval_hours = 24
[logging]
level = INFO
log_file = /opt/ovpmon/openvpn_monitor.log
[visualization]
refresh_interval = 5
max_display_rows = 50
[certificates]
certificates_path = /opt/ovpn/pki/issued
certificate_extensions = crt
[retention]
raw_retention_days = 7
agg_5m_retention_days = 14
agg_15m_retention_days = 28
agg_1h_retention_days = 90
agg_6h_retention_days = 180
agg_1d_retention_days = 365

91
APP/db.py Normal file
View File

@@ -0,0 +1,91 @@
import sqlite3
import configparser
import os
import logging
class DatabaseManager:
    """Owns the SQLite configuration and schema for the OpenVPN monitor.

    Resolves the database path from an INI config file and creates the
    full schema (clients, raw usage history, rollup tables) on demand.
    """

    def __init__(self, config_file='config.ini'):
        """
        Args:
            config_file: Path to the INI configuration file. A missing
                file is tolerated; defaults are used instead.
        """
        self.config_file = config_file
        self.config = configparser.ConfigParser()
        self.logger = logging.getLogger(__name__)
        self.load_config()

    def load_config(self):
        """Read the config file (if present) and resolve the DB path."""
        if os.path.exists(self.config_file):
            self.config.read(self.config_file)
        # Default to a file in the working directory when the option is absent.
        self.db_path = self.config.get('openvpn_monitor', 'db_path', fallback='openvpn_monitor.db')

    def get_connection(self):
        """Get a new database connection to the configured path."""
        return sqlite3.connect(self.db_path)

    def init_database(self):
        """Initialize the database schema (idempotent: CREATE IF NOT EXISTS)."""
        # Ensure the parent directory exists; exist_ok=True avoids the
        # race-prone exists()-then-makedirs() sequence.
        db_dir = os.path.dirname(self.db_path)
        if db_dir:
            try:
                os.makedirs(db_dir, exist_ok=True)
            except OSError:
                # Best effort only: sqlite3.connect() below will surface a
                # clear error if the directory is truly unusable.
                pass
        self.logger.info(f"Using database: {self.db_path}")
        conn = self.get_connection()
        cursor = conn.cursor()
        try:
            # 1. Clients table: one row per common name with running totals
            # plus the last raw counter values (used for delta computation).
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS clients (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    common_name TEXT UNIQUE NOT NULL,
                    real_address TEXT,
                    status TEXT DEFAULT 'Active',
                    total_bytes_received INTEGER DEFAULT 0,
                    total_bytes_sent INTEGER DEFAULT 0,
                    last_bytes_received INTEGER DEFAULT 0,
                    last_bytes_sent INTEGER DEFAULT 0,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    last_activity TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                )
            ''')
            # 2. Raw usage history: per-sample byte deltas and Mbps rates.
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS usage_history (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    client_id INTEGER,
                    timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    bytes_received INTEGER,
                    bytes_sent INTEGER,
                    bytes_received_rate_mbps REAL,
                    bytes_sent_rate_mbps REAL,
                    FOREIGN KEY (client_id) REFERENCES clients (id)
                )
            ''')
            cursor.execute('CREATE INDEX IF NOT EXISTS idx_usage_ts ON usage_history(timestamp)')
            # 3. Rollup tables: identical shape, one row per (bucket, client).
            # Names come from this fixed list, so the f-string DDL is safe.
            tables = ['stats_5min', 'stats_15min', 'stats_hourly', 'stats_6h', 'stats_daily']
            for table in tables:
                cursor.execute(f'''
                    CREATE TABLE IF NOT EXISTS {table} (
                        timestamp TEXT NOT NULL,
                        client_id INTEGER NOT NULL,
                        bytes_received INTEGER DEFAULT 0,
                        bytes_sent INTEGER DEFAULT 0,
                        PRIMARY KEY (timestamp, client_id),
                        FOREIGN KEY (client_id) REFERENCES clients (id)
                    )
                ''')
                cursor.execute(f'CREATE INDEX IF NOT EXISTS idx_{table}_ts ON {table}(timestamp)')
            conn.commit()
            self.logger.info("Database initialized with full schema")
        except Exception as e:
            self.logger.error(f"Database initialization error: {e}")
        finally:
            conn.close()

574
APP/openvpn_api_v3.py Normal file
View File

@@ -0,0 +1,574 @@
import sqlite3
import configparser
from datetime import datetime, timedelta, timezone
from flask import Flask, jsonify, request
from flask_cors import CORS
import logging
import subprocess
import os
from pathlib import Path
import re
from db import DatabaseManager
# Set up module-wide logging (stream only; file logging lives in the gatherer)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
app = Flask(__name__)
CORS(app)  # Enable CORS for all routes so dashboards served from other origins can call this API
class OpenVPNAPI:
    """Read-side facade over the monitor database and the PKI directory.

    Serves current client stats, per-client history (with automatic
    resolution selection across the rollup tables), system totals,
    dashboard analytics, and certificate metadata.
    """

    def __init__(self, config_file='config.ini'):
        self.db_manager = DatabaseManager(config_file)
        self.config = configparser.ConfigParser()
        self.config.read(config_file)
        self.certificates_path = self.config.get('certificates', 'certificates_path', fallback='/etc/openvpn/certs')
        self.cert_extensions = self.config.get('certificates', 'certificate_extensions', fallback='crt,pem,key').split(',')

    def get_db_connection(self):
        """Get a database connection"""
        return self.db_manager.get_connection()

    # --- CERTIFICATE HANDLING (logic kept as-is) ---
    def parse_openssl_date(self, date_str):
        """Parse an openssl 'notAfter' date like 'Jan  9 01:05:50 2026 GMT'.

        Returns datetime.min when the string cannot be parsed.
        """
        try:
            parts = date_str.split()
            # openssl left-pads single-digit days; restore the padding that
            # str.split() collapsed so '%d' parses consistently.
            if len(parts[1]) == 1:
                parts[1] = f' {parts[1]}'
            normalized_date = ' '.join(parts)
            return datetime.strptime(normalized_date, '%b %d %H:%M:%S %Y GMT')
        except (ValueError, IndexError):
            # IndexError covers strings with fewer than two tokens.
            try:
                return datetime.strptime(date_str, '%b %d %H:%M:%S %Y %Z')
            except ValueError:
                logger.warning(f"Could not parse date: {date_str}")
                return datetime.min

    def calculate_days_remaining(self, not_after_str):
        """Render remaining validity as 'N days' or 'Expired (N days ago)'."""
        if not_after_str == 'N/A':
            return 'N/A'
        try:
            expiration_date = self.parse_openssl_date(not_after_str)
            if expiration_date == datetime.min:
                return 'N/A'
            # NOTE(review): compares a GMT timestamp against naive local
            # time, so the day count can be off by the local UTC offset.
            days_remaining = (expiration_date - datetime.now()).days
            if days_remaining < 0:
                return f"Expired ({abs(days_remaining)} days ago)"
            else:
                return f"{days_remaining} days"
        except Exception:
            return 'N/A'

    def extract_cert_info(self, cert_file):
        """Extract subject/CN/expiry metadata from one certificate file.

        Shells out to the openssl CLI (list argv, shell=False). Returns a
        dict, or None when the file cannot be processed.
        """
        try:
            result = subprocess.run(['openssl', 'x509', '-in', cert_file, '-noout', '-text'],
                                    capture_output=True, text=True, check=True)
            output = result.stdout
            data = {'file': os.path.basename(cert_file), 'file_path': cert_file, 'subject': 'N/A',
                    'issuer': 'N/A', 'not_after': 'N/A'}
            for line in output.split('\n'):
                line = line.strip()
                if line.startswith('Subject:'):
                    data['subject'] = line.split('Subject:', 1)[1].strip()
                    cn_match = re.search(r'CN=([^,]+)', data['subject'])
                    if cn_match:
                        data['common_name'] = cn_match.group(1)
                elif 'Not After' in line:
                    data['not_after'] = line.split(':', 1)[1].strip()
            # sort_date lets the frontend order certificates by expiry.
            if data['not_after'] != 'N/A':
                data['sort_date'] = self.parse_openssl_date(data['not_after']).isoformat()
            else:
                data['sort_date'] = datetime.min.isoformat()
            data['days_remaining'] = self.calculate_days_remaining(data['not_after'])
            data['is_expired'] = 'Expired' in data['days_remaining']
            return data
        except Exception as e:
            logger.error(f"Error processing {cert_file}: {e}")
            return None

    def get_certificates_info(self):
        """Collect metadata for every certificate under certificates_path."""
        cert_path = Path(self.certificates_path)
        if not cert_path.exists():
            return []
        cert_files = []
        for ext in self.cert_extensions:
            cert_files.extend(cert_path.rglob(f'*.{ext.strip()}'))
        cert_data = []
        for cert_file in cert_files:
            data = self.extract_cert_info(str(cert_file))
            if data:
                cert_data.append(data)
        return cert_data
    # -----------------------------------------------------------

    def get_current_stats(self):
        """Get current statistics for all clients.

        Rates are taken as MAX(rate) over a short trailing window (30s in
        the SQL below) rather than the single newest sample: this filters
        out spurious zeroes caused by log-write skew and reflects the real
        channel throughput.
        """
        conn = self.get_db_connection()
        cursor = conn.cursor()
        try:
            cursor.execute('''
                SELECT
                    c.common_name,
                    c.real_address,
                    c.status,
                    CASE
                        WHEN c.status = 'Active' THEN 'N/A'
                        ELSE strftime('%Y-%m-%d %H:%M:%S', c.last_activity)
                    END as last_activity,
                    c.total_bytes_received,
                    c.total_bytes_sent,
                    (SELECT MAX(uh.bytes_received_rate_mbps)
                     FROM usage_history uh
                     WHERE uh.client_id = c.id
                     AND uh.timestamp >= datetime('now', '-30 seconds')) as current_recv_rate,
                    (SELECT MAX(uh.bytes_sent_rate_mbps)
                     FROM usage_history uh
                     WHERE uh.client_id = c.id
                     AND uh.timestamp >= datetime('now', '-30 seconds')) as current_sent_rate,
                    strftime('%Y-%m-%d %H:%M:%S', c.updated_at) as last_updated
                FROM clients c
                ORDER BY c.status DESC, c.common_name
            ''')
            columns = [column[0] for column in cursor.description]
            data = []
            for row in cursor.fetchall():
                data.append(dict(zip(columns, row)))
            return data
        except Exception as e:
            logger.error(f"Error fetching data: {e}")
            return []
        finally:
            conn.close()

    def get_client_history(self, common_name, start_date=None, end_date=None, resolution='auto'):
        """Fetch a client's history with TSDB-style aggregation.

        Automatically picks the backing table (raw, hourly, 6h, daily)
        from the requested period unless an explicit resolution is given.
        Returns {'data': rows, 'meta': {...}} or {'data': [], 'error': msg}.
        """
        conn = self.get_db_connection()
        cursor = conn.cursor()
        # 1. Establish the time window (defaults to the last 24 hours).
        if not end_date:
            end_date = datetime.now()
        if not start_date:
            start_date = end_date - timedelta(hours=24)
        # Accept both datetime objects and preformatted strings; an
        # unparseable string is left alone and surfaces via the error path.
        if isinstance(start_date, str):
            try:
                start_date = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')
            except ValueError:
                pass
        if isinstance(end_date, str):
            try:
                end_date = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')
            except ValueError:
                pass
        duration_hours = (end_date - start_date).total_seconds() / 3600
        # 2. Map resolution names onto backing tables.
        table_map = {
            'raw': 'usage_history',
            '5min': 'stats_5min',
            '15min': 'stats_15min',
            'hourly': 'stats_hourly',
            '6h': 'stats_6h',
            'daily': 'stats_daily'
        }
        target_table = 'usage_history'
        # 3. On 'auto', pick a table from the window size.
        if resolution == 'auto':
            if duration_hours <= 24:
                target_table = 'usage_history'   # raw samples (one-day chart)
            elif duration_hours <= 168:          # up to 7 days
                target_table = 'stats_hourly'    # hourly buckets
            elif duration_hours <= 2160:         # up to ~3 months
                target_table = 'stats_6h'        # 6-hour buckets
            else:
                target_table = 'stats_daily'     # daily buckets
        elif resolution in table_map:
            target_table = table_map[resolution]
        # Fall back to raw data when the rollup table was never migrated.
        try:
            cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{target_table}'")
            if not cursor.fetchone():
                logger.warning(f"Table {target_table} missing, fallback to usage_history")
                target_table = 'usage_history'
        except sqlite3.Error:
            pass
        try:
            # 4. Build the query. Aggregated tables have no rate columns,
            # so report 0 for the rates there.
            is_aggregated = target_table != 'usage_history'
            if is_aggregated:
                query = f'''
                    SELECT
                        t.timestamp,
                        t.bytes_received,
                        t.bytes_sent,
                        0 as bytes_received_rate_mbps,
                        0 as bytes_sent_rate_mbps
                    FROM {target_table} t
                    JOIN clients c ON t.client_id = c.id
                    WHERE c.common_name = ? AND t.timestamp BETWEEN ? AND ?
                    ORDER BY t.timestamp ASC
                '''
            else:
                query = f'''
                    SELECT
                        uh.timestamp,
                        uh.bytes_received,
                        uh.bytes_sent,
                        uh.bytes_received_rate_mbps,
                        uh.bytes_sent_rate_mbps
                    FROM usage_history uh
                    JOIN clients c ON uh.client_id = c.id
                    WHERE c.common_name = ? AND uh.timestamp BETWEEN ? AND ?
                    ORDER BY uh.timestamp ASC
                '''
            s_str = start_date.strftime('%Y-%m-%d %H:%M:%S')
            e_str = end_date.strftime('%Y-%m-%d %H:%M:%S')
            cursor.execute(query, (common_name, s_str, e_str))
            columns = [column[0] for column in cursor.description]
            data = [dict(zip(columns, row)) for row in cursor.fetchall()]
            return {
                'data': data,
                'meta': {
                    'resolution_used': target_table,
                    'record_count': len(data),
                    'start': s_str,
                    'end': e_str
                }
            }
        except Exception as e:
            logger.error(f"Error fetching history: {e}")
            return {'data': [], 'error': str(e)}
        finally:
            conn.close()

    def get_system_stats(self):
        """System-wide totals: client counts and cumulative traffic."""
        conn = self.get_db_connection()
        cursor = conn.cursor()
        try:
            cursor.execute('''
                SELECT
                    COUNT(*) as total_clients,
                    SUM(CASE WHEN status = 'Active' THEN 1 ELSE 0 END) as active_clients,
                    COALESCE(SUM(total_bytes_received), 0) as total_bytes_received,
                    COALESCE(SUM(total_bytes_sent), 0) as total_bytes_sent
                FROM clients
            ''')
            result = cursor.fetchone()
            columns = [column[0] for column in cursor.description]
            if result:
                stats = dict(zip(columns, result))
                # Add human-readable GB figures for the dashboard.
                stats['total_received_gb'] = round(stats['total_bytes_received'] / (1024**3), 2)
                stats['total_sent_gb'] = round(stats['total_bytes_sent'] / (1024**3), 2)
                return stats
            return {}
        except Exception as e:
            logger.error(f"Error system stats: {e}")
            return {}
        finally:
            conn.close()

    def get_analytics_data(self, range_arg='24h'):
        """
        Get aggregated analytics with dynamic resolution.
        range_arg: '24h', '7d', '30d'
        """
        conn = self.get_db_connection()
        cursor = conn.cursor()
        analytics = {
            'max_concurrent_24h': 0,
            'top_clients_24h': [],
            'global_history_24h': [],
            'traffic_distribution': {'rx': 0, 'tx': 0}
        }
        # 1. Pick the backing table and window for the requested range.
        target_table = 'usage_history'
        hours = 24
        if range_arg == '7d':
            target_table = 'stats_hourly'
            hours = 168  # 7 * 24
        elif range_arg == '30d':
            target_table = 'stats_6h'  # stats_daily would also work
            hours = 720  # 30 * 24
        try:
            # Fall back to raw data if the rollup table is absent.
            try:
                cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{target_table}'")
                if not cursor.fetchone():
                    target_table = 'usage_history'
            except sqlite3.Error:
                pass
            # 2. Global history (chart). Aggregated tables lack the rate
            # columns, so substitute zeroes there. 'hours' and the table
            # name are internal values, never user input, so the f-string
            # interpolation below is not injectable.
            if target_table == 'usage_history':
                rate_cols = "SUM(bytes_received_rate_mbps) as total_rx_rate, SUM(bytes_sent_rate_mbps) as total_tx_rate,"
            else:
                rate_cols = "0 as total_rx_rate, 0 as total_tx_rate,"
            query_hist = f'''
                SELECT
                    timestamp,
                    SUM(bytes_received) as total_rx,
                    SUM(bytes_sent) as total_tx,
                    {rate_cols}
                    COUNT(DISTINCT client_id) as active_count
                FROM {target_table}
                WHERE timestamp >= datetime('now', '-{hours} hours')
                GROUP BY timestamp
                ORDER BY timestamp ASC
            '''
            cursor.execute(query_hist)
            rows = cursor.fetchall()
            if rows:
                columns = [col[0] for col in cursor.description]
                analytics['global_history_24h'] = [dict(zip(columns, row)) for row in rows]
                # Peak number of simultaneously active clients.
                max_clients = 0
                for row in analytics['global_history_24h']:
                    if row['active_count'] > max_clients:
                        max_clients = row['active_count']
                analytics['max_concurrent_24h'] = max_clients
            # 3. Top-3 heaviest clients over the same window. Using the
            # same table keeps the numbers consistent with the chart and
            # avoids a heavy month-long scan of usage_history.
            query_top = f'''
                SELECT
                    c.common_name,
                    SUM(t.bytes_received) as rx,
                    SUM(t.bytes_sent) as tx,
                    (SUM(t.bytes_received) + SUM(t.bytes_sent)) as total_traffic
                FROM {target_table} t
                JOIN clients c ON t.client_id = c.id
                WHERE t.timestamp >= datetime('now', '-{hours} hours')
                GROUP BY c.id
                ORDER BY total_traffic DESC
                LIMIT 3
            '''
            cursor.execute(query_top)
            top_cols = [col[0] for col in cursor.description]
            analytics['top_clients_24h'] = [dict(zip(top_cols, row)) for row in cursor.fetchall()]
            # 4. Overall RX/TX split for the pie chart.
            query_dist = f'''
                SELECT
                    SUM(bytes_received) as rx,
                    SUM(bytes_sent) as tx
                FROM {target_table}
                WHERE timestamp >= datetime('now', '-{hours} hours')
            '''
            cursor.execute(query_dist)
            dist_res = cursor.fetchone()
            if dist_res:
                analytics['traffic_distribution'] = {'rx': dist_res[0] or 0, 'tx': dist_res[1] or 0}
            return analytics
        except Exception as e:
            logger.error(f"Analytics error: {e}")
            return analytics
        finally:
            conn.close()
# Module-level API facade shared by all route handlers below.
api = OpenVPNAPI()
# --- ROUTES ---
@app.route('/api/v1/stats', methods=['GET'])
def get_stats():
    """Get current statistics for all clients"""
    try:
        clients = api.get_current_stats()
        # Enrich each row in place with MB totals and zero-defaulted rates.
        mb = 1024 * 1024
        for entry in clients:
            entry['total_received_mb'] = round((entry['total_bytes_received'] or 0) / mb, 2)
            entry['total_sent_mb'] = round((entry['total_bytes_sent'] or 0) / mb, 2)
            entry['current_recv_rate_mbps'] = entry['current_recv_rate'] or 0
            entry['current_sent_rate_mbps'] = entry['current_sent_rate'] or 0
        return jsonify({
            'success': True,
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'data': clients,
            'count': len(clients)
        })
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/v1/stats/system', methods=['GET'])
def get_system_stats():
    """Get system-wide statistics (counts and cumulative traffic)."""
    try:
        summary = api.get_system_stats()
        body = {
            'success': True,
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'data': summary
        }
        return jsonify(body)
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/v1/stats/<string:common_name>', methods=['GET'])
def get_client_stats(common_name):
    """
    Get detailed stats for a client.
    Query Params:
        - range: '24h' (default), '7d', '30d', '1y' OR custom dates
        - resolution: 'auto' (default), 'raw', '5min', 'hourly', 'daily'
    """
    try:
        range_arg = request.args.get('range', default='24h')
        resolution = request.args.get('resolution', default='auto')
        # Use UTC because SQLite stores timestamps in UTC.
        end_date = datetime.now(timezone.utc)
        start_date = end_date - timedelta(hours=24)
        # Parse the range suffix. A malformed numeric part (e.g. '?range=xh')
        # now falls back to the 24h default instead of raising ValueError
        # and surfacing as an HTTP 500.
        try:
            if range_arg.endswith('h'):
                start_date = end_date - timedelta(hours=int(range_arg[:-1]))
            elif range_arg.endswith('d'):
                start_date = end_date - timedelta(days=int(range_arg[:-1]))
            elif range_arg.endswith('y'):
                start_date = end_date - timedelta(days=int(range_arg[:-1]) * 365)
        except ValueError:
            logger.warning(f"Invalid range parameter: {range_arg}")
        # Current snapshot for this client.
        all_stats = api.get_current_stats()
        client_data = next((c for c in all_stats if c['common_name'] == common_name), None)
        if not client_data:
            return jsonify({'success': False, 'error': 'Client not found'}), 404
        # Historical series (resolution picked automatically unless forced).
        history_result = api.get_client_history(
            common_name,
            start_date=start_date,
            end_date=end_date,
            resolution=resolution
        )
        response = {
            'common_name': client_data['common_name'],
            'real_address': client_data['real_address'],
            'status': client_data['status'],
            'totals': {
                'received_mb': round((client_data['total_bytes_received'] or 0) / (1024*1024), 2),
                'sent_mb': round((client_data['total_bytes_sent'] or 0) / (1024*1024), 2)
            },
            'current_rates': {
                'recv_mbps': client_data['current_recv_rate'] or 0,
                'sent_mbps': client_data['current_sent_rate'] or 0
            },
            'last_activity': client_data['last_activity'],
            'history': history_result.get('data', []),
            'meta': history_result.get('meta', {})
        }
        # A UTC timestamp string keeps JS `new Date()` parsing unambiguous.
        return jsonify({
            'success': True,
            'timestamp': datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
            'data': response
        })
    except Exception as e:
        logger.error(f"API Error: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/v1/certificates', methods=['GET'])
def get_certificates():
    """Return metadata for every certificate found on disk."""
    try:
        return jsonify({'success': True, 'data': api.get_certificates_info()})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/v1/clients', methods=['GET'])
def get_clients_list():
    """Return a lightweight name/status listing of all known clients."""
    try:
        rows = api.get_current_stats()
        listing = [
            {'common_name': row['common_name'], 'status': row['status']}
            for row in rows
        ]
        return jsonify({'success': True, 'data': listing})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/v1/health', methods=['GET'])
def health_check():
    """Liveness probe: healthy iff a DB connection can be opened."""
    try:
        probe = api.get_db_connection()
        probe.close()
        return jsonify({'success': True, 'status': 'healthy'})
    except Exception as e:
        return jsonify({'success': False, 'status': 'unhealthy', 'error': str(e)}), 500
@app.route('/api/v1/analytics', methods=['GET'])
def get_analytics():
    """Get dashboard analytics data"""
    try:
        requested = request.args.get('range', default='24h')
        # Whitelist the range value; anything unknown degrades to 24h.
        selected_range = requested if requested in ('24h', '7d', '30d') else '24h'
        payload = api.get_analytics_data(selected_range)
        return jsonify({
            'success': True,
            'timestamp': datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
            'data': payload,
            'range': selected_range
        })
    except Exception as e:
        logger.error(f"Error in analytics endpoint: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
if __name__ == "__main__":
    host = api.config.get('api', 'host', fallback='0.0.0.0')
    # NOTE: the config's api.port is deliberately ignored here; 5001 avoids
    # clashing with anything already listening on 5000.
    port = 5001
    debug = api.config.getboolean('api', 'debug', fallback=False)
    logger.info(f"Starting API on {host}:{port}")
    app.run(host=host, port=port, debug=debug)

510
APP/openvpn_gatherer_v3.py Normal file
View File

@@ -0,0 +1,510 @@
import sqlite3
import time
import os
import configparser
import logging
from datetime import datetime, timedelta
from db import DatabaseManager
# --- DATA AGGREGATION CLASS (TSDB LOGIC) ---
class TimeSeriesAggregator:
    """Fans per-client traffic increments out into fixed time buckets.

    Each call to :meth:`aggregate` adds the byte deltas to the 5-minute,
    15-minute, hourly, 6-hour and daily rollup tables.
    """

    def __init__(self, db_provider):
        # db_provider: zero-argument callable returning a fresh sqlite3
        # connection (e.g. DatabaseManager.get_connection).
        self.db_provider = db_provider
        self.logger = logging.getLogger(__name__)

    def _upsert_bucket(self, cursor, table, timestamp, client_id, rx, tx):
        """Insert or atomically increment one (timestamp, client) bucket."""
        cursor.execute(f'''
            INSERT INTO {table} (timestamp, client_id, bytes_received, bytes_sent)
            VALUES (?, ?, ?, ?)
            ON CONFLICT(timestamp, client_id) DO UPDATE SET
                bytes_received = bytes_received + excluded.bytes_received,
                bytes_sent = bytes_sent + excluded.bytes_sent
        ''', (timestamp, client_id, rx, tx))

    def aggregate(self, client_updates):
        """Distribute traffic increments across every aggregation level."""
        if not client_updates:
            return
        conn = self.db_provider()
        cursor = conn.cursor()
        now = datetime.now()
        fmt = '%Y-%m-%d %H:%M:%S'
        # Quantize "now" down to the start of each bucket size; the list
        # pairs each rollup table with its bucket timestamp.
        buckets = [
            ('stats_5min', now.replace(minute=now.minute - (now.minute % 5),
                                       second=0, microsecond=0).strftime(fmt)),
            ('stats_15min', now.replace(minute=now.minute - (now.minute % 15),
                                        second=0, microsecond=0).strftime(fmt)),
            ('stats_hourly', now.replace(minute=0, second=0, microsecond=0).strftime(fmt)),
            ('stats_6h', now.replace(hour=now.hour - (now.hour % 6), minute=0,
                                     second=0, microsecond=0).strftime(fmt)),
            ('stats_daily', now.replace(hour=0, minute=0, second=0, microsecond=0).strftime(fmt)),
        ]
        try:
            touched = 0
            for update in client_updates:
                cid = update.get('db_id')
                if cid is None:
                    continue  # client row was never resolved
                rx = update.get('bytes_received_inc', 0)
                tx = update.get('bytes_sent_inc', 0)
                if rx == 0 and tx == 0:
                    continue  # no traffic this cycle
                for table, stamp in buckets:
                    self._upsert_bucket(cursor, table, stamp, cid, rx, tx)
                touched += 1
            conn.commit()
            # Only log when something actually changed.
            if touched > 0:
                self.logger.debug(f"TS Aggregation: Updated buckets for {touched} clients")
        except Exception as e:
            self.logger.error(f"Error in TimeSeriesAggregator: {e}")
            conn.rollback()
        finally:
            conn.close()
# --- MAIN CLASS ---
class OpenVPNDataGatherer:
    """Polls the OpenVPN status log and persists per-client traffic data.

    Per cycle: parse the status log, compute byte deltas against stored
    counters, derive Mbps rates, store raw history, feed the time-series
    aggregator, and run a retention cleanup once per day.
    """

    def __init__(self, config_file='config.ini'):
        self.config = self.load_config(config_file)
        self.setup_logging()
        self.last_check_time = None
        # Seed the cleanup marker with yesterday so the first cycle of a
        # new day triggers a cleanup pass. (The original assigned this
        # twice; the duplicate line has been removed.)
        self.last_cleanup_date = (datetime.now() - timedelta(days=1)).date()
        self.db_manager = DatabaseManager(config_file)
        self.db_manager.init_database()
        # The aggregator only needs a way to obtain fresh connections.
        self.ts_aggregator = TimeSeriesAggregator(self.db_manager.get_connection)

    def load_config(self, config_file):
        """Load the configuration, creating or patching it with defaults."""
        config = configparser.ConfigParser()
        # Full default structure; missing sections/options are back-filled
        # into an existing file so upgrades pick up new settings.
        defaults = {
            'api': {
                'host': '0.0.0.0',
                'port': '5000',
                'debug': 'false'
            },
            'openvpn_monitor': {
                'log_path': '/var/log/openvpn/openvpn-status.log',
                'db_path': 'openvpn_monitor.db',
                'check_interval': '10',  # poll every 10 seconds
            },
            'logging': {
                'level': 'INFO',
                'log_file': 'openvpn_gatherer.log'
            },
            'retention': {
                'raw_retention_days': '7',     # 1 week
                'agg_5m_retention_days': '14',  # 2 weeks
                'agg_15m_retention_days': '28', # 4 weeks
                'agg_1h_retention_days': '90',  # 3 months
                'agg_6h_retention_days': '180', # 6 months
                'agg_1d_retention_days': '365'  # 12 months
            },
            'visualization': {
                'refresh_interval': '5',
                'max_display_rows': '50'
            },
            'certificates': {
                'certificates_path': '/opt/ovpn/pki/issued',
                'certificate_extensions': 'crt'
            }
        }
        try:
            if os.path.exists(config_file):
                config.read(config_file)
                # Back-fill any sections/options missing from an older file.
                updated = False
                for section, options in defaults.items():
                    if not config.has_section(section):
                        config.add_section(section)
                        updated = True
                    for key, val in options.items():
                        if not config.has_option(section, key):
                            config.set(section, key, val)
                            updated = True
                if updated:
                    with open(config_file, 'w') as f:
                        config.write(f)
                    print(f"Updated configuration file: {config_file}")
            else:
                # Create the file from scratch.
                for section, options in defaults.items():
                    config[section] = options
                with open(config_file, 'w') as f:
                    config.write(f)
                print(f"Created default configuration file: {config_file}")
        except Exception as e:
            print(f"Error loading config: {e}")
            # In-memory fallback so the gatherer can still run.
            for section, options in defaults.items():
                if not config.has_section(section):
                    config.add_section(section)
                for key, val in options.items():
                    config.set(section, key, val)
        return config

    def setup_logging(self):
        """Configure file + console logging from the [logging] section."""
        try:
            log_level = self.config.get('logging', 'level', fallback='INFO')
            log_file = self.config.get('logging', 'log_file', fallback='openvpn_gatherer.log')
            # Create the log directory if needed.
            log_dir = os.path.dirname(log_file)
            if log_dir and not os.path.exists(log_dir):
                os.makedirs(log_dir)
            logging.basicConfig(
                level=getattr(logging, log_level.upper()),
                format='%(asctime)s - %(levelname)s - %(message)s',
                handlers=[
                    logging.FileHandler(log_file),
                    logging.StreamHandler()
                ]
            )
            self.logger = logging.getLogger(__name__)
        except Exception as e:
            print(f"Logging setup failed: {e}")
            logging.basicConfig(level=logging.INFO)
            self.logger = logging.getLogger(__name__)

    def get_config_value(self, section, key, default=None):
        """Read a config option, returning *default* on any config error."""
        try:
            return self.config.get(section, key, fallback=default)
        except configparser.Error:
            # Narrowed from a bare except: only swallow config problems.
            return default

    def cleanup_old_data(self):
        """Delete data past the retention windows defined in config.ini."""
        self.logger.info("Starting data cleanup procedure...")
        conn = self.db_manager.get_connection()
        cursor = conn.cursor()
        # Mapping: table -> config key -> default (days).
        retention_rules = [
            ('usage_history', 'raw_retention_days', 7),
            ('stats_5min', 'agg_5m_retention_days', 14),
            ('stats_15min', 'agg_15m_retention_days', 28),
            ('stats_hourly', 'agg_1h_retention_days', 90),
            ('stats_6h', 'agg_6h_retention_days', 180),
            ('stats_daily', 'agg_1d_retention_days', 365),
        ]
        try:
            total_deleted = 0
            for table, config_key, default_days in retention_rules:
                days = int(self.get_config_value('retention', config_key, default_days))
                # days <= 0 means "keep forever" for that table.
                if days > 0:
                    cutoff_date = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d %H:%M:%S')
                    cursor.execute(f'DELETE FROM {table} WHERE timestamp < ?', (cutoff_date,))
                    deleted = cursor.rowcount
                    if deleted > 0:
                        self.logger.info(f"Cleaned {table}: removed {deleted} records older than {days} days")
                        total_deleted += deleted
            conn.commit()
            if total_deleted == 0:
                self.logger.info("Cleanup finished: nothing to delete")
        except Exception as e:
            self.logger.error(f"Cleanup Error: {e}")
            conn.rollback()
        finally:
            conn.close()

    def parse_log_file(self):
        """Parse an OpenVPN status log, version 2 (CSV format).

        Expects lines like:
        CLIENT_LIST,Common Name,Real Address,...,Bytes Received,Bytes Sent,...
        Returns a list of client dicts (empty on any failure).
        """
        log_path = self.get_config_value('openvpn_monitor', 'log_path', '/var/log/openvpn/openvpn-status.log')
        clients = []
        try:
            if not os.path.exists(log_path):
                self.logger.warning(f"Log file not found: {log_path}")
                return clients
            with open(log_path, 'r') as file:
                for line in file:
                    line = line.strip()
                    # Only client data rows are of interest.
                    if not line.startswith('CLIENT_LIST'):
                        continue
                    parts = line.split(',')
                    # V2 index map:
                    #   1: Common Name, 2: Real Address,
                    #   5: Bytes Received, 6: Bytes Sent
                    # parts[1] != 'Common Name' skips the header row.
                    if len(parts) >= 8 and parts[1] != 'Common Name':
                        try:
                            client = {
                                'common_name': parts[1].strip(),
                                'real_address': parts[2].strip(),
                                'bytes_received': int(parts[5].strip()),
                                'bytes_sent': int(parts[6].strip()),
                                'status': 'Active'
                            }
                            clients.append(client)
                        except (ValueError, IndexError) as e:
                            self.logger.warning(f"Error parsing client line: {e}")
            self.logger.debug(f"Parsed {len(clients)} active clients")
        except Exception as e:
            self.logger.error(f"Error parsing log file: {e}")
        return clients

    def update_client_status_and_bytes(self, active_clients):
        """Update client rows and compute traffic increments.

        Mutates each entry in *active_clients* with 'db_id',
        'bytes_received_inc' and 'bytes_sent_inc', and marks clients not
        present in the log as Disconnected. Returns the same list.
        """
        conn = self.db_manager.get_connection()
        cursor = conn.cursor()
        try:
            # Load the current state of every known client.
            cursor.execute('SELECT id, common_name, status, last_bytes_received, last_bytes_sent FROM clients')
            db_clients = {}
            for row in cursor.fetchall():
                db_clients[row[1]] = {
                    'id': row[0],
                    'status': row[2],
                    'last_bytes_received': row[3],
                    'last_bytes_sent': row[4]
                }
            active_names = set()
            for client in active_clients:
                name = client['common_name']
                active_names.add(name)
                curr_recv = client['bytes_received']
                curr_sent = client['bytes_sent']
                if name in db_clients:
                    # Existing client.
                    db_client = db_clients[name]
                    client['db_id'] = db_client['id']  # id for aggregator/history
                    # Detect a server/session restart (counter reset): when
                    # the current value is below the stored one, treat the
                    # whole current value as the delta.
                    if curr_recv < db_client['last_bytes_received']:
                        inc_recv = curr_recv
                        self.logger.info(f"Counter reset detected for {name} (Recv)")
                    else:
                        inc_recv = curr_recv - db_client['last_bytes_received']
                    if curr_sent < db_client['last_bytes_sent']:
                        inc_sent = curr_sent
                        self.logger.info(f"Counter reset detected for {name} (Sent)")
                    else:
                        inc_sent = curr_sent - db_client['last_bytes_sent']
                    # Persist the new totals and raw counters.
                    cursor.execute('''
                        UPDATE clients
                        SET status = 'Active',
                            real_address = ?,
                            total_bytes_received = total_bytes_received + ?,
                            total_bytes_sent = total_bytes_sent + ?,
                            last_bytes_received = ?,
                            last_bytes_sent = ?,
                            updated_at = CURRENT_TIMESTAMP,
                            last_activity = CURRENT_TIMESTAMP
                        WHERE id = ?
                    ''', (
                        client['real_address'],
                        inc_recv,
                        inc_sent,
                        curr_recv,
                        curr_sent,
                        db_client['id']
                    ))
                    client['bytes_received_inc'] = inc_recv
                    client['bytes_sent_inc'] = inc_sent
                else:
                    # Brand-new client.
                    cursor.execute('''
                        INSERT INTO clients
                        (common_name, real_address, status, total_bytes_received, total_bytes_sent, last_bytes_received, last_bytes_sent)
                        VALUES (?, ?, 'Active', 0, 0, ?, ?)
                    ''', (
                        name,
                        client['real_address'],
                        curr_recv,
                        curr_sent
                    ))
                    new_id = cursor.lastrowid
                    client['db_id'] = new_id
                    # First observation: record a zero increment (the
                    # pre-existing session traffic is deliberately skipped).
                    client['bytes_received_inc'] = 0
                    client['bytes_sent_inc'] = 0
                    self.logger.info(f"New client added: {name}")
            # Mark everyone absent from the log as disconnected.
            for name, db_client in db_clients.items():
                if name not in active_names and db_client['status'] == 'Active':
                    cursor.execute('''
                        UPDATE clients
                        SET status = 'Disconnected', updated_at = CURRENT_TIMESTAMP
                        WHERE id = ?
                    ''', (db_client['id'],))
                    self.logger.info(f"Client disconnected: {name}")
            conn.commit()
        except Exception as e:
            self.logger.error(f"Error updating client status: {e}")
            conn.rollback()
        finally:
            conn.close()
        return active_clients

    def calculate_rates(self, clients, time_diff):
        """Compute Mbps rates from byte increments over *time_diff* seconds."""
        if time_diff <= 0:
            time_diff = 1.0  # guard against division by zero
        # factor: (bytes * 8 bits) / (seconds * 1e6) -> megabits per second
        factor = 8 / (time_diff * 1_000_000)
        for client in clients:
            client['bytes_received_rate_mbps'] = client.get('bytes_received_inc', 0) * factor
            client['bytes_sent_rate_mbps'] = client.get('bytes_sent_inc', 0) * factor
        return clients

    def store_usage_history(self, clients):
        """Persist the high-resolution (raw) history samples."""
        if not clients:
            return
        conn = self.db_manager.get_connection()
        cursor = conn.cursor()
        try:
            for client in clients:
                # Explicit None check (consistent with the aggregator);
                # a falsy-but-valid id would otherwise be skipped.
                if client.get('db_id') is not None:
                    cursor.execute('''
                        INSERT INTO usage_history
                        (client_id, bytes_received, bytes_sent, bytes_received_rate_mbps, bytes_sent_rate_mbps)
                        VALUES (?, ?, ?, ?, ?)
                    ''', (
                        client['db_id'],
                        client.get('bytes_received_inc', 0),
                        client.get('bytes_sent_inc', 0),
                        client.get('bytes_received_rate_mbps', 0),
                        client.get('bytes_sent_rate_mbps', 0)
                    ))
            conn.commit()
        except Exception as e:
            self.logger.error(f"Error storing raw history: {e}")
            conn.rollback()
        finally:
            conn.close()

    def run_monitoring_cycle(self):
        """Execute one full monitoring cycle."""
        current_time = datetime.now()
        # 1. Read the currently connected clients.
        active_clients = self.parse_log_file()
        # 2. Update statuses and compute traffic deltas.
        clients_with_updates = self.update_client_status_and_bytes(active_clients)
        if clients_with_updates:
            # 3. Elapsed time since the previous cycle (nominal 10s at start).
            time_diff = 10.0
            if self.last_check_time:
                time_diff = (current_time - self.last_check_time).total_seconds()
            # 4. Derive rates.
            clients_rated = self.calculate_rates(clients_with_updates, time_diff)
            # 5. Store the raw history (feeds the realtime charts).
            self.store_usage_history(clients_rated)
            # 6. Roll into the TSDB buckets (5m, 15m, 1h, 6h, 1d).
            self.ts_aggregator.aggregate(clients_rated)
        self.last_check_time = current_time
        # 7. Run retention cleanup once per calendar day.
        if current_time.date() > self.last_cleanup_date:
            self.logger.info("New day detected. Initiating cleanup.")
            self.cleanup_old_data()
            self.last_cleanup_date = current_time.date()

    def start_monitoring(self):
        """Run monitoring cycles forever, sleeping check_interval between."""
        interval = int(self.get_config_value('openvpn_monitor', 'check_interval', 10))
        self.logger.info(f"Starting OpenVPN Monitoring. Interval: {interval}s")
        self.logger.info("Press Ctrl+C to stop")
        try:
            while True:
                self.run_monitoring_cycle()
                time.sleep(interval)
        except KeyboardInterrupt:
            self.logger.info("Monitoring stopped by user")
        except Exception as e:
            self.logger.error(f"Critical error in main loop: {e}")
if __name__ == "__main__":
    # Runs as a foreground daemon; KeyboardInterrupt (Ctrl+C) stops cleanly.
    gatherer = OpenVPNDataGatherer()
    gatherer.start_monitoring()

2
APP/requirements.txt Normal file
View File

@@ -0,0 +1,2 @@
Flask==3.0.0
Flask-Cors==4.0.0