Files
iDRAC_Info/backend/routes/jobs.py
2025-10-16 15:06:50 +09:00

279 lines
8.8 KiB
Python

"""
Flask Blueprint for iDRAC Job Monitoring (Redfish 버전)
기존 routes/jobs.py 또는 backend/routes/jobs.py를 이 파일로 교체하세요.
"""
import time
import logging
from flask import Blueprint, render_template, jsonify, request
from flask_login import login_required
from backend.services.idrac_jobs import (
scan_all,
parse_ip_list,
load_ip_list,
LRUJobCache,
is_active_status,
is_done_status,
parse_iso_datetime,
iso_now
)
import os
logger = logging.getLogger(__name__)
# Create the blueprint; all routes below are mounted under /jobs.
jobs_bp = Blueprint("jobs", __name__, url_prefix="/jobs")
# Job cache configuration (module-global, overridable via environment).
MAX_CACHE_SIZE = int(os.getenv("MAX_CACHE_SIZE", "10000"))  # max tracked job entries
CACHE_GC_INTERVAL = int(os.getenv("CACHE_GC_INTERVAL", "3600"))  # seconds between cache GC passes
JOB_GRACE_MINUTES = int(os.getenv("JOB_GRACE_MINUTES", "60"))  # how long completed jobs stay visible
JOB_RECENCY_HOURS = int(os.getenv("JOB_RECENCY_HOURS", "24"))  # ignore non-active jobs older than this
JOB_CACHE = LRUJobCache(max_size=MAX_CACHE_SIZE)  # process-wide job tracking cache
# ────────────────────────────────────────────────────────────
# Routes
# ────────────────────────────────────────────────────────────
@jobs_bp.route("", methods=["GET"])
@login_required
def jobs_page():
"""메인 페이지"""
return render_template("jobs.html")
@jobs_bp.route("/config", methods=["GET"])
@login_required
def jobs_config():
"""프론트엔드 설정 제공"""
return jsonify({
"ok": True,
"config": {
"grace_minutes": JOB_GRACE_MINUTES,
"recency_hours": JOB_RECENCY_HOURS,
"poll_interval_ms": int(os.getenv("POLL_INTERVAL_MS", "10000")),
}
})
@jobs_bp.route("/iplist", methods=["GET"])
@login_required
def get_ip_list():
"""IP 목록 조회 (파일에서)"""
try:
ips = load_ip_list()
return jsonify({
"ok": True,
"ips": ips,
"count": len(ips)
})
except Exception as e:
logger.exception("Failed to load IP list")
return jsonify({
"ok": False,
"error": str(e)
}), 500
@jobs_bp.route("/scan", methods=["POST"])
@login_required
def scan_jobs():
"""
Job 스캔 및 모니터링
Request Body:
{
"ips": List[str] (optional),
"method": "redfish" (기본값),
"recency_hours": int (기본: 24),
"grace_minutes": int (기본: 60),
"include_tracked_done": bool (기본: True)
}
Response:
{
"ok": True,
"count": int,
"items": [
{
"ip": str,
"ok": bool,
"error": str (if not ok),
"jobs": List[Dict]
}
]
}
"""
data = request.get_json(silent=True) or {}
# IP 목록
ip_input = data.get("ips")
if ip_input:
ips = parse_ip_list("\n".join(ip_input) if isinstance(ip_input, list) else str(ip_input))
else:
ips = load_ip_list()
if not ips:
return jsonify({
"ok": False,
"error": "No IPs provided",
"items": []
}), 400
# 파라미터
method = data.get("method", "redfish") # redfish가 기본값
recency_hours = int(data.get("recency_hours", JOB_RECENCY_HOURS))
grace_minutes = int(data.get("grace_minutes", JOB_GRACE_MINUTES))
include_tracked_done = bool(data.get("include_tracked_done", True))
grace_sec = grace_minutes * 60
cutoff = time.time() - recency_hours * 3600
# 현재 IP 목록과 다른 캐시 항목 제거
JOB_CACHE.clear_for_ips(set(ips))
# 스캔 실행
try:
items = scan_all(ips, method=method)
except Exception as e:
logger.exception("Scan failed")
return jsonify({
"ok": False,
"error": str(e),
"items": []
}), 500
now = time.time()
# 캐시 업데이트
for item in items:
ip = item.get("ip", "")
if not item.get("ok") or not isinstance(item.get("jobs"), list):
continue
for job in item["jobs"]:
status = job.get("Status")
message = job.get("Message")
active_now = is_active_status(status, message)
done_now = is_done_status(status)
# 시작 시간 파싱
start_ts = parse_iso_datetime(job.get("StartTime"))
# 리센시 판정
if not active_now:
if start_ts is None or start_ts < cutoff:
continue
# 캐시 키 생성
key = _make_cache_key(ip, job)
entry = JOB_CACHE.get(key)
if entry is None:
JOB_CACHE.set(key, {
"record": dict(job),
"first_seen_active": (now if active_now else None),
"became_done_at": (now if done_now else None),
"first_seen": now,
"last_seen": now,
"start_ts": start_ts,
})
else:
entry["record"] = dict(job)
entry["last_seen"] = now
if active_now and not entry.get("first_seen_active"):
entry["first_seen_active"] = now
if done_now and not entry.get("became_done_at"):
entry["became_done_at"] = now
elif not done_now:
entry["became_done_at"] = None
if start_ts:
entry["start_ts"] = start_ts
JOB_CACHE.set(key, entry)
# 응답 생성
out_items = []
for item in items:
ip = item.get("ip", "")
shown_jobs = []
# 현재 Active Job
current_active = []
if item.get("ok") and isinstance(item.get("jobs"), list):
for job in item["jobs"]:
if is_active_status(job.get("Status"), job.get("Message")):
key = _make_cache_key(ip, job)
if key in JOB_CACHE.keys():
current_active.append(JOB_CACHE.get(key)["record"])
if current_active:
shown_jobs = current_active
else:
# Active가 없을 때: 추적된 최근 완료 Job 표시
if include_tracked_done:
for key in JOB_CACHE.keys():
if key[0] != ip:
continue
entry = JOB_CACHE.get(key)
if not entry:
continue
start_ok = (entry.get("start_ts") or 0) >= cutoff
done_at = entry.get("became_done_at")
done_ok = bool(done_at and now - done_at <= grace_sec)
still_active = entry.get("became_done_at") is None
if still_active and start_ok:
shown_jobs.append(entry["record"])
elif done_ok and start_ok:
rec = dict(entry["record"])
rec["RecentlyCompleted"] = True
rec["CompletedAt"] = iso_now()
shown_jobs.append(rec)
out_items.append({
"ip": ip,
"ok": item.get("ok"),
"error": item.get("error"),
"jobs": sorted(shown_jobs, key=lambda r: r.get("JID", ""))
})
# 캐시 GC (조건부)
if now - JOB_CACHE.last_gc >= CACHE_GC_INTERVAL:
JOB_CACHE.gc(max_age_seconds=24 * 3600)
return jsonify({
"ok": True,
"count": len(out_items),
"items": out_items
})
def _make_cache_key(ip: str, job: dict):
"""캐시 키 생성"""
jid = (job.get("JID") or "").strip()
if jid:
return (ip, jid)
name = (job.get("Name") or "").strip()
return (ip, f"NOJID::{name}")
# ────────────────────────────────────────────────────────────
# Register function matching the project's existing pattern
# ────────────────────────────────────────────────────────────
def register_jobs_routes(app):
    """Register the iDRAC job-monitoring blueprint on *app*.

    Matches the project's existing route-registration pattern (an explicit
    register function rather than registration at import time).

    Args:
        app: Flask application instance the /jobs blueprint is attached to.
    """
    # Fixed: removed an unused local `from flask import Flask` import that
    # served no purpose (Flask was never referenced in this function).
    app.register_blueprint(jobs_bp)
    logger.info("Jobs routes registered at /jobs")