petter2025 commited on
Commit
8705a00
·
verified ·
1 Parent(s): bc06d10

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -0
app.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from pydantic import BaseModel
3
+ from typing import Optional
4
+ import random
5
+ import time
6
+
7
# Application object; metadata below is surfaced in the generated OpenAPI
# schema and the interactive documentation pages.
app = FastAPI(
    title="ARF Sandbox API",
    description="Mock endpoint – does NOT use the real Bayesian engine. Simulated responses only.",
    version="1.0.0",
    docs_url="/docs",     # Swagger UI
    redoc_url="/redoc",   # ReDoc UI
)
14
+
15
+ # ---------- Request/Response Models ----------
16
class Metrics(BaseModel):
    """Optional telemetry snapshot attached to an evaluation request.

    All fields are optional; the mock evaluator never reads them, but they
    are accepted so callers can send realistic payloads.
    """

    latency_ms: Optional[float] = None  # request latency in milliseconds
    error_rate: Optional[float] = None  # presumably a 0.0-1.0 fraction — TODO confirm
    throughput: Optional[float] = None  # units unspecified here (req/s?) — TODO confirm
    cpu_usage: Optional[float] = None   # presumably 0.0-1.0 or percent — TODO confirm
21
+
22
class EvaluateRequest(BaseModel):
    """Input payload for POST /v1/evaluate."""

    service_name: str                  # service the event belongs to; must be non-empty
    event_type: str                    # e.g., "latency", "error_rate", "cpu_spike"
    severity: str                      # "low", "medium", "high", "critical"
    metrics: Optional[Metrics] = None  # optional telemetry snapshot (ignored by the mock)
    timestamp: Optional[float] = None  # presumably a Unix epoch float — TODO confirm
28
+
29
class EvaluateResponse(BaseModel):
    """Result returned by the mock evaluator."""

    status: str              # "success" on the happy path
    recommendation: str      # "APPROVE", "DENY", "ESCALATE"
    risk_score: float        # simulated risk, clamped to (0.01, 0.99), rounded to 4 places
    confidence: float        # simulated confidence, clamped to [0.5, 0.99], rounded to 4 places
    justification: str       # human-readable summary of the (mock) decision
    policy_violations: list  # always empty in the mock; element type unspecified here
36
+
37
+ # ---------- Mock Logic ----------
38
def _stable_seed(*parts: str) -> int:
    """Derive a process-independent integer seed in [0, 1000) from *parts*.

    The original code used ``hash((...,))`` on strings, but Python's string
    hash is salted per interpreter run (PYTHONHASHSEED), so the promised
    "deterministic randomness" did not survive a restart. A cryptographic
    digest is stable across runs and machines.
    """
    digest = hashlib.sha256("|".join(parts).encode("utf-8")).digest()
    return int.from_bytes(digest[:4], "big") % 1000


def generate_mock_response(request: EvaluateRequest) -> EvaluateResponse:
    """Build a simulated evaluation for *request*.

    The response is deterministic for a given (service_name, event_type,
    severity) triple. Mock logic only — no real inference is performed.
    """
    # Use a local RNG instance so this function has no side effect on the
    # process-global `random` state (the original `random.seed(...)` did).
    rng = random.Random(
        _stable_seed(request.service_name, request.event_type, request.severity)
    )

    # Simulate risk from severity; unknown severities fall back to 0.5.
    severity_map = {"low": 0.2, "medium": 0.4, "high": 0.7, "critical": 0.9}
    base_risk = severity_map.get(request.severity, 0.5)
    # Add small noise, clamped into (0.01, 0.99).
    risk = min(0.99, max(0.01, base_risk + rng.uniform(-0.1, 0.1)))

    # Decision logic (mock): low risk approves, high risk denies,
    # everything in between escalates.
    if risk < 0.3:
        rec = "APPROVE"
    elif risk > 0.8:
        rec = "DENY"
    else:
        rec = "ESCALATE"

    # Confidence inversely related to risk (simulated), clamped to [0.5, 0.99].
    confidence = 1.0 - (risk * 0.3) + rng.uniform(-0.05, 0.05)
    confidence = min(0.99, max(0.5, confidence))

    justification = (
        f"Simulated evaluation for {request.service_name}: {request.event_type} severity={request.severity}. "
        f"Risk score {risk:.2f} → {rec}. (Mock response, not real inference.)"
    )

    return EvaluateResponse(
        status="success",
        recommendation=rec,
        risk_score=round(risk, 4),
        confidence=round(confidence, 4),
        justification=justification,
        policy_violations=[],
    )
75
+
76
+ # ---------- Endpoints ----------
77
@app.get("/health", tags=["health"])
async def health():
    """Liveness probe: report service status and the current server time."""
    payload = {"status": "ok"}
    payload["timestamp"] = time.time()
    return payload
80
+
81
@app.post("/v1/evaluate", response_model=EvaluateResponse, tags=["evaluation"])
async def evaluate(request: EvaluateRequest):
    """Run the mock evaluator over one event, rejecting empty identifiers."""
    # Guard clause: both identifiers must be non-empty strings
    # (`not a or not b` rewritten via De Morgan).
    if not (request.service_name and request.event_type):
        raise HTTPException(status_code=400, detail="Missing service_name or event_type")
    return generate_mock_response(request)
87
+
88
# ---------- Root endpoint ----------
# NOTE(review): the original section label said "redirect to docs", but this
# handler does NOT redirect — it returns a plain JSON pointer to /docs.
@app.get("/", include_in_schema=False)
async def root():
    """Return a JSON hint directing clients to the interactive documentation."""
    return {"message": "ARF Sandbox API. See /docs for interactive documentation."}
92
+
93
# Allow running the file directly (`python app.py`). Port 7860 is the
# conventional Hugging Face Spaces port — presumably this is deployed there;
# confirm against the hosting config.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)