from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Optional
import random
import time

app = FastAPI(
    title="ARF Sandbox API",
    description="Mock endpoint – does NOT use the real Bayesian engine. Simulated responses only.",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)
# ---------- Request/Response Models ----------
class Metrics(BaseModel):
    latency_ms: Optional[float] = None
    error_rate: Optional[float] = None
    throughput: Optional[float] = None
    cpu_usage: Optional[float] = None


class EvaluateRequest(BaseModel):
    service_name: str
    event_type: str  # e.g., "latency", "error_rate", "cpu_spike"
    severity: str  # "low", "medium", "high", "critical"
    metrics: Optional[Metrics] = None
    timestamp: Optional[float] = None


class EvaluateResponse(BaseModel):
    status: str
    recommendation: str  # "APPROVE", "DENY", "ESCALATE"
    risk_score: float
    confidence: float
    justification: str
    policy_violations: list
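
# Example payloads for the models above (illustrative values only; the exact
# numbers depend on the seeded noise in generate_mock_response below):
#   Request:  {"service_name": "checkout", "event_type": "latency",
#              "severity": "high", "metrics": {"latency_ms": 850.0}}
#   Response: {"status": "success", "recommendation": "ESCALATE", "risk_score": 0.68,
#              "confidence": 0.78, "justification": "...", "policy_violations": []}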
# ---------- Mock Logic ----------
def generate_mock_response(request: EvaluateRequest) -> EvaluateResponse:
    # Seed the RNG from service name, event type, and severity so identical
    # requests get the same simulated result within a process
    # (Python's hash() of strings is randomized between processes).
    seed = hash((request.service_name, request.event_type, request.severity)) % 1000
    random.seed(seed)

    # Simulate risk score based on severity
    severity_map = {"low": 0.2, "medium": 0.4, "high": 0.7, "critical": 0.9}
    base_risk = severity_map.get(request.severity, 0.5)

    # Add small random noise, clamped to [0.01, 0.99]
    risk = min(0.99, max(0.01, base_risk + random.uniform(-0.1, 0.1)))
    # Decision logic (mock)
    if risk < 0.3:
        rec = "APPROVE"
    elif risk > 0.8:
        rec = "DENY"
    else:
        rec = "ESCALATE"

    # Confidence inversely related to risk (simulated), clamped to [0.5, 0.99]
    confidence = 1.0 - (risk * 0.3) + random.uniform(-0.05, 0.05)
    confidence = min(0.99, max(0.5, confidence))

    # Justification template
    justification = (
        f"Simulated evaluation for {request.service_name}: {request.event_type} severity={request.severity}. "
        f"Risk score {risk:.2f} → {rec}. (Mock response, not real inference.)"
    )

    return EvaluateResponse(
        status="success",
        recommendation=rec,
        risk_score=round(risk, 4),
        confidence=round(confidence, 4),
        justification=justification,
        policy_violations=[],
    )
# ---------- Endpoints ----------
@app.get("/health")
async def health():
    return {"status": "ok", "timestamp": time.time()}


@app.post("/evaluate", response_model=EvaluateResponse)
async def evaluate(request: EvaluateRequest):
    # Simple input validation
    if not request.service_name or not request.event_type:
        raise HTTPException(status_code=400, detail="Missing service_name or event_type")
    return generate_mock_response(request)


# ---------- Root endpoint (points to the interactive docs) ----------
@app.get("/")
async def root():
    return {"message": "ARF Sandbox API. See /docs for interactive documentation."}
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
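
A minimal sketch of exercising the mock /evaluate endpoint with FastAPI's TestClient. The module name app and the payload values are assumptions for illustration; the test client requires httpx to be installed.

# Usage sketch, assuming the listing above is saved as app.py.
from fastapi.testclient import TestClient

from app import app  # hypothetical module name for the listing above

client = TestClient(app)
payload = {
    "service_name": "checkout",
    "event_type": "latency",
    "severity": "high",
    "metrics": {"latency_ms": 850.0},
}
resp = client.post("/evaluate", json=payload)
print(resp.status_code)               # 200
print(resp.json()["recommendation"])  # "APPROVE", "DENY", or "ESCALATE"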