Python SDK
The GOVERN Python SDK provides a clean, typed interface to the entire GOVERN platform. It wraps the REST API with retry logic, streaming support for long-running assessments, and Pythonic result objects.
Installation
pip install archetypal-govern

Requires Python 3.9 or later. The SDK has no mandatory dependencies beyond httpx and pydantic.
# Install with async support (recommended)
pip install "archetypal-govern[async]"
# Install with CLI utilities
pip install "archetypal-govern[cli]"

Initialization
from govern import GovernClient
# Initialize with API key
client = GovernClient(api_key="gvn_live_xxxxxxxxxxxx")
# Or use environment variable GOVERN_API_KEY
import os
client = GovernClient()  # reads from GOVERN_API_KEY env var

Configuration options:
client = GovernClient(
    api_key="gvn_live_xxxxxxxxxxxx",
    base_url="https://api.govern.archetypal.ai/v1",  # default
    timeout=120,        # request timeout in seconds (default: 60)
    max_retries=3,      # number of retries on transient errors (default: 3)
    org_id="org_xxxx",  # optional: scopes all calls to a specific org
)

check_input()
Evaluate a prompt or user input against active governance policies before it reaches your AI model. Use this as a pre-inference gate in your application.
from govern import GovernClient, InputVerdict
client = GovernClient(api_key="gvn_live_xxxxxxxxxxxx")
result = client.check_input(
    system_id="sys_01HXXXXXXXXXXX",
    input_text="Summarize the performance review for employee ID 4821",
    user_context={
        "user_id": "user_9983",
        "session_id": "sess_abc",
        "request_source": "hr_portal"
    }
)
print(result.verdict)    # ALLOW | WARN | BLOCK
print(result.policy_id)  # which policy triggered (if WARN or BLOCK)
print(result.reason)     # human-readable explanation
print(result.cuc_score)  # coherence under constraint score (0.0–1.0)
if result.verdict == InputVerdict.BLOCK:
    raise GovernPolicyError(f"Input blocked: {result.reason}")

The check_input() gate in your inference pipeline:
def call_ai_model(prompt: str, user_id: str) -> str:
    # 1. Gate the input
    check = client.check_input(
        system_id=MY_SYSTEM_ID,
        input_text=prompt,
        user_context={"user_id": user_id}
    )
    if check.verdict == "BLOCK":
        return "This request cannot be processed due to governance policy."
    # 2. Call your model
    response = my_llm_client.complete(prompt)
    # 3. Score the output (see score_output below)
    client.score_output(
        system_id=MY_SYSTEM_ID,
        input_text=prompt,
        output_text=response,
    )
    return response

score_output()
Score a model’s output for governance compliance after inference. This scores the output against active policy rules and updates the system’s behavioral baseline in Energy.
result = client.score_output(
    system_id="sys_01HXXXXXXXXXXX",
    input_text="Summarize the performance review for employee ID 4821",
    output_text="Employee 4821 received a rating of 3.8/5.0 in Q1 2026...",
    metadata={
        "model_version": "v2.1",
        "latency_ms": 1240,
        "token_count": 312
    }
)
print(result.score)            # 0.0–1.0 governance score for this output
print(result.verdict)          # PASS | CONDITIONAL | FAIL
print(result.findings)         # list of Finding objects (empty if PASS)
print(result.cuc_score)        # coherence under constraint
print(result.energy_recorded)  # True if this scored against Energy baseline
for finding in result.findings:
    print(f"  [{finding.severity}] {finding.title}")
    print(f"  Control: {finding.control}")
    print(f"  Remediation: {finding.remediation_url}")

Batch scoring for high-throughput pipelines:
results = client.score_output_batch(
    system_id="sys_01HXXXXXXXXXXX",
    samples=[
        {"input": prompt1, "output": output1},
        {"input": prompt2, "output": output2},
        {"input": prompt3, "output": output3},
    ]
)
for i, result in enumerate(results):
    print(f"Sample {i}: {result.verdict} (score={result.score:.2f})")

run_assessment()
Trigger a full RDL-powered governance assessment of an AI system. This is the same assessment that runs in the Dashboard — document analysis, policy evaluation, framework compliance scoring.
from govern import GovernClient, Framework, AssessMode
client = GovernClient(api_key="gvn_live_xxxxxxxxxxxx")
# Start an assessment
assessment = client.run_assessment(
    system_id="sys_01HXXXXXXXXXXX",
    mode=AssessMode.DOCUMENT,
    files=[
        "artifacts/model_card.pdf",
        "artifacts/data_sheet.json",
        "artifacts/system_security_plan.pdf"
    ],
    frameworks=[Framework.NIST_800_53, Framework.NIST_AI_RMF],
    notify_on_complete=True
)
print(f"Assessment ID: {assessment.id}")
print(f"Status: {assessment.status}")  # QUEUED | RUNNING | COMPLETE | FAILED

Wait for completion (polling):
import time
while assessment.status in ("QUEUED", "RUNNING"):
    time.sleep(10)
    assessment = client.get_assessment(assessment.id)
print(f"Verdict: {assessment.verdict}")  # PASS | CONDITIONAL | FAIL
print(f"Score: {assessment.score:.2f}")
# Access framework-specific results
for framework_id, fw_result in assessment.framework_results.items():
    failing = [c for c in fw_result.controls if not c.passing]
    print(f"\n{framework_id}: {len(failing)} failing controls")
    for control in failing:
        print(f"  {control.id}: {control.title}")

CI/CD gate pattern — block deploy if score below threshold:
from govern import GovernClient, DeployGate, DeployBlockedError
client = GovernClient(api_key="gvn_live_xxxxxxxxxxxx")
gate = DeployGate(
    client=client,
    system_id="sys_01HXXXXXXXXXXX",
    mode="sandbox",
    image="registry.company.com/my-ai-app:v2.1.0",
    framework="nist-800-53",
    threshold=0.80,
    blocking_severities=["HIGH", "CRITICAL"],
)
try:
    gate.evaluate()
    print("Governance gate passed — deployment authorized")
except DeployBlockedError as e:
    print(f"Deployment blocked: {e}")
    print(f"Score: {e.score:.2f} (threshold: {e.threshold:.2f})")
    print(f"Blocking findings: {e.blocking_findings}")
    exit(1)

Async Support
All synchronous methods have async equivalents in govern.aio:
import asyncio
from govern.aio import AsyncGovernClient
async def main():
    client = AsyncGovernClient(api_key="gvn_live_xxxxxxxxxxxx")
    # Async input check
    result = await client.check_input(
        system_id="sys_01HXXXXXXXXXXX",
        input_text="What is the customer's credit score?",
    )
    print(result.verdict)
    # Async output scoring
    score = await client.score_output(
        system_id="sys_01HXXXXXXXXXXX",
        input_text="What is the customer's credit score?",
        output_text="Based on available data, the customer's credit score is 720.",
    )
    print(score.verdict)
    # Async assessment with streaming status updates
    async for status in client.stream_assessment(
        system_id="sys_01HXXXXXXXXXXX",
        files=["model_card.pdf"],
        frameworks=["nist_ai_rmf"],
    ):
        print(f"[{status.stage}] {status.message}")
asyncio.run(main())

Concurrent scoring in async applications:
async def score_all_outputs(outputs: list[dict]) -> list:
    client = AsyncGovernClient(api_key="gvn_live_xxxxxxxxxxxx")
    tasks = [
        client.score_output(
            system_id="sys_01HXXXXXXXXXXX",
            input_text=o["input"],
            output_text=o["output"]
        )
        for o in outputs
    ]
    return await asyncio.gather(*tasks)

Error Handling
The SDK raises typed exceptions. Catch specific errors to handle them appropriately.
from govern.exceptions import (
    GovernAuthError,        # 401 — invalid or expired API key
    GovernPermissionError,  # 403 — key lacks required scope
    GovernNotFoundError,    # 404 — system_id or resource not found
    GovernPolicyError,      # Policy blocked the action
    GovernRateLimitError,   # 429 — rate limit exceeded
    GovernAPIError,         # 5xx — server-side error
    GovernTimeoutError,     # Request timed out
)
try:
    result = client.check_input(
        system_id="sys_01HXXXXXXXXXXX",
        input_text=user_prompt
    )
except GovernAuthError:
    logger.error("Invalid GOVERN API key — check GOVERN_API_KEY env var")
    raise
except GovernNotFoundError as e:
    logger.error(f"System not found: {e.system_id}")
    raise
except GovernPolicyError as e:
    logger.warning(f"Input blocked by policy {e.policy_id}: {e.reason}")
    return fallback_response()
except GovernRateLimitError as e:
    logger.warning(f"Rate limited — retry after {e.retry_after}s")
    time.sleep(e.retry_after)
    # retry...
except GovernAPIError as e:
    logger.error(f"GOVERN API error {e.status_code}: {e.message}")
    raise

Fail-open vs fail-closed configuration:
# Fail-closed: block if GOVERN is unreachable
try:
    check = client.check_input(system_id=SYSTEM_ID, input_text=prompt)
    if check.verdict == "BLOCK":
        return "Request blocked by governance policy."
except (GovernAPIError, GovernTimeoutError):
    return "Service temporarily unavailable."  # Fail closed
# Fail-open: allow if GOVERN is unreachable (use only for non-critical paths)
try:
    check = client.check_input(system_id=SYSTEM_ID, input_text=prompt)
    if check.verdict == "BLOCK":
        return "Request blocked by governance policy."
except (GovernAPIError, GovernTimeoutError):
    logger.warning("GOVERN unreachable — allowing request (fail-open mode)")
    # Continue to model

Full Reference
| Method | Description |
|---|---|
| client.check_input(system_id, input_text, user_context) | Pre-inference policy gate |
| client.score_output(system_id, input_text, output_text, metadata) | Post-inference governance score |
| client.score_output_batch(system_id, samples) | Batch output scoring |
| client.run_assessment(system_id, mode, files, frameworks) | Trigger full RDL assessment |
| client.get_assessment(assessment_id) | Fetch assessment status and results |
| client.list_findings(system_id, severity, status) | List active findings for a system |
| client.acknowledge_finding(finding_id) | Acknowledge a finding |
| client.get_energy(system_id) | Fetch current Energy scores |
| client.list_policies(scope) | List active policies |
| client.get_compliance_report(framework) | Fetch compliance summary |
Full API reference: https://docs.govern.archetypal.ai/api