"""Human-operational rulebook for Mais Humana.
|
|
|
|
The platform already knows how to scan repositories and build reports. This
|
|
module adds a deterministic rulebook that translates those reports into a
|
|
control-plane vocabulary a human administrator can use: which profile is served,
|
|
which surface must exist, which MCP transit fields are mandatory, which evidence
|
|
proves the rule, and what next order is justified when the rule is not covered.
|
|
|
|
Most rules are generated from the canonical catalog by
|
|
``tools/generate_human_rulebook.py``. The hand-written code here keeps the
|
|
runtime behavior small, testable, and independent of the generation step.
|
|
"""
|
|
|
|
from __future__ import annotations

import csv
import io
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, Mapping, Sequence

# Shared helpers and the scanner-produced report model from this package.
from .models import PlatformHumanReport, as_plain_data, merge_unique, slugify, utc_now

class RuleScope(str, Enum):
    """Scope used to group rulebook entries."""

    PROFILE_PLATFORM = "profile_platform"    # profile <-> platform relation
    PROFILE_SURFACE = "profile_surface"      # profile <-> operational surface
    PLATFORM_SURFACE = "platform_surface"    # platform <-> operational surface
    DEPENDENCY = "dependency"                # cross-platform dependency
    MCP_TRANSIT = "mcp_transit"              # MCP transit-field requirements
    CANONICAL_IDENTITY = "canonical_identity"  # canonical project identity rules

class RuleOutcome(str, Enum):
    """Normalized coverage result for a rule."""

    COVERED = "covered"      # enough evidence, nothing missing
    PARTIAL = "partial"      # some evidence, but markers/surfaces absent
    MISSING = "missing"      # no sufficient evidence found
    BLOCKED = "blocked"      # explicit blocker or negative signal dominates
    EXCEPTION = "exception"  # covered by a formal governance exception

class TruthState(str, Enum):
    """How close a rule is to an operational source of truth.

    States are ordered (roughly) from weakest to strongest signal; the
    classifier in ``_truth_state_for_rule`` picks one from keyword matches.
    """

    UNKNOWN = "unknown"                      # no recognizable signal
    CATALOG_ONLY = "catalog_only"            # exists only in the catalog
    DOCUMENTED = "documented"                # mentioned in readme/docs
    DERIVED = "derived"                      # derived from contracts/readiness
    RESPONSE_READY = "response_ready"        # response-ready markers present
    SAME_SOURCE_READY = "same_source_ready"  # same-source markers present
    LIVE_READONLY = "live_readonly"          # live read-only markers present
    LIVE_WRITE = "live_write"                # live write/readback markers present
    BLOCKED = "blocked"                      # explicitly blocked/unsupported
    FORMAL_EXCEPTION = "formal_exception"    # formal exception registered

# Base score contributed by each coverage outcome (0-100 scale).
OUTCOME_SCORE: dict[RuleOutcome, int] = {
    RuleOutcome.COVERED: 100,
    RuleOutcome.EXCEPTION: 82,
    RuleOutcome.PARTIAL: 58,
    RuleOutcome.MISSING: 18,
    RuleOutcome.BLOCKED: 0,
}


# Score attached to each truth state; blended into the final coverage
# score at 20% weight (see ``_coverage_score``).
TRUTH_SCORE: dict[TruthState, int] = {
    TruthState.UNKNOWN: 0,
    TruthState.CATALOG_ONLY: 20,
    TruthState.DOCUMENTED: 38,
    TruthState.DERIVED: 52,
    TruthState.RESPONSE_READY: 70,
    TruthState.SAME_SOURCE_READY: 82,
    TruthState.LIVE_READONLY: 88,
    TruthState.LIVE_WRITE: 94,
    TruthState.FORMAL_EXCEPTION: 76,
    TruthState.BLOCKED: 0,
}


# Mandatory fields every MCP transit record must carry.
MCP_TRANSIT_FIELDS: tuple[str, ...] = (
    "origin",
    "destination",
    "tool",
    "payload",
    "actor",
    "permission",
    "result",
    "traceId",
    "auditId",
    "timestamp",
)


# Well-known project/platform identifiers used throughout the rulebook.
CANONICAL_PROJECT_ID = "tudo-para-ia-mais-humana-plataform"    # recommended canonical project id
CURRENT_PROJECT_ID = "tudo-para-ia-mais-humana"                # project id currently in use
MCP_CONTROL_PLANE_ID = "tudo-para-ia-mcps-internos-plataform"  # mandatory administrative path
UI_SUPPORT_PLATFORM_ID = "tudo-para-ia-ui-platform"            # supporting UI platform

@dataclass(frozen=True, slots=True)
class HumanControlRule:
    """One rule linking a platform, a human profile, and an operational surface.

    Instances are generated from the canonical catalog (see
    ``tools/generate_human_rulebook.py``) and are immutable; all
    collection-valued fields are tuples so rules are safe to share.
    """

    rule_id: str                              # stable rule identifier
    scope: RuleScope                          # grouping scope for the rule
    platform_id: str                          # platform the rule applies to
    profile_id: str                           # human profile served by the rule
    title: str
    purpose: str
    source_of_truth: str                      # where the rule is operationally proven
    required_surfaces: tuple[str, ...]        # surfaces that must exist for coverage
    success_markers: tuple[str, ...]          # terms whose presence indicates success
    evidence_terms: tuple[str, ...]           # additional terms counted as evidence
    negative_terms: tuple[str, ...]           # terms indicating blockage/regression
    mcp_transit_fields: tuple[str, ...]       # mandatory MCP transit fields
    expected_payload_fields: tuple[str, ...]  # expected payload fields
    validation_steps: tuple[str, ...]         # manual steps to verify the rule
    next_order_hint: str                      # suggested next order when uncovered
    priority: str = "media"
    generated_from: str = "catalog"
    canonical_project_id: str = CANONICAL_PROJECT_ID
    control_plane_id: str = MCP_CONTROL_PLANE_ID
    ui_support_platform_id: str = UI_SUPPORT_PLATFORM_ID

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-data (JSON-friendly) representation of the rule."""
        return as_plain_data(self)

    @property
    def slug(self) -> str:
        """Slug derived from the rule title (via shared ``slugify`` helper)."""
        return slugify(self.title)

    @property
    def is_mcp_bound(self) -> bool:
        """Heuristic: True when the source of truth references the MCP control plane."""
        return self.control_plane_id in self.source_of_truth or "mcp" in self.source_of_truth.lower()

    @property
    def field_count(self) -> int:
        """Total number of transit plus payload fields tracked by the rule."""
        return len(self.mcp_transit_fields) + len(self.expected_payload_fields)

    def mentions(self, text: str) -> bool:
        """Return True if any identifying value of this rule occurs in *text*.

        The comparison is case-insensitive; empty values are skipped so a
        blank field can never match everything.
        """
        lowered = text.lower()
        values = (
            self.rule_id,
            self.platform_id,
            self.profile_id,
            self.title,
            self.purpose,
            self.source_of_truth,
            " ".join(self.required_surfaces),
            " ".join(self.success_markers),
        )
        return any(value.lower() in lowered for value in values if value)

@dataclass(frozen=True, slots=True)
class RuleEvidenceHit:
    """Evidence hit found while evaluating a rule against generated reports."""

    path: str          # repo path (or source-of-truth label) anchoring the hit
    summary: str       # human-readable description of the hit
    term: str          # matched search term
    confidence: float  # heuristic confidence assigned by the evaluator

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-data (JSON-friendly) representation of the hit."""
        return as_plain_data(self)

@dataclass(frozen=True, slots=True)
class RuleCoverage:
    """Coverage decision for one rule."""

    rule_id: str
    platform_id: str
    profile_id: str
    scope: RuleScope
    outcome: RuleOutcome            # normalized coverage verdict
    truth_state: TruthState         # proximity to an operational source of truth
    score: int                      # blended 0-100 coverage score
    reason: str                     # human-readable justification for the verdict
    evidence: tuple[RuleEvidenceHit, ...]
    missing_terms: tuple[str, ...]  # expected markers not found in the corpus
    next_order_hint: str
    validation_steps: tuple[str, ...]
    generated_at: str = field(default_factory=utc_now)  # evaluation timestamp

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-data (JSON-friendly) representation of the coverage."""
        return as_plain_data(self)

    @property
    def needs_order(self) -> bool:
        """True when the rule still requires a follow-up work order."""
        return self.outcome in {RuleOutcome.MISSING, RuleOutcome.BLOCKED, RuleOutcome.PARTIAL}

    @property
    def compact_status(self) -> str:
        """Compact ``outcome/truth/score`` status string for listings."""
        return f"{self.outcome.value}/{self.truth_state.value}/{self.score}"

@dataclass(frozen=True, slots=True)
class RulebookReport:
    """Full rulebook evaluation for the current generation run."""

    project_id: str                     # project id currently in use
    canonical_project_id: str           # recommended canonical project id
    generated_at: str                   # run timestamp
    rules_count: int                    # total rules in the generated rulebook
    coverage: tuple[RuleCoverage, ...]  # per-rule decisions (possibly limited subset)
    executive_summary: tuple[str, ...]
    active_risks: tuple[str, ...]       # deduplicated, capped risk lines
    next_order_hints: tuple[str, ...]   # deduplicated, capped follow-up hints

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-data (JSON-friendly) representation of the report."""
        return as_plain_data(self)

    @property
    def average_score(self) -> int:
        """Mean coverage score rounded to int; 0 when there is no coverage."""
        if not self.coverage:
            return 0
        return round(sum(item.score for item in self.coverage) / len(self.coverage))

    @property
    def blocked_count(self) -> int:
        """Number of rules evaluated as blocked."""
        return sum(1 for item in self.coverage if item.outcome == RuleOutcome.BLOCKED)

    @property
    def partial_count(self) -> int:
        """Number of rules evaluated as partially covered."""
        return sum(1 for item in self.coverage if item.outcome == RuleOutcome.PARTIAL)

    @property
    def missing_count(self) -> int:
        """Number of rules with no sufficient evidence."""
        return sum(1 for item in self.coverage if item.outcome == RuleOutcome.MISSING)

def _generated_rules() -> tuple[HumanControlRule, ...]:
    """Import the generated rulebook lazily to avoid an import cycle."""

    # The generated module references types from this module, so a
    # top-level import here would be circular; defer it to first use.
    from .generated_human_rulebook import RULES

    return RULES

def iter_rules() -> tuple[HumanControlRule, ...]:
    """Return every rule from the generated rulebook."""
    return _generated_rules()

def rules_for_platform(platform_id: str) -> tuple[HumanControlRule, ...]:
    """Return every generated rule bound to *platform_id*."""
    matches = [rule for rule in iter_rules() if rule.platform_id == platform_id]
    return tuple(matches)

def rules_for_profile(profile_id: str) -> tuple[HumanControlRule, ...]:
    """Return every generated rule that serves *profile_id*."""
    selected: list[HumanControlRule] = []
    for rule in iter_rules():
        if rule.profile_id == profile_id:
            selected.append(rule)
    return tuple(selected)

def rules_for_scope(scope: RuleScope) -> tuple[HumanControlRule, ...]:
    """Return every generated rule whose scope equals *scope*."""
    return tuple(filter(lambda rule: rule.scope == scope, iter_rules()))

def _report_corpus(report: PlatformHumanReport) -> str:
|
|
parts: list[str] = [
|
|
report.platform.platform_id,
|
|
report.platform.title,
|
|
report.platform.mission,
|
|
report.scan.repo_path,
|
|
report.scan.readme_excerpt,
|
|
" ".join(report.platform.expected_surfaces),
|
|
" ".join(report.platform.known_blockers),
|
|
" ".join(report.scan.warnings),
|
|
report.summary,
|
|
" ".join(report.current_state),
|
|
" ".join(report.future_state),
|
|
" ".join(report.missing_for_humans),
|
|
]
|
|
for evidence in report.scan.evidence[:500]:
|
|
parts.append(evidence.path)
|
|
parts.append(evidence.summary)
|
|
parts.append(evidence.kind.value)
|
|
parts.append(" ".join(evidence.tags))
|
|
for recommendation in report.recommendations:
|
|
parts.append(recommendation.title)
|
|
parts.append(recommendation.reason)
|
|
parts.append(recommendation.expected_impact)
|
|
parts.append(" ".join(recommendation.affected_paths))
|
|
for cell in report.cells:
|
|
parts.append(cell.profile_id)
|
|
parts.append(cell.maturity.value)
|
|
parts.append(cell.explanation)
|
|
parts.append(" ".join(cell.strengths))
|
|
parts.append(" ".join(cell.gaps))
|
|
return "\n".join(item for item in parts if item).lower()
|
|
|
|
|
|
def _reports_by_platform(reports: Sequence[PlatformHumanReport]) -> Mapping[str, PlatformHumanReport]:
|
|
return {report.platform.platform_id: report for report in reports}
|
|
|
|
|
|
def _hits_for_rule(rule: HumanControlRule, corpus: str, report: PlatformHumanReport | None) -> tuple[RuleEvidenceHit, ...]:
    """Collect evidence hits for every rule term found in *corpus*.

    Hits are anchored to the scanned repo path when a report exists,
    otherwise to the rule's source of truth; the list is capped at 12
    entries to keep downstream reports compact.
    """
    anchor = report.scan.repo_path if report is not None else rule.source_of_truth
    found: list[RuleEvidenceHit] = []
    for term in merge_unique(rule.evidence_terms + rule.success_markers + rule.required_surfaces):
        if not term or term.lower() not in corpus:
            continue
        found.append(
            RuleEvidenceHit(
                path=anchor,
                summary=f"Termo encontrado para regra: {term}",
                term=term,
                confidence=0.72,
            )
        )
        if len(found) >= 12:  # hard cap on hits per rule
            break
    return tuple(found)

def _missing_terms(rule: HumanControlRule, corpus: str) -> tuple[str, ...]:
    """Return up to 12 expected markers/surfaces absent from *corpus*."""
    absent: list[str] = []
    for term in merge_unique(rule.success_markers + rule.required_surfaces):
        if term and term.lower() not in corpus:
            absent.append(term)
    return tuple(absent[:12])

def _truth_state_for_rule(rule: HumanControlRule, corpus: str, negative_hits: int) -> TruthState:
    """Classify how close a rule is to an operational source of truth.

    The classification is keyword-driven: the first matching marker in
    the lowercased *corpus* decides the state, checked from blocking
    signals (only considered when *negative_hits* is non-zero) down to
    weak documentation signals. ``rule`` is currently unused but kept for
    signature parity with the other per-rule helpers.
    """
    lowered = corpus.lower()
    if negative_hits:
        # Negative evidence takes precedence: catalog-only or explicitly
        # blocked/unsupported states short-circuit the positive checks.
        if "catalogonly" in lowered or "catalog_only" in lowered:
            return TruthState.CATALOG_ONLY
        if "unsupported" in lowered or "needs_token" in lowered or "blocked" in lowered:
            return TruthState.BLOCKED
    if "live_write" in lowered or "write readback" in lowered or "persist" in lowered:
        return TruthState.LIVE_WRITE
    if "live_readonly" in lowered or "readonly" in lowered or "readback" in lowered:
        return TruthState.LIVE_READONLY
    # Fix: the original called ``"sameSource".lower()`` / ``"responseReady".lower()``
    # at every invocation; the lowercase literals are equivalent and free.
    if "samesource" in lowered or "same-source" in lowered or "mesma fonte" in lowered:
        return TruthState.SAME_SOURCE_READY
    if "responseready" in lowered or "response-ready" in lowered:
        return TruthState.RESPONSE_READY
    if "contract" in lowered or "contrato" in lowered or "readiness" in lowered:
        return TruthState.DERIVED
    if "readme" in lowered or "docs" in lowered or "document" in lowered:
        return TruthState.DOCUMENTED
    return TruthState.UNKNOWN

def _outcome_for_rule(
    rule: HumanControlRule,
    positive_hits: int,
    missing_terms: Sequence[str],
    negative_hits: int,
    truth_state: TruthState,
) -> RuleOutcome:
    """Map hit counts and truth state onto a normalized coverage outcome.

    Thresholds scale with the number of success markers: roughly half of
    them (with small floors of 2 and 3) must be seen to avoid a blocked
    verdict or to reach full coverage, respectively.
    """
    # Docs platform stuck at catalog-only is treated as a hard blocker.
    if truth_state == TruthState.CATALOG_ONLY and rule.platform_id == "docs":
        return RuleOutcome.BLOCKED
    if truth_state == TruthState.BLOCKED:
        return RuleOutcome.BLOCKED
    half_markers = len(rule.success_markers) // 2
    if negative_hits and positive_hits < max(2, half_markers):
        return RuleOutcome.BLOCKED
    if not missing_terms and positive_hits >= max(3, half_markers):
        return RuleOutcome.COVERED
    if positive_hits >= 2:
        return RuleOutcome.PARTIAL
    if truth_state == TruthState.FORMAL_EXCEPTION:
        return RuleOutcome.EXCEPTION
    return RuleOutcome.MISSING

def _coverage_score(outcome: RuleOutcome, truth_state: TruthState, positive_hits: int, missing_count: int) -> int:
    """Blend outcome base, truth bonus, evidence bonus and missing penalty into 0..100."""
    score = OUTCOME_SCORE[outcome]
    score += round(TRUTH_SCORE[truth_state] * 0.2)  # truth state at 20% weight
    score += min(14, positive_hits * 2)             # capped evidence bonus
    score -= min(30, missing_count * 3)             # capped penalty for absent markers
    return max(0, min(100, score))

def evaluate_rule(rule: HumanControlRule, reports: Sequence[PlatformHumanReport]) -> RuleCoverage:
    """Evaluate one rule against the generated platform reports.

    Builds the searchable corpus for the rule's platform, counts positive
    and negative term hits, derives truth state / outcome / score, and
    packages everything into an immutable ``RuleCoverage`` record.
    """
    report = _reports_by_platform(reports).get(rule.platform_id)
    corpus = "" if report is None else _report_corpus(report)
    positive_hits = sum(1 for term in merge_unique(rule.evidence_terms + rule.success_markers) if term.lower() in corpus)
    negative_hits = sum(1 for term in rule.negative_terms if term.lower() in corpus)
    missing = _missing_terms(rule, corpus)
    truth_state = _truth_state_for_rule(rule, corpus, negative_hits)
    outcome = _outcome_for_rule(rule, positive_hits, missing, negative_hits, truth_state)
    score = _coverage_score(outcome, truth_state, positive_hits, len(missing))
    if report is None:
        reason = "Repositorio ou relatorio de plataforma nao encontrado para a regra."
    else:
        # Human-readable justification keyed by outcome; MISSING is the default.
        reason_by_outcome = {
            RuleOutcome.COVERED: "A regra possui sinais suficientes nos relatorios e evidencias da plataforma.",
            RuleOutcome.PARTIAL: "A regra possui sinais parciais, mas ainda falta superficie, marcador ou prova direta.",
            RuleOutcome.BLOCKED: "A regra encontrou bloqueio ou estado catalogOnly/unsupported que impede maturidade humana plena.",
            RuleOutcome.EXCEPTION: "A regra depende de excecao formal registrada como decisao de governanca.",
        }
        reason = reason_by_outcome.get(outcome, "A regra ainda nao encontrou evidencias suficientes.")
    return RuleCoverage(
        rule_id=rule.rule_id,
        platform_id=rule.platform_id,
        profile_id=rule.profile_id,
        scope=rule.scope,
        outcome=outcome,
        truth_state=truth_state,
        score=score,
        reason=reason,
        evidence=_hits_for_rule(rule, corpus, report),
        missing_terms=missing,
        next_order_hint=rule.next_order_hint,
        validation_steps=rule.validation_steps,
    )

def evaluate_rulebook(reports: Sequence[PlatformHumanReport], *, limit: int | None = None) -> RulebookReport:
    """Evaluate the whole generated rulebook against the platform reports.

    ``limit`` optionally truncates the rule list (useful for quick smoke
    runs). Risks and next-order hints are gathered from blocked/missing
    rules in ascending-score order, then de-duplicated and capped.
    """
    rules = iter_rules()
    selected = rules[:limit] if limit is not None else rules
    coverage = tuple(evaluate_rule(rule, reports) for rule in selected)
    risks: list[str] = []
    hints: list[str] = []
    # Worst-scoring rules first so the most urgent issues top the lists.
    ordering = sorted(coverage, key=lambda cov: (cov.score, cov.platform_id, cov.profile_id, cov.rule_id))
    for item in ordering:
        if item.outcome in {RuleOutcome.BLOCKED, RuleOutcome.MISSING}:
            risks.append(f"{item.platform_id}/{item.profile_id}/{item.scope.value}: {item.reason}")
            hints.append(item.next_order_hint)
    average = round(sum(item.score for item in coverage) / len(coverage)) if coverage else 0
    blocked = sum(1 for item in coverage if item.outcome == RuleOutcome.BLOCKED)
    partial = sum(1 for item in coverage if item.outcome == RuleOutcome.PARTIAL)
    absent = sum(1 for item in coverage if item.outcome == RuleOutcome.MISSING)
    summary = (
        f"Regras avaliadas: {len(coverage)}",
        f"Score medio do rulebook: {average}",
        f"Bloqueadas: {blocked}",
        f"Parciais: {partial}",
        f"Sem evidencia: {absent}",
        f"Projeto canonico recomendado: {CANONICAL_PROJECT_ID}",
        f"Caminho administrativo obrigatorio: {MCP_CONTROL_PLANE_ID}",
    )
    return RulebookReport(
        project_id=CURRENT_PROJECT_ID,
        canonical_project_id=CANONICAL_PROJECT_ID,
        generated_at=utc_now(),
        rules_count=len(rules),
        coverage=coverage,
        executive_summary=summary,
        active_risks=merge_unique(risks)[:40],
        next_order_hints=merge_unique(hints)[:20],
    )

def rulebook_rows(report: RulebookReport) -> list[list[str]]:
    """Render coverage as CSV-ready rows: header first, then one row per rule.

    Rows are sorted by platform, profile and rule id for stable output.
    """
    header = [
        "rule_id",
        "platform",
        "profile",
        "scope",
        "outcome",
        "truth_state",
        "score",
        "missing_terms",
        "next_order_hint",
    ]
    ordered = sorted(report.coverage, key=lambda cov: (cov.platform_id, cov.profile_id, cov.rule_id))
    body = [
        [
            item.rule_id,
            item.platform_id,
            item.profile_id,
            item.scope.value,
            item.outcome.value,
            item.truth_state.value,
            str(item.score),
            "; ".join(item.missing_terms),
            item.next_order_hint,
        ]
        for item in ordered
    ]
    return [header, *body]

def rows_to_csv(rows: Sequence[Sequence[str]]) -> str:
    """Serialize *rows* into CSV text using LF line endings."""
    sink = io.StringIO()
    csv.writer(sink, lineterminator="\n").writerows(rows)
    return sink.getvalue()

def rulebook_csv(report: RulebookReport) -> str:
    """Serialize the rulebook coverage matrix as CSV text."""
    rows = rulebook_rows(report)
    return rows_to_csv(rows)

def rulebook_markdown(report: RulebookReport) -> str:
    """Render the rulebook report as a Markdown document.

    Sections: header metadata, executive summary, active risks (capped at
    30), suggested next orders (capped at 20), then per-platform coverage
    with up to the 8 lowest-scoring rules listed for each platform.
    """
    lines = [
        "# Rulebook humano-operacional",
        "",
        f"- project_id_atual: `{report.project_id}`",
        f"- project_id_canonico_recomendado: `{report.canonical_project_id}`",
        f"- generated_at: `{report.generated_at}`",
        f"- regras_geradas: `{report.rules_count}`",
        f"- regras_avaliadas: `{len(report.coverage)}`",
        f"- score_medio: `{report.average_score}`",
        f"- bloqueadas: `{report.blocked_count}`",
        f"- parciais: `{report.partial_count}`",
        f"- sem_evidencia: `{report.missing_count}`",
        "",
        "## Sumario",
        "",
    ]
    lines.extend(f"- {item}" for item in report.executive_summary)
    lines.extend(["", "## Riscos ativos", ""])
    if report.active_risks:
        lines.extend(f"- {item}" for item in report.active_risks[:30])
    else:
        lines.append("- Nenhum risco ativo no rulebook avaliado.")
    lines.extend(["", "## Proximas ordens sugeridas", ""])
    if report.next_order_hints:
        lines.extend(f"- {item}" for item in report.next_order_hints[:20])
    else:
        lines.append("- Manter regressao e evidencias.")
    lines.extend(["", "## Cobertura por plataforma", ""])
    # Group coverage rows by platform so each platform gets its own section.
    grouped: dict[str, list[RuleCoverage]] = {}
    for item in report.coverage:
        grouped.setdefault(item.platform_id, []).append(item)
    for platform_id in sorted(grouped):
        items = grouped[platform_id]
        avg = round(sum(item.score for item in items) / len(items)) if items else 0
        blocked = sum(1 for item in items if item.outcome == RuleOutcome.BLOCKED)
        partial = sum(1 for item in items if item.outcome == RuleOutcome.PARTIAL)
        missing = sum(1 for item in items if item.outcome == RuleOutcome.MISSING)
        lines.append(f"### {platform_id}")
        lines.append("")
        lines.append(f"- score: `{avg}`")
        lines.append(f"- bloqueadas: `{blocked}`")
        lines.append(f"- parciais: `{partial}`")
        lines.append(f"- sem_evidencia: `{missing}`")
        # Show only the weakest rules (lowest score first) per platform.
        for item in sorted(items, key=lambda cov: (cov.score, cov.scope.value, cov.profile_id))[:8]:
            lines.append(
                f"- `{item.compact_status}` `{item.profile_id}` `{item.scope.value}` "
                f"{item.reason} Proxima OS: {item.next_order_hint}"
            )
        lines.append("")
    return "\n".join(lines).strip() + "\n"

def rulebook_compact_json(report: RulebookReport) -> dict[str, Any]:
    """Build a compact JSON-serializable view of the rulebook report.

    Risks and hints are capped at 20 entries; per-rule coverage keeps
    only identifying fields, the verdict values and the next-order hint.
    """
    compact_coverage = [
        {
            "rule_id": item.rule_id,
            "platform_id": item.platform_id,
            "profile_id": item.profile_id,
            "scope": item.scope.value,
            "outcome": item.outcome.value,
            "truth_state": item.truth_state.value,
            "score": item.score,
            "next_order_hint": item.next_order_hint,
        }
        for item in report.coverage
    ]
    payload: dict[str, Any] = {
        "project_id": report.project_id,
        "canonical_project_id": report.canonical_project_id,
        "generated_at": report.generated_at,
        "rules_count": report.rules_count,
        "coverage_count": len(report.coverage),
        "average_score": report.average_score,
        "blocked_count": report.blocked_count,
        "partial_count": report.partial_count,
        "missing_count": report.missing_count,
        "executive_summary": list(report.executive_summary),
        "active_risks": list(report.active_risks[:20]),
        "next_order_hints": list(report.next_order_hints[:20]),
        "coverage": compact_coverage,
    }
    return payload

def rulebook_artifact_records(project_root: Path) -> tuple[dict[str, str], ...]:
    """Describe the artifact files the rulebook pipeline writes under *project_root*.

    Returns one record per artifact containing its ``path`` (the relative
    artifact location joined onto *project_root*), a human ``description``,
    a short functional label, and the ``file_type``. The original manual
    append loop is replaced with a declarative table plus a comprehension.
    """
    # (relative path, description, functional label, file type)
    artifacts: tuple[tuple[str, str, str, str], ...] = (
        ("dados/rulebook-humano-operacional.json", "Rulebook completo de controle humano.", "rulebook humano", "json"),
        ("dados/rulebook-humano-operacional-compacto.json", "Rulebook compacto para consumo por MCP/UI.", "rulebook compacto", "json"),
        ("ecossistema/RULEBOOK-HUMANO-OPERACIONAL.md", "Relatorio Markdown do rulebook humano-operacional.", "rulebook humano", "markdown"),
        ("matrizes/rulebook-humano-operacional.csv", "Matriz CSV de cobertura do rulebook.", "matriz rulebook", "csv"),
    )
    return tuple(
        {
            "path": str(project_root / rel),
            "description": description,
            "function": function,
            "file_type": file_type,
        }
        for rel, description, function, file_type in artifacts
    )
