auto-sync: tudo-para-ia-mais-humana 2026-05-04 13:58:55

parent 614a6e81e1
commit 0aa19f2340
2 changed files with 119 additions and 0 deletions


@@ -48,6 +48,25 @@ class DevelopmentActionAssessment:
    human_explanation: str


@dataclass(frozen=True)
class HumanReadinessSignal:
    decision_id: str
    status: str
    visible_to_profile: str
    evidence_fields: tuple[str, ...]
    missing_for_human_confidence: tuple[str, ...]
    next_question: str


@dataclass(frozen=True)
class HumanPolicyPanel:
    policy_version: str
    ok: bool
    generated_at: str
    signals: tuple[HumanReadinessSignal, ...]
    summary: dict[str, int]


def _impact(profile_id: str, expectation: str, signal: str, risk: str, question: str) -> HumanImpact:
    return HumanImpact(
        profile_id=profile_id,
@@ -336,3 +355,83 @@ def classify_development_action_for_humans(action: str, environment: str = "deve
        required_controls=("truthState", "actorId", "auditId", "dryRunWhenApplicable"),
        human_explanation="An auditable simulation in development may be executed to advance the round.",
    )
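

# build_human_policy_readiness_panel merges caller-supplied evidence over the
# per-decision defaults below, then grades each institutional decision as
# ready, partial, or blocked for human reviewers.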
def build_human_policy_readiness_panel(
    available_evidence: dict[str, tuple[str, ...]] | None = None,
) -> HumanPolicyPanel:
    default_evidence_by_decision = {
        "docs_full_operational_platform": ("documentId", "sourceHash", "authority", "truthState", "missingTopics"),
        "mcp_required_cross_platform_acceptance": (
            "originPlatformId",
            "destinationPlatformId",
            "contractHash",
            "sourceRecordsHash",
            "permissionId",
            "evidenceId",
            "traceId",
            "auditId",
        ),
        "ten_year_institutional_retention": (
            "retentionClass",
            "retentionYears",
            "legalHold",
            "rollbackPlan",
            "beforeAfterEvidence",
        ),
        "live_sensitive_data_purge_forbidden": (
            "cleanupClassification",
            "allowedTerms",
            "forbiddenTerms",
            "sensitiveDataCheck",
        ),
        "canonical_platform_names": ("canonicalName", "aliasStatus", "ownerPlatformId", "providerId"),
        "development_execution_for_gpt_codex": ("truthState", "actorId", "auditId", "dryRun", "environment"),
    }
    evidence_by_decision = {
        **default_evidence_by_decision,
        **(available_evidence or {}),
    }
    signals: list[HumanReadinessSignal] = []
    for decision in INSTITUTIONAL_DECISIONS:
        evidence = tuple(evidence_by_decision.get(decision.decision_id, ()))
        expected = set(decision.expected_mcp_evidence)
        missing = tuple(sorted(expected.difference(evidence)))
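        # Grading: no missing fields -> ready; some evidence -> partial; none -> blocked.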
status = "ready" if not missing else "partial" if evidence else "blocked"
profile = decision.affected_profiles[0]
signals.append(
HumanReadinessSignal(
decision_id=decision.decision_id,
status=status,
visible_to_profile=profile.profile_id,
evidence_fields=evidence,
missing_for_human_confidence=missing,
next_question=profile.next_human_question if missing else "A evidencia ja permite resposta humana equivalente?",
)
)
    summary = {
        "ready": sum(1 for signal in signals if signal.status == "ready"),
        "partial": sum(1 for signal in signals if signal.status == "partial"),
        "blocked": sum(1 for signal in signals if signal.status == "blocked"),
        "total": len(signals),
    }
    return HumanPolicyPanel(
        policy_version=POLICY_VERSION,
        ok=summary["blocked"] == 0,
        generated_at="2026-05-04T00:00:00.000Z",
        signals=tuple(signals),
        summary=summary,
    )

def export_human_policy_readiness_panel(
    available_evidence: dict[str, tuple[str, ...]] | None = None,
) -> dict[str, object]:
    panel = build_human_policy_readiness_panel(available_evidence)
    return {
        "ok": panel.ok,
        "policy_version": panel.policy_version,
        "generated_at": panel.generated_at,
        "summary": panel.summary,
        "signals": [asdict(signal) for signal in panel.signals],
    }
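
A minimal usage sketch of the new export (a hypothetical call site, not part of this commit; it assumes INSTITUTIONAL_DECISIONS and POLICY_VERSION exist earlier in this module, as the diff context implies):

# Hypothetical call site illustrating the evidence-override merge.
panel = export_human_policy_readiness_panel(
    available_evidence={
        # Partial evidence for one decision; all other decisions keep the defaults.
        "ten_year_institutional_retention": ("retentionClass", "retentionYears"),
    }
)
assert panel["summary"]["total"] == len(panel["signals"])
for signal in panel["signals"]:
    if signal["status"] != "ready":
        print(signal["decision_id"], "missing:", signal["missing_for_human_confidence"])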