auto-sync: tudo-para-ia-mais-humana 2026-05-04 13:58:55
This commit is contained in:
@@ -48,6 +48,25 @@ class DevelopmentActionAssessment:
|
||||
human_explanation: str
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class HumanReadinessSignal:
    """Immutable per-decision readiness indicator shown to a human reviewer.

    Describes how much of the expected evidence for one institutional
    decision is currently available and what is still missing before a
    human can confidently sign off (built by
    ``build_human_policy_readiness_panel``).
    """

    # Identifier of the institutional decision this signal describes.
    decision_id: str
    # Readiness bucket: "ready" (nothing missing), "partial" (some
    # evidence present) or "blocked" (no evidence at all).
    status: str
    # Profile id of the human audience this signal is surfaced to.
    visible_to_profile: str
    # Evidence field names currently available for the decision.
    evidence_fields: tuple[str, ...]
    # Expected evidence fields not yet present, sorted alphabetically.
    missing_for_human_confidence: tuple[str, ...]
    # Follow-up question to put to the human reviewer next.
    next_question: str
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class HumanPolicyPanel:
    """Aggregated readiness panel covering all institutional decisions."""

    # Version of the policy the panel was built against (POLICY_VERSION).
    policy_version: str
    # True when no signal is in the "blocked" state.
    ok: bool
    # ISO-8601 timestamp of panel generation; currently a fixed value,
    # not the wall clock.
    generated_at: str
    # One readiness signal per institutional decision.
    signals: tuple[HumanReadinessSignal, ...]
    # Counts per status ("ready"/"partial"/"blocked") plus "total".
    summary: dict[str, int]
|
||||
|
||||
|
||||
def _impact(profile_id: str, expectation: str, signal: str, risk: str, question: str) -> HumanImpact:
|
||||
return HumanImpact(
|
||||
profile_id=profile_id,
|
||||
@@ -336,3 +355,83 @@ def classify_development_action_for_humans(action: str, environment: str = "deve
|
||||
required_controls=("truthState", "actorId", "auditId", "dryRunWhenApplicable"),
|
||||
human_explanation="Simulacao auditavel em desenvolvimento pode ser executada para avancar a rodada.",
|
||||
)
|
||||
|
||||
|
||||
def build_human_policy_readiness_panel(
    available_evidence: dict[str, tuple[str, ...]] | None = None,
) -> HumanPolicyPanel:
    """Build the human-facing readiness panel for all institutional decisions.

    For every decision in ``INSTITUTIONAL_DECISIONS`` the available evidence
    (caller-supplied entries override the built-in defaults) is compared with
    the decision's expected MCP evidence, producing one ``HumanReadinessSignal``
    per decision and an aggregate status summary.

    Args:
        available_evidence: Optional mapping of decision id to the evidence
            field names currently available; overrides defaults per key.

    Returns:
        A frozen ``HumanPolicyPanel``; ``ok`` is True when no decision is
        fully blocked (i.e. every decision has at least some evidence).
    """
    # Baseline evidence assumed available per decision when the caller
    # provides nothing for that decision id.
    baseline: dict[str, tuple[str, ...]] = {
        "docs_full_operational_platform": ("documentId", "sourceHash", "authority", "truthState", "missingTopics"),
        "mcp_required_cross_platform_acceptance": (
            "originPlatformId",
            "destinationPlatformId",
            "contractHash",
            "sourceRecordsHash",
            "permissionId",
            "evidenceId",
            "traceId",
            "auditId",
        ),
        "ten_year_institutional_retention": (
            "retentionClass",
            "retentionYears",
            "legalHold",
            "rollbackPlan",
            "beforeAfterEvidence",
        ),
        "live_sensitive_data_purge_forbidden": (
            "cleanupClassification",
            "allowedTerms",
            "forbiddenTerms",
            "sensitiveDataCheck",
        ),
        "canonical_platform_names": ("canonicalName", "aliasStatus", "ownerPlatformId", "providerId"),
        "development_execution_for_gpt_codex": ("truthState", "actorId", "auditId", "dryRun", "environment"),
    }
    merged_evidence = dict(baseline)
    if available_evidence:
        merged_evidence.update(available_evidence)

    readiness: list[HumanReadinessSignal] = []
    for decision in INSTITUTIONAL_DECISIONS:
        present = tuple(merged_evidence.get(decision.decision_id, ()))
        absent = tuple(sorted(set(decision.expected_mcp_evidence) - set(present)))
        # Three-way classification: everything present, something present,
        # or nothing at all.
        if not absent:
            state = "ready"
        elif present:
            state = "partial"
        else:
            state = "blocked"
        # The first affected profile is the audience for this signal.
        audience = decision.affected_profiles[0]
        if absent:
            follow_up = audience.next_human_question
        else:
            follow_up = "A evidencia ja permite resposta humana equivalente?"
        readiness.append(
            HumanReadinessSignal(
                decision_id=decision.decision_id,
                status=state,
                visible_to_profile=audience.profile_id,
                evidence_fields=present,
                missing_for_human_confidence=absent,
                next_question=follow_up,
            )
        )

    # Aggregate counts per status plus a grand total.
    tallies = {"ready": 0, "partial": 0, "blocked": 0}
    for signal in readiness:
        tallies[signal.status] += 1
    tallies["total"] = len(readiness)

    return HumanPolicyPanel(
        policy_version=POLICY_VERSION,
        ok=tallies["blocked"] == 0,
        # Fixed timestamp keeps the panel deterministic for comparisons.
        generated_at="2026-05-04T00:00:00.000Z",
        signals=tuple(readiness),
        summary=tallies,
    )
|
||||
|
||||
|
||||
def export_human_policy_readiness_panel(
    available_evidence: dict[str, tuple[str, ...]] | None = None,
) -> dict[str, object]:
    """Serialize the readiness panel into a JSON-friendly plain mapping.

    Args:
        available_evidence: Optional per-decision evidence overrides,
            forwarded unchanged to ``build_human_policy_readiness_panel``.

    Returns:
        A dict with the panel's ``ok`` flag, policy version, generation
        timestamp, status summary, and each signal converted via ``asdict``.
    """
    report = build_human_policy_readiness_panel(available_evidence)
    serialized_signals = [asdict(entry) for entry in report.signals]
    return {
        "ok": report.ok,
        "policy_version": report.policy_version,
        "generated_at": report.generated_at,
        "summary": report.summary,
        "signals": serialized_signals,
    }
|
||||
|
||||
@@ -5,7 +5,9 @@ import unittest
|
||||
from mais_humana.institutional_decisions import (
|
||||
PLATFORM_ALIASES,
|
||||
build_human_decision_matrix,
|
||||
build_human_policy_readiness_panel,
|
||||
classify_development_action_for_humans,
|
||||
export_human_policy_readiness_panel,
|
||||
list_institutional_decisions,
|
||||
summarize_decisions,
|
||||
)
|
||||
@@ -66,6 +68,24 @@ class InstitutionalDecisionHumanTests(unittest.TestCase):
|
||||
self.assertFalse(secret.allowed)
|
||||
self.assertEqual(secret.decision, "blocked_secret_exposure")
|
||||
|
||||
def test_human_policy_readiness_panel_marks_missing_evidence_as_partial(self) -> None:
    """Partial evidence yields a 'partial' signal without blocking the panel."""
    overrides = {
        "docs_full_operational_platform": ("documentId", "sourceHash", "authority", "truthState", "missingTopics"),
        "mcp_required_cross_platform_acceptance": ("originPlatformId", "contractHash"),
    }
    panel = build_human_policy_readiness_panel(overrides)
    # Partial evidence must not block the overall panel.
    self.assertTrue(panel.ok)
    self.assertEqual(panel.summary["total"], 6)
    matches = [entry for entry in panel.signals if entry.decision_id == "mcp_required_cross_platform_acceptance"]
    mcp_signal = matches[0]
    self.assertEqual(mcp_signal.status, "partial")
    self.assertIn("destinationPlatformId", mcp_signal.missing_for_human_confidence)

    # With no overrides the defaults are complete: everything is ready.
    exported = export_human_policy_readiness_panel()
    self.assertTrue(exported["ok"])
    self.assertEqual(exported["summary"]["ready"], 6)
    self.assertEqual(len(exported["signals"]), 6)
|
||||
|
||||
|
||||
# Allow running this test module directly (python <file>) as well as
# via a test runner.
if __name__ == "__main__":
    unittest.main()
|
||||
|
||||
Reference in New Issue
Block a user