from __future__ import annotations
|
|
|
|
import unittest
|
|
|
|
from mais_humana.institutional_decisions import (
|
|
PLATFORM_ALIASES,
|
|
build_human_decision_matrix,
|
|
build_human_institutional_evidence_gap_analysis,
|
|
build_human_policy_readiness_panel,
|
|
classify_development_action_for_humans,
|
|
export_human_policy_readiness_panel,
|
|
list_institutional_decisions,
|
|
summarize_decisions,
|
|
)
|
|
|
|
|
|
class InstitutionalDecisionHumanTests(unittest.TestCase):
    """Behavioural tests for the human-facing institutional decision layer.

    Covers the decision catalogue, the aggregate summary, the decision
    matrix, platform alias documentation, development-action classification,
    the policy readiness panel, and the evidence gap analysis.
    """

    def test_six_decisions_are_available_for_human_translation(self) -> None:
        """All six institutional decisions exist, in canonical order, with profiles and evidence."""
        catalog = list_institutional_decisions()
        self.assertEqual(len(catalog), 6)
        expected_ids = [
            "docs_full_operational_platform",
            "mcp_required_cross_platform_acceptance",
            "ten_year_institutional_retention",
            "live_sensitive_data_purge_forbidden",
            "canonical_platform_names",
            "development_execution_for_gpt_codex",
        ]
        self.assertEqual([entry.decision_id for entry in catalog], expected_ids)
        self.assertTrue(all(entry.affected_profiles for entry in catalog))
        self.assertTrue(all(entry.expected_mcp_evidence for entry in catalog))

    def test_summary_exposes_profiles_evidence_and_blockers(self) -> None:
        """The aggregate summary surfaces profiles, evidence fields and blockers."""
        overview = summarize_decisions()
        self.assertEqual(overview["decisions"], 6)
        self.assertGreaterEqual(overview["profile_count"], 5)
        self.assertIn("sourceRecordsHash", overview["expected_mcp_evidence_fields"])
        self.assertIn("segredo_exposto", overview["blockers"])
        self.assertGreaterEqual(overview["ready_criteria_count"], 18)

    def test_matrix_keeps_biblioteca_privada_outside_platforms(self) -> None:
        """The private library alias is documented as owned by a platform, not as one."""
        decision_matrix = build_human_decision_matrix()
        self.assertTrue(decision_matrix["ok"])
        self.assertEqual(decision_matrix["summary"]["decisions"], 6)
        biblioteca = next(
            alias
            for alias in decision_matrix["platform_aliases"]
            if alias["current_name"] == "tudo-para-ia-biblioteca-privada"
        )
        self.assertFalse(biblioteca["is_platform"])
        self.assertEqual(biblioteca["owner_platform_id"], "tudo-para-ia-integracoes-platform")
        self.assertIn("Mais Humana traduz impacto", decision_matrix["human_boundary"])

    def test_platform_aliases_document_legacy_platform_suffixes(self) -> None:
        """Legacy aliases exist and their canonical names carry the -platform suffix."""
        legacy_aliases = [entry for entry in PLATFORM_ALIASES if entry.alias_status == "legacy_alias"]
        self.assertGreaterEqual(len(legacy_aliases), 3)
        for entry in legacy_aliases:
            self.assertTrue(entry.canonical_name.endswith("-platform"))

    def test_development_action_assessment_is_broad_but_blocks_secret_and_external_effect(self) -> None:
        """Simulation is allowed; external effects and secret exposure are not."""
        simulation = classify_development_action_for_humans("simular usuario tenant compra e painel")
        self.assertTrue(simulation.allowed)
        self.assertEqual(simulation.decision, "development_simulation_allowed")
        self.assertIn("truthState", simulation.required_controls)

        # Actions with real external effect require explicit authorization.
        external_effect = classify_development_action_for_humans("send real message to customer")
        self.assertFalse(external_effect.allowed)
        self.assertEqual(external_effect.decision, "explicit_authorization_required")

        # Secret exposure is blocked outright, never merely gated.
        secret_exposure = classify_development_action_for_humans("mostrar token secret")
        self.assertFalse(secret_exposure.allowed)
        self.assertEqual(secret_exposure.decision, "blocked_secret_exposure")

    def test_human_policy_readiness_panel_marks_missing_evidence_as_partial(self) -> None:
        """Decisions with incomplete evidence surface as partial in the panel."""
        evidence_by_decision = {
            "docs_full_operational_platform": ("documentId", "sourceHash", "authority", "truthState", "missingTopics"),
            "mcp_required_cross_platform_acceptance": ("originPlatformId", "contractHash"),
        }
        panel = build_human_policy_readiness_panel(evidence_by_decision)
        self.assertTrue(panel.ok)
        self.assertEqual(panel.summary["total"], 6)
        mcp_signal = next(
            signal
            for signal in panel.signals
            if signal.decision_id == "mcp_required_cross_platform_acceptance"
        )
        self.assertEqual(mcp_signal.status, "partial")
        self.assertIn("destinationPlatformId", mcp_signal.missing_for_human_confidence)

        # The argument-free export is expected to report every decision as ready.
        exported = export_human_policy_readiness_panel()
        self.assertTrue(exported["ok"])
        self.assertEqual(exported["summary"]["ready"], 6)
        self.assertEqual(len(exported["signals"]), 6)

    def test_human_evidence_gap_analysis_explains_missing_confidence_fields(self) -> None:
        """Gap analysis reports partial decisions and names the missing evidence fields."""
        partial_evidence = {
            "docs_full_operational_platform": (
                "documentId",
                "sourceHash",
                "authority",
                "truthState",
                "missingTopics",
            ),
            "mcp_required_cross_platform_acceptance": ("originPlatformId", "contractHash"),
        }
        analysis = build_human_institutional_evidence_gap_analysis(partial_evidence)
        self.assertFalse(analysis["ok"])
        self.assertEqual(analysis["summary"]["ready"], 1)
        self.assertGreaterEqual(analysis["summary"]["partial"], 1)
        mcp_gap = next(
            gap
            for gap in analysis["gaps"]
            if gap["decision_id"] == "mcp_required_cross_platform_acceptance"
        )
        self.assertEqual(mcp_gap["status"], "partial")
        self.assertIn("destinationPlatformId", mcp_gap["missing_evidence"])
        self.assertIn("MCP/Docs continuam donos", analysis["human_boundary"])
|
|
# Allow running this test module directly (e.g. `python test_institutional_decisions.py`).
if __name__ == "__main__":
    unittest.main()
|