feat: fundar plataforma mais humana

This commit is contained in:
Ami Soares
2026-04-30 06:42:00 -03:00
commit c9c1056193
183 changed files with 639629 additions and 0 deletions

View File

@@ -0,0 +1,29 @@
"""Human-centered operational analysis for the Tudo Para IA ecosystem."""
from .models import (
Evidence,
HumanNeed,
HumanProfile,
MatrixCell,
PlatformDefinition,
PlatformScan,
Recommendation,
ReportBundle,
)
from .governance_models import EcosystemGovernancePortfolio, PlatformGovernanceCard, RoundExecutionPackage
# Public API surface, alphabetically sorted for easy scanning.
__all__ = [
    "EcosystemGovernancePortfolio",
    "Evidence",
    "HumanNeed",
    "HumanProfile",
    "MatrixCell",
    "PlatformDefinition",
    "PlatformGovernanceCard",
    "PlatformScan",
    "Recommendation",
    "ReportBundle",
    "RoundExecutionPackage",
]
__version__ = "0.1.0"

View File

@@ -0,0 +1,122 @@
"""Acceptance checklist for closing a full operational round."""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Sequence
from .models import PlatformHumanReport, ReportBundle, ServiceOrder, as_plain_data
@dataclass(slots=True)
class AcceptanceItem:
    """One entry of the round-closing acceptance checklist."""

    # Stable identifier used to reference the item in reports.
    item_id: str
    # Human-readable item title.
    title: str
    # Whether the check passed.
    passed: bool
    # Free-text evidence backing the verdict.
    evidence: str
    # Follow-up action; set to "nenhuma" when the item passed.
    required_next_action: str

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this item via as_plain_data."""
        return as_plain_data(self)
@dataclass(slots=True)
class AcceptanceReport:
    """Aggregated result of all acceptance checks for one round."""

    # Individual checklist entries, in evaluation order.
    items: tuple[AcceptanceItem, ...]
    # True only when every item passed.
    passed: bool

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this report via as_plain_data."""
        return as_plain_data(self)
def item(item_id: str, title: str, passed: bool, evidence: str, action: str) -> AcceptanceItem:
    """Build an AcceptanceItem, collapsing the next action to "nenhuma" on pass."""
    if passed:
        next_action = "nenhuma"
    else:
        next_action = action
    return AcceptanceItem(
        item_id=item_id,
        title=title,
        passed=passed,
        evidence=evidence,
        required_next_action=next_action,
    )
def required_artifacts(project_root: Path) -> tuple[Path, ...]:
    """Return the artifact paths that must exist under *project_root* to close a round."""
    # (folder, filename) pairs, kept in the canonical reporting order.
    layout = (
        ("dados", "snapshot-ecossistema.json"),
        ("dados", "quality-gates.json"),
        ("dados", "insights-operacionais.json"),
        ("matrizes", "matriz-plataforma-perfil.csv"),
        ("graficos", "matriz-plataforma-perfil.svg"),
        ("graficos", "maturidade-por-plataforma.svg"),
        ("relatorios-docx", "RELATORIO-GERAL-DO-ECOSSISTEMA-humana.docx"),
        ("ecossistema", "RELATORIO-GERAL-DO-ECOSSISTEMA-humana.md"),
        ("ecossistema", "QUALITY-GATE-MAIS-HUMANO.md"),
        ("ecossistema", "INSIGHTS-OPERACIONAIS-MAIS-HUMANA.md"),
        ("pessoas-e-papeis", "perguntas-humanas-respondidas.md"),
        ("pessoas-e-papeis", "playbooks-humanos.md"),
    )
    return tuple(project_root / folder / filename for folder, filename in layout)
def build_acceptance_report(
    project_root: Path,
    reports: Sequence[PlatformHumanReport],
    orders: Sequence[ServiceOrder],
    bundle: ReportBundle | None,
) -> AcceptanceReport:
    """Evaluate the five round-closing gates and return the aggregated report."""
    missing = [candidate for candidate in required_artifacts(project_root) if not candidate.exists()]
    total_code = 0
    for entry in reports:
        total_code += entry.scan.code_lines
    matrix_cells = bundle.matrix_cells if bundle else 0
    checklist = [
        item(
            "artifacts",
            "Artefatos minimos gerados",
            not missing,
            ", ".join(str(path) for path in missing[:5]) if missing else "todos encontrados",
            "Regenerar relatorios e validar paths esperados.",
        ),
        item(
            "platform_reports",
            "Relatorios por plataforma",
            len(reports) >= 14,
            f"{len(reports)} relatorios",
            "Completar catalogo ou corrigir scanner das plataformas esperadas.",
        ),
        item(
            "code_read",
            "Leitura tecnica minima",
            total_code >= 10_000,
            f"{total_code} linhas de codigo analisadas",
            "Expandir varredura para repositorios reais ou registrar limitacao material.",
        ),
        item(
            "orders",
            "Ordens de saida criadas",
            len(orders) >= 10,
            f"{len(orders)} ordens",
            "Criar 5 executivas e 5 gerenciais de saida com base nas lacunas reais.",
        ),
        item(
            "bundle",
            "Bundle de geracao completo",
            bundle is not None and matrix_cells >= 14 * 12,
            f"{matrix_cells} celulas de matriz",
            "Reexecutar gerador com catalogo de perfis completo.",
        ),
    ]
    overall = all(check.passed for check in checklist)
    return AcceptanceReport(items=tuple(checklist), passed=overall)
def acceptance_markdown(report: AcceptanceReport) -> str:
    """Render the acceptance report as a Markdown document (trailing newline included)."""
    parts: list[str] = ["# Acceptance Checklist Mais Humana", ""]
    parts.extend((f"- passou: `{report.passed}`", ""))
    for entry in report.items:
        parts.extend(
            (
                f"## {entry.title}",
                "",
                f"- status: `{'ok' if entry.passed else 'pendente'}`",
                f"- evidencia: {entry.evidence}",
                f"- proxima acao: {entry.required_next_action}",
                "",
            )
        )
    return "\n".join(parts).strip() + "\n"

View File

@@ -0,0 +1,835 @@
"""Signal extraction rules for the Mais Humana operational dossier.
This module turns local repository evidence into normalized operational signals.
It does not decide whether a project is good or bad by a single keyword. The
goal is to preserve useful nuance:
* a Docs catalog-only decision can be a formal exception or a blocker;
* a BYOK credential reference is a capability, while a missing tenant smoke is
a blocker;
* Cloudflare plugin denial is expected and must not become a platform blocker;
* wrangler, HTTP evidence, readiness, sameSource, and panelReady are real
operational signals;
* repository, Git, tests, OpenAPI, and security redaction remain separate gates.
"""
from __future__ import annotations
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Sequence
from .models import EvidenceKind, NeedCategory, PlatformHumanReport, PlatformScan, Recommendation, merge_unique, slugify
from .operational_models import (
EvidenceRole,
GateDomain,
HumanReadinessStage,
OperationalSignal,
SignalKind,
SignalSeverity,
SourceConfidence,
SourceReference,
source_refs_from_evidence,
source_refs_from_strings,
stable_digest,
)
@dataclass(slots=True)
class SignalRule:
    """A lightweight matching rule for evidence summaries, warning text, and paths."""

    # Stable identifier; becomes part of generated signal ids.
    rule_id: str
    # Human-readable title copied onto matched signals.
    title: str
    # Signal classification (capability, blocker, gap, ...).
    kind: SignalKind
    # Gate domain the resulting signal belongs to.
    domain: GateDomain
    # Severity attached to matched signals.
    severity: SignalSeverity
    # Readiness stage implied by a match.
    stage: HumanReadinessStage
    # Human-need categories the rule speaks to.
    categories: tuple[NeedCategory, ...]
    # Regex patterns; any single match fires the rule.
    patterns: tuple[str, ...]
    # Summary text copied onto matched signals.
    positive_summary: str
    # Suggested follow-up action for matched signals.
    next_action: str
    # Free-form tags propagated to signals.
    tags: tuple[str, ...] = ()

    def matches(self, text: str) -> bool:
        """Return True when any pattern matches *text* (case-insensitively)."""
        # Lowering the text AND passing re.I is redundant for ASCII patterns,
        # but together they keep mixed-case patterns (e.g. "recordsHash") safe.
        lowered = text.lower()
        return any(re.search(pattern, lowered, re.I) for pattern in self.patterns)
def rule(
    rule_id: str,
    title: str,
    kind: SignalKind,
    domain: GateDomain,
    severity: SignalSeverity,
    stage: HumanReadinessStage,
    categories: Iterable[NeedCategory],
    patterns: Iterable[str],
    summary: str,
    next_action: str,
    tags: Iterable[str] = (),
) -> SignalRule:
    """Shorthand constructor that freezes every iterable argument into a tuple."""
    # Positional construction follows SignalRule's declared field order.
    return SignalRule(
        rule_id,
        title,
        kind,
        domain,
        severity,
        stage,
        tuple(categories),
        tuple(patterns),
        summary,
        next_action,
        tuple(tags),
    )
# Positive capability rules: each one turns keyword evidence into an INFO-level
# signal describing something the platform already demonstrates. Matching is
# case-insensitive (see SignalRule.matches).
CAPABILITY_RULES: tuple[SignalRule, ...] = (
    rule(
        "readiness-surface",
        "Readiness operacional detectada",
        SignalKind.CAPABILITY,
        GateDomain.OBSERVABILITY,
        SignalSeverity.INFO,
        HumanReadinessStage.TECHNICAL_READY,
        (NeedCategory.OBSERVABILITY, NeedCategory.OPERATIONS),
        (r"\breadiness\b", r"\bready\b", r"prontid"),
        "O repositorio possui indicios de readiness ou prontidao operacional.",
        "manter readiness como evidencia regressiva",
        ("readiness",),
    ),
    rule(
        "health-surface",
        "Health check detectado",
        SignalKind.CAPABILITY,
        GateDomain.RUNTIME,
        SignalSeverity.INFO,
        HumanReadinessStage.TECHNICAL_READY,
        (NeedCategory.OPERATIONS, NeedCategory.OBSERVABILITY),
        (r"\bhealth\b", r"/health\b"),
        "O repositorio expoe ou documenta health check.",
        "validar health em smoke local ou publicado",
        ("health",),
    ),
    rule(
        "openapi-contract",
        "Contrato OpenAPI detectado",
        SignalKind.CAPABILITY,
        GateDomain.CONTRACT,
        SignalSeverity.INFO,
        HumanReadinessStage.HUMAN_EXPLAINABLE,
        (NeedCategory.DOCUMENTATION, NeedCategory.INTEGRATION, NeedCategory.GOVERNANCE),
        (r"openapi", r"swagger"),
        "O repositorio possui contrato OpenAPI, documento ou rota relacionada.",
        "manter contrato sincronizado com rotas reais",
        ("openapi",),
    ),
    rule(
        "panel-ready-signal",
        "panelReady detectado",
        SignalKind.CAPABILITY,
        GateDomain.PANEL,
        SignalSeverity.INFO,
        HumanReadinessStage.PANEL_READY,
        (NeedCategory.EXPERIENCE, NeedCategory.GOVERNANCE),
        (r"panelready", r"panel ready", r"painel.*pront"),
        "Ha indicio de contrato de tela pronto para painel humano.",
        "validar se panelReady usa a mesma fonte do GPT",
        ("panelReady",),
    ),
    rule(
        "same-source-signal",
        "sameSource detectado",
        SignalKind.CAPABILITY,
        GateDomain.PANEL,
        SignalSeverity.INFO,
        HumanReadinessStage.PANEL_READY,
        (NeedCategory.EXPERIENCE, NeedCategory.GOVERNANCE, NeedCategory.OBSERVABILITY),
        # "recordsHash" is mixed case; matching is case-insensitive anyway.
        (r"samesource", r"same source", r"mesma fonte", r"sourcehash", r"recordsHash"),
        "Ha indicio de mesma fonte entre GPT, painel e evidencia.",
        "manter hash de fonte e registros em regressao",
        ("sameSource",),
    ),
    rule(
        "credential-ref-signal",
        "credentialRef detectado",
        SignalKind.CAPABILITY,
        GateDomain.SECURITY,
        SignalSeverity.INFO,
        HumanReadinessStage.CONTROLLED_READY,
        (NeedCategory.SECURITY, NeedCategory.INTEGRATION),
        (r"credentialref", r"credential ref", r"credential:"),
        "A plataforma usa referencia de credencial em vez de expor segredo bruto.",
        "validar nao vazamento em relatorios, logs e respostas",
        ("credentialRef", "redaction"),
    ),
    rule(
        "byok-signal",
        "BYOK detectado",
        SignalKind.CAPABILITY,
        GateDomain.INTEGRATION,
        SignalSeverity.INFO,
        HumanReadinessStage.CONTROLLED_READY,
        (NeedCategory.INTEGRATION, NeedCategory.SECURITY, NeedCategory.COMMERCIAL),
        (r"\bbyok\b", r"bring your own key", r"credencial.*cliente"),
        "A jornada BYOK aparece como superficie tecnica ou comercial.",
        "validar cadeia organizacao, usuario, entitlement, credentialRef, smoke e consumo",
        ("BYOK",),
    ),
    rule(
        "audit-trace-signal",
        "Trace e auditoria detectados",
        SignalKind.CAPABILITY,
        GateDomain.OBSERVABILITY,
        SignalSeverity.INFO,
        HumanReadinessStage.HUMAN_EXPLAINABLE,
        (NeedCategory.OBSERVABILITY, NeedCategory.GOVERNANCE),
        (r"\baudit\b", r"\btrace\b", r"auditid", r"traceid", r"auditoria"),
        "A plataforma registra ou expõe trace/audit para leitura operacional.",
        "garantir que trace/audit nao contenham segredo bruto",
        ("audit", "trace"),
    ),
    rule(
        "business-entitlement-signal",
        "Entitlement ou regra comercial detectada",
        SignalKind.CAPABILITY,
        GateDomain.BUSINESS,
        SignalSeverity.INFO,
        HumanReadinessStage.HUMAN_EXPLAINABLE,
        (NeedCategory.COMMERCIAL, NeedCategory.FINANCE),
        (r"entitlement", r"checkout", r"invoice", r"billing", r"franquia", r"cobranca"),
        "Ha evidencia de regra comercial, cobranca, consumo ou entitlement.",
        "sincronizar Business como fonte unica de plano, franquia e bloqueio",
        ("business", "entitlement"),
    ),
    rule(
        "identity-rbac-signal",
        "Identity/RBAC detectado",
        SignalKind.CAPABILITY,
        GateDomain.IDENTITY,
        SignalSeverity.INFO,
        HumanReadinessStage.HUMAN_EXPLAINABLE,
        (NeedCategory.SECURITY, NeedCategory.ADMINISTRATION, NeedCategory.GOVERNANCE),
        # Broad patterns ("user", "tenant") fire on most repos; severity is INFO only.
        (r"\brbac\b", r"identity", r"organizacao", r"organization", r"user", r"usuario", r"tenant"),
        "Ha evidencia de identidade, papel, organizacao, tenant ou permissao.",
        "amarrar actor, organizationId, role e escopo nos contratos humanos",
        ("identity", "rbac"),
    ),
    rule(
        "wrangler-operational-signal",
        "Wrangler operacional detectado",
        SignalKind.CAPABILITY,
        GateDomain.CLOUD,
        SignalSeverity.INFO,
        HumanReadinessStage.TECHNICAL_READY,
        (NeedCategory.OPERATIONS, NeedCategory.INTEGRATION),
        (r"\bwrangler\b", r"workers\.dev", r"cloudflare worker"),
        "Ha evidencia de operacao Cloudflare por wrangler/Worker.",
        "usar wrangler para deploy, logs, rotas, secrets e health checks",
        ("wrangler", "cloudflare"),
    ),
)
# Negative/attention rules. Despite the name, this set mixes kinds: BLOCKER,
# GAP, RISK, and one deliberate EXCEPTION (the Cloudflare-plugin denial, which
# must NOT be treated as a blocker — see the module docstring).
BLOCKER_RULES: tuple[SignalRule, ...] = (
    rule(
        "repo-missing",
        "Repositorio real ausente",
        SignalKind.BLOCKER,
        GateDomain.REPOSITORY,
        SignalSeverity.CRITICAL,
        HumanReadinessStage.NOT_FOUND,
        (NeedCategory.GOVERNANCE, NeedCategory.OPERATIONS),
        (r"repositorio real nao encontrado", r"repo.*ausente", r"not found"),
        "Sem repositorio real local nao ha base material para validar a plataforma.",
        "criar ou clonar repositorio real sem numero da pasta gerencial",
        ("repository",),
    ),
    rule(
        "git-missing",
        "Git local ausente ou inacessivel",
        SignalKind.BLOCKER,
        GateDomain.REPOSITORY,
        SignalSeverity.HIGH,
        HumanReadinessStage.LOCAL_ONLY,
        (NeedCategory.GOVERNANCE, NeedCategory.OPERATIONS),
        (r"sem \.git", r"git.*ausente", r"git.*inacess", r"permission denied.*\.git", r"index\.lock"),
        "Sem Git operacional a rodada nao consegue registrar commit, hash e sincronizacao.",
        "resolver ACL de .git, configurar origin e repetir commit/push",
        ("git", "sync"),
    ),
    rule(
        "tests-missing",
        "Testes nao encontrados",
        SignalKind.GAP,
        GateDomain.TESTS,
        SignalSeverity.MEDIUM,
        HumanReadinessStage.TECHNICAL_READY,
        (NeedCategory.OPERATIONS, NeedCategory.OBSERVABILITY),
        (r"testes nao encontrados", r"no tests", r"sem teste"),
        "A varredura nao encontrou suite ou smoke detectavel.",
        "criar teste canonico de health/readiness/contrato humano",
        ("tests",),
    ),
    rule(
        "openapi-missing",
        "Contrato OpenAPI nao encontrado",
        SignalKind.GAP,
        GateDomain.CONTRACT,
        SignalSeverity.MEDIUM,
        HumanReadinessStage.TECHNICAL_READY,
        (NeedCategory.DOCUMENTATION, NeedCategory.INTEGRATION),
        (r"openapi nao encontrado", r"openapi.*missing", r"sem openapi"),
        "Sem contrato OpenAPI ou equivalente, a integracao fica menos auditavel.",
        "publicar OpenAPI minima ou declarar contrato alternativo versionado",
        ("openapi", "contract"),
    ),
    rule(
        "docs-catalog-only",
        "Docs catalogOnly exige decisao formal",
        SignalKind.BLOCKER,
        GateDomain.DOCS,
        SignalSeverity.HIGH,
        HumanReadinessStage.CATALOG_ONLY,
        (NeedCategory.DOCUMENTATION, NeedCategory.GOVERNANCE),
        (r"catalogonly", r"catalog_only", r"catalog-only", r"docs.*catalog"),
        "Docs aparece como catalogOnly; isso precisa ser excecao formal ou leitura minima responseReady.",
        "promover leitura canonica minima de Docs ou registrar excecao deliberada",
        ("docs", "catalogOnly"),
    ),
    rule(
        "intelligence-unsupported",
        "Intelligence sem promocao operacional completa",
        SignalKind.BLOCKER,
        GateDomain.GOVERNANCE,
        SignalSeverity.HIGH,
        HumanReadinessStage.CATALOG_ONLY,
        (NeedCategory.STRATEGY, NeedCategory.OBSERVABILITY),
        (r"unsupported", r"catalogonly-local-ready", r"intelligence.*planned", r"public endpoint.*missing"),
        "Intelligence aparece local/catalogada, mas ainda depende de endpoint, storage ou registro operacional.",
        "manter como catalogOnly planejada ate publicar smoke HTTP e registrar no MCP central",
        ("intelligence", "unsupported"),
    ),
    rule(
        "credential-live-pending",
        "Credencial live ou BYOK pendente",
        SignalKind.BLOCKER,
        GateDomain.INTEGRATION,
        SignalSeverity.HIGH,
        HumanReadinessStage.CONTROLLED_READY,
        (NeedCategory.INTEGRATION, NeedCategory.SECURITY, NeedCategory.COMMERCIAL),
        (r"token.*missing", r"credential.*not.*ready", r"needs_token", r"live.*credential", r"credencial live", r"byok.*pend"),
        "A integracao depende de credencial live, token ou credentialRef por tenant.",
        "criar sessao BYOK, gerar credentialRef, executar smoke readonly e provar nao vazamento",
        ("credential", "BYOK"),
    ),
    rule(
        "panel-source-divergence",
        "Painel e GPT podem divergir",
        SignalKind.RISK,
        GateDomain.PANEL,
        SignalSeverity.HIGH,
        HumanReadinessStage.PANEL_READY,
        (NeedCategory.EXPERIENCE, NeedCategory.GOVERNANCE),
        (r"samesource.*false", r"same source.*false", r"diverg", r"source.*mismatch"),
        "Ha indicio de divergencia entre fonte do painel e fonte explicada pelo GPT.",
        "reconciliar sourceEndpoint, sourceToolId, sourcePayloadHash e sourceRecordsHash",
        ("sameSource", "panelReady"),
    ),
    # Deliberate EXCEPTION kind: a Cloudflare-plugin denial is expected and
    # must be recorded without blocking the order (see module docstring).
    rule(
        "plugin-cloudflare-expected-denial",
        "Negativa do plugin Cloudflare nao e blocker operacional",
        SignalKind.EXCEPTION,
        GateDomain.CLOUD,
        SignalSeverity.INFO,
        HumanReadinessStage.TECHNICAL_READY,
        (NeedCategory.OPERATIONS, NeedCategory.INTEGRATION),
        (r"plugin.*cloudflare.*denied", r"cloudflare-plugin-auth-denied", r"user rejected mcp tool call"),
        "Falha ou negativa do plugin Cloudflare e esperada e nao deve bloquear a OS.",
        "registrar tentativa do plugin e seguir trabalho operacional por wrangler quando aplicavel",
        ("cloudflare-plugin", "expected"),
    ),
    rule(
        "cloudflare-binding-local-blocker",
        "Bindings Cloudflare ausentes no ambiente local",
        SignalKind.RISK,
        GateDomain.CLOUD,
        SignalSeverity.MEDIUM,
        HumanReadinessStage.TECHNICAL_READY,
        (NeedCategory.OPERATIONS, NeedCategory.INTEGRATION),
        (r"cloudflare-bindings", r"binding.*missing", r"bindings.*ausent", r"d1.*missing", r"kv.*missing", r"r2.*missing"),
        "O runtime local indica bindings ausentes; isso limita prova live, mas nao invalida evidencia local.",
        "validar bindings com wrangler e registrar ambiente alvo do smoke",
        ("cloudflare", "bindings"),
    ),
)
# Per-platform DECISION rules, keyed by platform_id. Applied in addition to the
# generic rule sets by build_operational_signals.
PLATFORM_SPECIFIC_SIGNALS: dict[str, tuple[SignalRule, ...]] = {
    "docs": (
        rule(
            "docs-canonical-read",
            "Leitura canonica de Docs precisa ficar explicita",
            SignalKind.DECISION,
            GateDomain.DOCS,
            SignalSeverity.HIGH,
            HumanReadinessStage.CATALOG_ONLY,
            (NeedCategory.DOCUMENTATION, NeedCategory.GOVERNANCE),
            (r"docs", r"document", r"contrato", r"canon"),
            "Docs precisa decidir entre leitura responseReady minima e excecao catalogOnly formal.",
            "criar gate Docs: responseReady minimo ou excecao documentada sem bloquear ready global",
            ("docs", "decision"),
        ),
    ),
    "integracoes": (
        rule(
            "integracoes-byok-chain",
            "Jornada BYOK ponta a ponta deve ser provada",
            SignalKind.DECISION,
            GateDomain.INTEGRATION,
            SignalSeverity.HIGH,
            HumanReadinessStage.CONTROLLED_READY,
            (NeedCategory.INTEGRATION, NeedCategory.SECURITY, NeedCategory.COMMERCIAL),
            (r"byok", r"credentialref", r"cloudflare", r"gitlab", r"stripe", r"whatsapp"),
            "Integracoes tem base BYOK, mas precisa provar usuario, organizacao, entitlement, credentialRef e smoke.",
            "executar fluxo encadeado BYOK com nao vazamento e consumo auditavel",
            ("BYOK", "integracoes"),
        ),
    ),
    "business": (
        rule(
            "business-blocker-isolation",
            "Business deve isolar blockers por produto",
            SignalKind.DECISION,
            GateDomain.BUSINESS,
            SignalSeverity.MEDIUM,
            HumanReadinessStage.HUMAN_EXPLAINABLE,
            (NeedCategory.COMMERCIAL, NeedCategory.FINANCE, NeedCategory.GOVERNANCE),
            (r"blocker", r"panelready", r"entitlement", r"readycontrolled", r"commercial"),
            "Business aparece como fonte de readiness comercial e precisa impedir contaminacao global indevida.",
            "validar blocker por productId, stage e impacto comercial isolado",
            ("business", "blocker-policy"),
        ),
    ),
    "compliance": (
        rule(
            "compliance-admin-view",
            "Compliance deve manter admin view same-source",
            SignalKind.DECISION,
            GateDomain.COMPLIANCE,
            SignalSeverity.MEDIUM,
            HumanReadinessStage.PANEL_READY,
            (NeedCategory.LEGAL, NeedCategory.SECURITY, NeedCategory.GOVERNANCE),
            (r"compliance\.admin_view\.readiness", r"sameSource", r"panelReady", r"retention", r"policy"),
            "Compliance possui admin view e deve manter mesma fonte, redaction, retention e evidencia.",
            "validar regressao de panelReady, source hash, retention e dados redigidos",
            ("compliance", "admin-view"),
        ),
    ),
    "intelligence": (
        rule(
            "intelligence-promotion-gates",
            "Intelligence precisa de gates de promocao",
            SignalKind.DECISION,
            GateDomain.GOVERNANCE,
            SignalSeverity.HIGH,
            HumanReadinessStage.CATALOG_ONLY,
            (NeedCategory.STRATEGY, NeedCategory.OBSERVABILITY, NeedCategory.GOVERNANCE),
            (r"runtimeMinimum", r"responseReadyControlled", r"catalogOnly-local-ready", r"public smoke"),
            "Intelligence ja descreve gates, mas precisa evidencia publica para sair de catalogOnly local.",
            "executar smoke publico health/profile/readiness/openapi/admin e publicar evidencia",
            ("intelligence", "promotion"),
        ),
    ),
}
def evidence_text(report: PlatformHumanReport) -> str:
    """Flatten a report's searchable text into one newline-joined haystack.

    Only the first 240 evidence entries and 12 recommendations are sampled to
    keep the haystack bounded.
    """
    platform = report.platform
    sampled = report.scan.evidence[:240]
    chunks: list[str] = [
        platform.platform_id,
        platform.title,
        platform.mission,
        report.scan.readme_excerpt,
        " ".join(report.scan.warnings),
    ]
    chunks += [entry.summary for entry in sampled]
    chunks += [entry.path for entry in sampled]
    chunks += [f"{rec.title} {rec.reason}" for rec in report.recommendations[:12]]
    return "\n".join(chunks)
def refs_for_rule(report: PlatformHumanReport, rule_item: SignalRule, limit: int = 6) -> tuple[SourceReference, ...]:
    """Collect up to *limit* source references backing *rule_item* for this report.

    Fallback order when no evidence entry matches: matching warnings, then
    known blockers (always included for BLOCKER rules), then the README.
    """
    scan = report.scan
    hits = [
        entry
        for entry in scan.evidence
        if rule_item.matches(f"{entry.path} {entry.summary} {' '.join(entry.tags)}")
    ]
    refs = list(source_refs_from_evidence(hits, limit=limit))
    if not refs and scan.warnings:
        refs.extend(
            SourceReference(path=scan.repo_path, summary=warning, confidence=SourceConfidence.DERIVED, role=EvidenceRole.PRIMARY)
            for warning in scan.warnings
            if rule_item.matches(warning)
        )
    if not refs and report.platform.known_blockers:
        refs.extend(
            SourceReference(path=scan.repo_path, summary=blocker, confidence=SourceConfidence.DECLARED, role=EvidenceRole.PRIMARY)
            for blocker in report.platform.known_blockers
            if rule_item.matches(blocker) or rule_item.kind == SignalKind.BLOCKER
        )
    if not refs and rule_item.matches(scan.readme_excerpt):
        refs.append(SourceReference(path=f"{scan.repo_path}/README.md", summary="README contem sinal relacionado.", confidence=SourceConfidence.DERIVED))
    return tuple(refs[:limit])
def signal_from_rule(report: PlatformHumanReport, rule_item: SignalRule, refs: Sequence[SourceReference] | None = None) -> OperationalSignal:
    """Materialize an OperationalSignal from a matched rule and its references.

    The signal id embeds a stable digest of (platform, rule, refs) so repeated
    runs over the same evidence produce the same id.
    """
    frozen_refs = tuple(refs or ())
    pid = report.platform.platform_id
    digest_seed = {
        "platform": pid,
        "rule": rule_item.rule_id,
        "refs": [ref.reference for ref in frozen_refs],
    }
    return OperationalSignal(
        signal_id=f"{pid}.{rule_item.rule_id}.{stable_digest(digest_seed, length=8)}",
        platform_id=pid,
        kind=rule_item.kind,
        domain=rule_item.domain,
        title=rule_item.title,
        summary=rule_item.positive_summary,
        severity=rule_item.severity,
        stage=rule_item.stage,
        categories=rule_item.categories,
        sources=frozen_refs,
        tags=rule_item.tags,
        next_action=rule_item.next_action,
    )
def scan_rules(report: PlatformHumanReport, rules: Sequence[SignalRule]) -> tuple[OperationalSignal, ...]:
    """Run every rule against the report's flattened text and emit matching signals."""
    haystack = evidence_text(report)
    return tuple(
        signal_from_rule(report, candidate, refs_for_rule(report, candidate))
        for candidate in rules
        if candidate.matches(haystack)
    )
def repository_signals(report: PlatformHumanReport) -> tuple[OperationalSignal, ...]:
    """Emit repository-existence and Git-presence signals for one platform.

    Emits exactly one repo signal (exists/missing) and, when the repo exists
    or git_present is set, one git signal (present/missing).
    """
    scan = report.scan
    signals: list[OperationalSignal] = []
    if scan.exists:
        # NOTE(review): ids here use scan.platform.platform_id while sibling
        # builders use report.platform.platform_id — assumed equivalent; confirm.
        signals.append(
            OperationalSignal(
                signal_id=f"{scan.platform.platform_id}.repo.exists",
                platform_id=scan.platform.platform_id,
                kind=SignalKind.CAPABILITY,
                domain=GateDomain.REPOSITORY,
                title="Repositorio local encontrado",
                summary="O espelho local existe e pode ser analisado.",
                severity=SignalSeverity.INFO,
                stage=HumanReadinessStage.LOCAL_ONLY,
                sources=(SourceReference(path=scan.repo_path, summary="Repositorio existe no filesystem.", confidence=SourceConfidence.DIRECT),),
                tags=("repository",),
                next_action="manter repositorio sincronizado e com hash rastreavel",
            )
        )
    else:
        signals.append(
            OperationalSignal(
                signal_id=f"{scan.platform.platform_id}.repo.missing",
                platform_id=scan.platform.platform_id,
                kind=SignalKind.BLOCKER,
                domain=GateDomain.REPOSITORY,
                title="Repositorio local ausente",
                summary="Nao existe espelho local para leitura ou validacao.",
                severity=SignalSeverity.CRITICAL,
                stage=HumanReadinessStage.NOT_FOUND,
                # role=ABSENT marks a reference to something that should exist but does not.
                sources=(SourceReference(path=scan.repo_path, summary="Caminho nao encontrado.", confidence=SourceConfidence.MISSING, role=EvidenceRole.ABSENT),),
                tags=("repository", "missing"),
                next_action="criar ou clonar o repositorio real",
            )
        )
    if scan.git_present:
        signals.append(
            OperationalSignal(
                signal_id=f"{scan.platform.platform_id}.git.present",
                platform_id=scan.platform.platform_id,
                kind=SignalKind.CAPABILITY,
                domain=GateDomain.REPOSITORY,
                title="Git local detectado",
                summary="Branch, HEAD ou metadados Git foram detectados no repositorio.",
                severity=SignalSeverity.INFO,
                stage=HumanReadinessStage.TECHNICAL_READY,
                # Placeholder strings keep the reference triple complete when metadata is absent.
                sources=source_refs_from_strings((scan.branch or "branch desconhecida", scan.head or "head desconhecido", scan.remote_origin or "origin nao configurado"), "Metadado Git detectado."),
                tags=("git",),
                next_action="validar git status e sincronizacao no fechamento",
            )
        )
    elif scan.exists:
        # Only flag a missing .git when the repository itself was found.
        signals.append(
            OperationalSignal(
                signal_id=f"{scan.platform.platform_id}.git.missing",
                platform_id=scan.platform.platform_id,
                kind=SignalKind.BLOCKER,
                domain=GateDomain.REPOSITORY,
                title="Git local nao detectado",
                summary="Repositorio existe, mas .git nao foi detectado pela varredura.",
                severity=SignalSeverity.HIGH,
                stage=HumanReadinessStage.LOCAL_ONLY,
                sources=(SourceReference(path=scan.repo_path, summary="Repositorio sem .git detectavel.", confidence=SourceConfidence.MISSING),),
                tags=("git", "sync"),
                next_action="inicializar Git ou corrigir permissao local de .git",
            )
        )
    return tuple(signals)
def warning_signals(report: PlatformHumanReport) -> tuple[OperationalSignal, ...]:
    """Convert scan warnings into signals, preferring blocker-rule classification.

    A warning matched by one or more BLOCKER_RULES yields one signal per
    matching rule; otherwise a generic GAP signal is emitted.
    """
    pid = report.platform.platform_id
    repo = report.scan.repo_path
    collected: list[OperationalSignal] = []
    for warning in report.scan.warnings:
        hits = [candidate for candidate in BLOCKER_RULES if candidate.matches(warning)]
        for candidate in hits:
            ref = SourceReference(path=repo, summary=warning, confidence=SourceConfidence.DERIVED, role=EvidenceRole.PRIMARY)
            collected.append(signal_from_rule(report, candidate, (ref,)))
        if hits:
            continue
        collected.append(
            OperationalSignal(
                signal_id=f"{pid}.warning.{slugify(warning)}.{stable_digest(warning, 6)}",
                platform_id=pid,
                kind=SignalKind.GAP,
                domain=GateDomain.GOVERNANCE,
                title="Warning de varredura",
                summary=warning,
                severity=SignalSeverity.MEDIUM,
                stage=HumanReadinessStage.TECHNICAL_READY,
                sources=(SourceReference(path=repo, summary=warning, confidence=SourceConfidence.DERIVED),),
                tags=("warning",),
                next_action="classificar warning e registrar evidencia de resolucao ou excecao",
            )
        )
    return tuple(collected)
def known_blocker_signals(report: PlatformHumanReport) -> tuple[OperationalSignal, ...]:
    """Emit one signal per known blocker, falling back to a generic catalog rule."""
    collected: list[OperationalSignal] = []
    repo = report.scan.repo_path
    for blocker in report.platform.known_blockers:
        hits = [candidate for candidate in BLOCKER_RULES if candidate.matches(blocker)]
        if not hits:
            # No catalogued rule matched: wrap the blocker text itself in an
            # ad-hoc rule so it still becomes a HIGH/BLOCKED signal.
            hits = [
                rule(
                    "known-blocker",
                    "Bloqueio conhecido catalogado",
                    SignalKind.BLOCKER,
                    GateDomain.GOVERNANCE,
                    SignalSeverity.HIGH,
                    HumanReadinessStage.BLOCKED,
                    report.platform.primary_categories,
                    (re.escape(blocker.lower()),),
                    "Bloqueio conhecido precisa ser resolvido, isolado ou formalizado.",
                    "resolver, isolar ou formalizar o bloqueio conhecido",
                    ("known-blocker",),
                )
            ]
        for candidate in hits:
            ref = SourceReference(path=repo, summary=blocker, confidence=SourceConfidence.DECLARED, role=EvidenceRole.PRIMARY)
            collected.append(signal_from_rule(report, candidate, (ref,)))
    return tuple(collected)
def score_signals(report: PlatformHumanReport) -> tuple[OperationalSignal, ...]:
    """Translate the averaged human score into a capability or attention signal.

    Scores >= 90 produce a single positive signal; anything lower produces one
    GAP signal whose severity/stage tightens as the score drops, referencing
    the four weakest profile cells.
    """
    score = report.average_score
    pid = report.platform.platform_id
    if score >= 90:
        high = OperationalSignal(
            signal_id=f"{pid}.score.high",
            platform_id=pid,
            kind=SignalKind.CAPABILITY,
            domain=GateDomain.GOVERNANCE,
            title="Score humano alto",
            summary=f"Score medio humano {score}; a plataforma tem boa cobertura por perfil.",
            severity=SignalSeverity.INFO,
            stage=HumanReadinessStage.HUMAN_EXPLAINABLE,
            tags=("score",),
            next_action="trocar score isolado por gates com blockers formais e evidencia viva",
        )
        return (high,)
    if score >= 70:
        severity, stage = SignalSeverity.LOW, HumanReadinessStage.HUMAN_EXPLAINABLE
    elif score >= 50:
        severity, stage = SignalSeverity.MEDIUM, HumanReadinessStage.TECHNICAL_READY
    else:
        severity, stage = SignalSeverity.HIGH, HumanReadinessStage.PLANNED
    weakest_cells = sorted(report.cells, key=lambda cell: cell.score)[:4]
    refs = tuple(
        SourceReference(path=report.scan.repo_path, summary=f"{cell.profile_id}:{cell.score}", confidence=SourceConfidence.DERIVED)
        for cell in weakest_cells
    )
    attention = OperationalSignal(
        signal_id=f"{pid}.score.attention",
        platform_id=pid,
        kind=SignalKind.GAP,
        domain=GateDomain.GOVERNANCE,
        title="Score humano exige melhoria",
        summary=f"Score medio humano {score}; perfis mais frageis precisam de OS direcionada.",
        severity=severity,
        stage=stage,
        sources=refs,
        tags=("score", "matrix"),
        next_action="priorizar perfis de menor score em tela, relatorio ou comando humano",
    )
    return (attention,)
def recommendation_signals(report: PlatformHumanReport, recommendations: Sequence[Recommendation]) -> tuple[OperationalSignal, ...]:
    """Project recommendations targeting this platform into operational signals.

    Priority >= 85 becomes a HIGH BLOCKER; >= 65 a MEDIUM DECISION; anything
    lower a LOW DECISION.
    """
    pid = report.platform.platform_id
    collected: list[OperationalSignal] = []
    for rec in recommendations:
        if rec.platform_id != pid:
            continue
        if rec.priority >= 85:
            severity = SignalSeverity.HIGH
            kind = SignalKind.BLOCKER
        elif rec.priority >= 65:
            severity = SignalSeverity.MEDIUM
            kind = SignalKind.DECISION
        else:
            severity = SignalSeverity.LOW
            kind = SignalKind.DECISION
        refs = source_refs_from_strings(rec.affected_paths or (report.scan.repo_path,), rec.reason, confidence=SourceConfidence.DECLARED)
        collected.append(
            OperationalSignal(
                signal_id=f"{rec.recommendation_id}.{stable_digest(rec.reason, 6)}",
                platform_id=pid,
                kind=kind,
                domain=domain_from_categories(rec.categories),
                title=rec.title,
                summary=rec.reason,
                severity=severity,
                stage=HumanReadinessStage.HUMAN_EXPLAINABLE,
                categories=rec.categories,
                sources=refs,
                tags=("recommendation", rec.suggested_order_type.value),
                next_action=rec.expected_impact,
            )
        )
    return tuple(collected)
def domain_from_categories(categories: Sequence[NeedCategory]) -> GateDomain:
    """Return the gate domain of the first mapped category, defaulting to governance."""
    mapping = {
        NeedCategory.SECURITY: GateDomain.SECURITY,
        NeedCategory.INTEGRATION: GateDomain.INTEGRATION,
        NeedCategory.COMMERCIAL: GateDomain.BUSINESS,
        NeedCategory.FINANCE: GateDomain.BUSINESS,
        NeedCategory.LEGAL: GateDomain.COMPLIANCE,
        NeedCategory.DOCUMENTATION: GateDomain.DOCS,
        NeedCategory.EXPERIENCE: GateDomain.PANEL,
        NeedCategory.OBSERVABILITY: GateDomain.OBSERVABILITY,
        NeedCategory.GOVERNANCE: GateDomain.GOVERNANCE,
        NeedCategory.OPERATIONS: GateDomain.RUNTIME,
    }
    # First mapped category wins; GOVERNANCE covers empty or unmapped inputs.
    return next(
        (mapping[category] for category in categories if category in mapping),
        GateDomain.GOVERNANCE,
    )
def dedupe_signals(signals: Iterable[OperationalSignal]) -> tuple[OperationalSignal, ...]:
    """Drop duplicate signals (same platform, title, domain — first wins) and sort.

    Ordering: platform id, then descending severity, then domain, then title.
    """
    unique: dict[tuple[str, str, str], OperationalSignal] = {}
    for candidate in signals:
        key = (candidate.platform_id, candidate.title.lower(), candidate.domain.value)
        if key not in unique:
            unique[key] = candidate
    ordered = sorted(
        unique.values(),
        key=lambda sig: (sig.platform_id, -severity_to_sort(sig.severity), sig.domain.value, sig.title),
    )
    return tuple(ordered)
def severity_to_sort(severity: SignalSeverity) -> int:
    """Map a severity to its sort weight (5 = critical ... 1 = info, unknown = 0)."""
    ordered = (
        SignalSeverity.CRITICAL,
        SignalSeverity.HIGH,
        SignalSeverity.MEDIUM,
        SignalSeverity.LOW,
        SignalSeverity.INFO,
    )
    for weight, candidate in zip(range(5, 0, -1), ordered):
        if severity == candidate:
            return weight
    return 0
def build_operational_signals(report: PlatformHumanReport, recommendations: Sequence[Recommendation] = ()) -> tuple[OperationalSignal, ...]:
    """Aggregate every signal source for a platform and return the deduped, sorted set."""
    platform_rules = PLATFORM_SPECIFIC_SIGNALS.get(report.platform.platform_id, ())
    collected = (
        *repository_signals(report),
        *scan_rules(report, CAPABILITY_RULES),
        *scan_rules(report, BLOCKER_RULES),
        *scan_rules(report, platform_rules),
        *warning_signals(report),
        *known_blocker_signals(report),
        *score_signals(report),
        *recommendation_signals(report, recommendations),
    )
    return dedupe_signals(collected)
def summarize_blockers(signals: Sequence[OperationalSignal], limit: int = 12) -> tuple[str, ...]:
    """Summarize up to *limit* blocking signals as one-line strings, most severe first."""
    ranked = sorted(
        (signal for signal in signals if signal.is_blocking),
        key=lambda sig: (-severity_to_sort(sig.severity), sig.platform_id, sig.title),
    )
    return merge_unique(f"{sig.platform_id}: {sig.title} - {sig.next_action}" for sig in ranked[:limit])
def summarize_capabilities(signals: Sequence[OperationalSignal], limit: int = 12) -> tuple[str, ...]:
    """Summarize up to *limit* capability signals as one-line strings, in stable order."""
    ranked = sorted(
        (signal for signal in signals if signal.kind == SignalKind.CAPABILITY),
        key=lambda sig: (sig.platform_id, sig.domain.value, sig.title),
    )
    return merge_unique(f"{sig.platform_id}: {sig.title}" for sig in ranked[:limit])
def scan_repository_for_order_text(repo_path: Path, patterns: Sequence[str], max_files: int = 80) -> tuple[SourceReference, ...]:
    """Search text files for specific order-related patterns.

    The function is intentionally small and safe: it skips known build/vendor
    directories, reads only bounded text files, and returns references rather
    than raw content.

    Args:
        repo_path: Repository root to walk.
        patterns: Regex patterns matched case-insensitively against each line.
        max_files: Upper bound on the number of references collected.

    Returns:
        Tuple of SourceReference entries (at most one per matching file); a
        single MISSING/ABSENT reference when *repo_path* does not exist.
    """
    # Directories never descended into (VCS metadata, build output, vendored code).
    skip = {".git", "node_modules", "dist", "build", "coverage", "__pycache__", ".wrangler", "vendor"}
    # Only these text-like extensions are scanned.
    extensions = {".md", ".ts", ".tsx", ".js", ".mjs", ".cjs", ".py", ".json", ".toml", ".yml", ".yaml"}
    refs: list[SourceReference] = []
    if not repo_path.exists():
        return (
            SourceReference(path=str(repo_path), summary="Repositorio nao encontrado para busca de texto.", confidence=SourceConfidence.MISSING, role=EvidenceRole.ABSENT),
        )
    # Iterative depth-first walk; sorted directory entries keep results deterministic.
    stack = [repo_path]
    while stack and len(refs) < max_files:
        current = stack.pop()
        try:
            entries = sorted(current.iterdir(), key=lambda item: item.name.lower())
        except OSError:
            # Unreadable directory: skip it rather than abort the whole scan.
            continue
        for entry in entries:
            if entry.is_dir():
                if entry.name not in skip:
                    stack.append(entry)
                continue
            if entry.suffix.lower() not in extensions:
                continue
            try:
                # Bound memory usage: ignore files larger than ~240 KB.
                if entry.stat().st_size > 240_000:
                    continue
                lines = entry.read_text(encoding="utf-8", errors="ignore").splitlines()
            except OSError:
                continue
            # Normalize Windows separators so references are portable.
            rel = str(entry.relative_to(repo_path)).replace("\\", "/")
            for index, line in enumerate(lines, start=1):
                lowered = line.lower()
                if any(re.search(pattern, lowered, re.I) for pattern in patterns):
                    refs.append(
                        SourceReference(
                            path=rel,
                            line=index,
                            summary="Trecho local referencia tema da ordem sem expor conteudo bruto.",
                            confidence=SourceConfidence.DIRECT,
                            role=EvidenceRole.SUPPORTING,
                        )
                    )
                    # Record only the first matching line of each file.
                    break
            if len(refs) >= max_files:
                break
    return tuple(refs)

669
src/mais_humana/catalog.py Normal file
View File

@@ -0,0 +1,669 @@
"""Canonical ecosystem catalog used by the human analysis engine."""
from __future__ import annotations
from .models import HumanNeed, HumanProfile, NeedCategory, PlatformDefinition
# Canonical human profiles served by the ecosystem. Each profile pairs the
# person's priority need categories with the questions they typically ask and
# the outputs they expect, so platform reports can be scored per profile.
HUMAN_PROFILES: tuple[HumanProfile, ...] = (
    HumanProfile(
        profile_id="administrador_empresa",
        name="Administrador da empresa",
        description="Pessoa responsavel por configurar acesso, planos, operacao e visao geral.",
        priority_needs=(
            NeedCategory.ADMINISTRATION,
            NeedCategory.SECURITY,
            NeedCategory.COMMERCIAL,
            NeedCategory.OBSERVABILITY,
            NeedCategory.GOVERNANCE,
        ),
        typical_questions=(
            "Quem pode acessar esta plataforma?",
            "O que esta funcionando agora?",
            "Quais pendencias impedem uso seguro?",
            "Como eu bloqueio ou libero um cliente?",
        ),
        expected_outputs=(
            "painel executivo",
            "readiness operacional",
            "matriz de permissao",
            "relatorio de bloqueios",
        ),
        sensitive_concerns=("segredo", "permissao", "bloqueio indevido", "auditoria"),
    ),
    HumanProfile(
        profile_id="ceo",
        name="CEO",
        description="Pessoa que precisa de leitura executiva, valor do produto e risco estrategico.",
        priority_needs=(
            NeedCategory.STRATEGY,
            NeedCategory.COMMERCIAL,
            NeedCategory.OBSERVABILITY,
            NeedCategory.DOCUMENTATION,
        ),
        typical_questions=(
            "Quais plataformas ja sustentam receita?",
            "Onde esta o maior risco operacional?",
            "Que parte ja pode ir para piloto?",
            "Qual proximo investimento desbloqueia valor?",
        ),
        expected_outputs=("sumario executivo", "grafico de maturidade", "mapa de risco"),
    ),
    HumanProfile(
        profile_id="gestor_operacional",
        name="Gestor operacional",
        description="Pessoa que coordena execucao diaria, fila de incidentes e continuidade.",
        priority_needs=(
            NeedCategory.OPERATIONS,
            NeedCategory.SUPPORT,
            NeedCategory.OBSERVABILITY,
            NeedCategory.GOVERNANCE,
        ),
        typical_questions=(
            "Qual ordem devo executar agora?",
            "Que teste comprova esta entrega?",
            "Que incidente esta aberto?",
            "Quem depende desta plataforma?",
        ),
        expected_outputs=("fila operacional", "runbook", "evidencia", "proxima OS"),
    ),
    HumanProfile(
        profile_id="suporte",
        name="Equipe de suporte",
        description="Pessoa que atende falhas, explica limites e encaminha diagnostico.",
        priority_needs=(
            NeedCategory.SUPPORT,
            NeedCategory.OPERATIONS,
            NeedCategory.DOCUMENTATION,
            NeedCategory.OBSERVABILITY,
        ),
        typical_questions=(
            "O erro e do usuario, da plataforma ou de provedor externo?",
            "Qual proxima acao sugerida?",
            "Que dado posso mostrar sem vazar segredo?",
            "Existe historico do incidente?",
        ),
        expected_outputs=("diagnostico sanitizado", "proxima acao", "ticket", "evidencia"),
        sensitive_concerns=("dado pessoal", "segredo", "trace bruto"),
    ),
    HumanProfile(
        profile_id="atendimento_cliente",
        name="Atendimento ao cliente",
        description="Pessoa que traduz estado tecnico em orientacao simples para cliente.",
        priority_needs=(
            NeedCategory.EXPERIENCE,
            NeedCategory.SUPPORT,
            NeedCategory.DOCUMENTATION,
            NeedCategory.SELF_SERVICE,
        ),
        typical_questions=(
            "O que eu posso prometer ao cliente?",
            "Qual tela resolve esta duvida?",
            "Qual mensagem explica a pendencia?",
            "O cliente consegue se autosservir?",
        ),
        expected_outputs=("resposta simples", "status do pedido", "artigo de ajuda", "acao orientada"),
    ),
    HumanProfile(
        profile_id="financeiro",
        name="Financeiro",
        description="Pessoa que cuida de fatura, consumo, bloqueio comercial e previsao.",
        priority_needs=(
            NeedCategory.FINANCE,
            NeedCategory.COMMERCIAL,
            NeedCategory.OBSERVABILITY,
            NeedCategory.GOVERNANCE,
        ),
        typical_questions=(
            "Qual foi o consumo?",
            "Existe excedente?",
            "Qual cliente esta inadimplente?",
            "O bloqueio comercial esta correto?",
        ),
        expected_outputs=("fatura", "consumo", "franquia", "bloqueio comercial"),
        sensitive_concerns=("valor", "inadimplencia", "plano", "dados fiscais"),
    ),
    HumanProfile(
        profile_id="contador",
        name="Contador",
        description="Pessoa que precisa de consistencia fiscal, extratos e historico financeiro.",
        priority_needs=(
            NeedCategory.FINANCE,
            NeedCategory.DOCUMENTATION,
            NeedCategory.GOVERNANCE,
        ),
        typical_questions=(
            "Existe relatorio exportavel?",
            "A fatura tem trilha?",
            "O historico financeiro e confiavel?",
        ),
        expected_outputs=("extrato", "relatorio fiscal", "evidencia de cobranca"),
    ),
    HumanProfile(
        profile_id="juridico",
        name="Juridico",
        description="Pessoa que avalia riscos, contratos, privacidade e uso de fontes.",
        priority_needs=(
            NeedCategory.LEGAL,
            NeedCategory.SECURITY,
            NeedCategory.DOCUMENTATION,
            NeedCategory.GOVERNANCE,
        ),
        typical_questions=(
            "Qual contrato rege esta ferramenta?",
            "A fonte e publica ou restrita?",
            "Existe consentimento?",
            "Qual evidencia prova a acao?",
        ),
        expected_outputs=("contrato", "politica", "evidencia", "classificacao de risco"),
        sensitive_concerns=("segredo de justica", "consentimento", "dados pessoais"),
    ),
    HumanProfile(
        profile_id="secretaria",
        name="Secretaria",
        description="Pessoa que precisa de tarefas claras, status, agenda e documentos prontos.",
        priority_needs=(
            NeedCategory.EXPERIENCE,
            NeedCategory.OPERATIONS,
            NeedCategory.DOCUMENTATION,
            NeedCategory.SELF_SERVICE,
        ),
        typical_questions=(
            "O que preciso fazer agora?",
            "Qual documento esta pronto?",
            "Qual cliente precisa de retorno?",
            "Onde encontro a tela certa?",
        ),
        expected_outputs=("checklist", "documento", "status", "atalho de tela"),
    ),
    HumanProfile(
        profile_id="tecnico",
        name="Tecnico",
        description="Pessoa que opera deploy, logs, testes, secrets e integracoes.",
        priority_needs=(
            NeedCategory.INTEGRATION,
            NeedCategory.SECURITY,
            NeedCategory.OPERATIONS,
            NeedCategory.OBSERVABILITY,
        ),
        typical_questions=(
            "Qual comando valida isto?",
            "Qual endpoint esta quebrado?",
            "Qual segredo falta?",
            "Como executo smoke sem vazar token?",
        ),
        expected_outputs=("comando", "health", "smoke", "log sanitizado"),
        sensitive_concerns=("token", "secret", "binding", "remote"),
    ),
    HumanProfile(
        profile_id="usuario_final",
        name="Usuario final",
        description="Pessoa que usa a interface para resolver uma necessidade sem entender a arquitetura.",
        priority_needs=(
            NeedCategory.EXPERIENCE,
            NeedCategory.SELF_SERVICE,
            NeedCategory.DOCUMENTATION,
            NeedCategory.SUPPORT,
        ),
        typical_questions=(
            "Onde clico?",
            "O resultado e confiavel?",
            "O que faco quando falha?",
            "Posso acompanhar o status?",
        ),
        expected_outputs=("tela clara", "mensagem util", "status", "ajuda"),
    ),
    HumanProfile(
        profile_id="cliente_externo",
        name="Cliente externo",
        description="Pessoa ou organizacao que compra o produto e espera valor, suporte e transparencia.",
        priority_needs=(
            NeedCategory.COMMERCIAL,
            NeedCategory.EXPERIENCE,
            NeedCategory.SELF_SERVICE,
            NeedCategory.SUPPORT,
        ),
        typical_questions=(
            "O produto resolve meu problema?",
            "Quanto custa?",
            "O que esta incluido?",
            "Como acompanho uso e suporte?",
        ),
        expected_outputs=("oferta", "plano", "onboarding", "status de uso"),
    ),
    HumanProfile(
        profile_id="planejamento_estrategico",
        name="Planejamento estrategico",
        description="Pessoa que transforma maturidade, risco e oportunidade em roadmap.",
        priority_needs=(
            NeedCategory.STRATEGY,
            NeedCategory.GOVERNANCE,
            NeedCategory.OBSERVABILITY,
            NeedCategory.COMMERCIAL,
        ),
        typical_questions=(
            "Qual plataforma merece prioridade?",
            "Qual pendencia trava receita?",
            "Que dependencia impacta mais perfis humanos?",
            "Qual maturidade esperamos no proximo ciclo?",
        ),
        expected_outputs=("roadmap", "matriz de prioridade", "score de maturidade", "ordens futuras"),
    ),
)
# Cross-cutting human needs evaluated against every platform. Each need lists
# the markers whose presence counts as success, the risk when the need is
# unmet, and the surfaces where it is expected to be satisfied.
HUMAN_NEEDS: tuple[HumanNeed, ...] = (
    HumanNeed(
        need_id="entender_estado_operacional",
        title="Entender estado operacional sem ler codigo",
        category=NeedCategory.OBSERVABILITY,
        description="A pessoa precisa saber se a plataforma esta pronta, parcial ou bloqueada.",
        success_markers=("health", "readiness", "status", "evidencia", "bloqueio classificado"),
        risk_if_missing="Decisoes humanas ficam baseadas em impressao tecnica dispersa.",
        expected_surfaces=("painel", "relatorio", "MCP admin_ui"),
    ),
    HumanNeed(
        need_id="agir_com_permissao_segura",
        title="Executar acoes com permissao e trilha",
        category=NeedCategory.SECURITY,
        description="A acao humana deve respeitar identidade, papel, escopo e auditoria.",
        success_markers=("identity", "rbac", "audit", "trace", "redaction"),
        risk_if_missing="A operacao pode vazar segredo ou executar acao indevida.",
        expected_surfaces=("Identity", "MCP", "logs sanitizados"),
    ),
    HumanNeed(
        need_id="comprar_e_usar_produto",
        title="Comprar e usar produto com clareza comercial",
        category=NeedCategory.COMMERCIAL,
        description="Cliente e equipe interna precisam entender plano, entitlement e uso.",
        success_markers=("plano", "entitlement", "checkout", "franquia", "bloqueio"),
        risk_if_missing="Produto tecnico fica dificil de vender e suportar.",
        expected_surfaces=("Business", "Finance", "painel comercial"),
    ),
    HumanNeed(
        need_id="receber_suporte_util",
        title="Receber suporte que diagnostica e orienta",
        category=NeedCategory.SUPPORT,
        description="Falhas devem virar diagnostico, proxima acao e evidencia segura.",
        success_markers=("incidente", "ticket", "diagnostico", "nextAction", "evidence"),
        risk_if_missing="O suporte vira repasse de erro bruto e perde confianca.",
        expected_surfaces=("Customer Ops", "MCP admin_ui", "runbook"),
    ),
    HumanNeed(
        need_id="documentar_decisao",
        title="Documentar decisao e fonte da verdade",
        category=NeedCategory.DOCUMENTATION,
        description="Pessoas precisam de documentos claros, versionados e verificaveis.",
        success_markers=("docs", "contrato", "versao", "hash", "indice"),
        risk_if_missing="A memoria do ecossistema se perde entre rodadas.",
        expected_surfaces=("Docs", "central de OS", "SQLite semantico"),
    ),
    HumanNeed(
        need_id="operar_integracao_real",
        title="Operar integracao real sem expor segredo",
        category=NeedCategory.INTEGRATION,
        description="Integracoes precisam de credencial segura, smoke e estado por tenant.",
        success_markers=("BYOK", "credentialRef", "smoke", "tenant", "redaction"),
        risk_if_missing="A integracao parece pronta, mas nao e operavel por cliente real.",
        expected_surfaces=("Integracoes", "Identity", "Business"),
    ),
    HumanNeed(
        need_id="visualizar_sem_backend_paralelo",
        title="Visualizar a mesma fonte que o GPT explica",
        category=NeedCategory.EXPERIENCE,
        description="A UI deve renderizar o mesmo contrato que agentes e MCP consomem.",
        success_markers=("panelReady", "sameSource", "sourceHash", "screenData"),
        risk_if_missing="Painel e GPT podem divergir e quebrar confianca humana.",
        expected_surfaces=("UI Platform", "MCP admin_ui"),
    ),
    HumanNeed(
        need_id="planejar_proxima_os",
        title="Transformar lacuna em ordem de servico",
        category=NeedCategory.GOVERNANCE,
        description="Toda lacuna material deve virar continuidade executavel e rastreavel.",
        success_markers=("ordem", "pendencia", "criterio de pronto", "validacao"),
        risk_if_missing="A rodada avanca sem deixar caminho operacional claro.",
        expected_surfaces=("central de OS", "controle semantico", "relatorios"),
    ),
)
# Canonical platform catalog: one entry per ecosystem repository, including
# its mission, the need categories it primarily serves, the human profiles
# expected to use it, related platforms, and any known blockers.
PLATFORMS: tuple[PlatformDefinition, ...] = (
    PlatformDefinition(
        platform_id="business",
        repo_name="tudo-para-ia-business-platform",
        central_folder="01_repo_tudo-para-ia-business-platform",
        title="Business Platform",
        mission="Concentrar contratacao, plano ativo, cobranca, entitlement, consumo e bloqueio comercial.",
        primary_categories=(NeedCategory.COMMERCIAL, NeedCategory.FINANCE, NeedCategory.ADMINISTRATION),
        expected_profiles=("administrador_empresa", "ceo", "financeiro", "contador", "cliente_externo"),
        related_platforms=("identity", "integracoes", "finance", "customer_ops"),
        expected_surfaces=("checkout", "billing", "entitlements", "consumption", "commercial-readiness"),
    ),
    PlatformDefinition(
        platform_id="compliance",
        repo_name="tudo-para-ia-compliance-platform",
        central_folder="02_repo_tudo-para-ia-compliance-platform",
        title="Compliance Platform",
        mission="Governar privacidade, risco, politica, consentimento, auditoria e evidencias.",
        primary_categories=(NeedCategory.LEGAL, NeedCategory.SECURITY, NeedCategory.GOVERNANCE),
        expected_profiles=("juridico", "administrador_empresa", "ceo", "suporte"),
        related_platforms=("identity", "docs", "customer_ops"),
        expected_surfaces=("privacy", "risk", "audit", "consent", "retention"),
    ),
    PlatformDefinition(
        platform_id="customer_ops",
        repo_name="tudo-para-ia-customer-ops-platform",
        central_folder="03_repo_tudo-para-ia-customer-ops-platform",
        title="Customer Ops Platform",
        mission="Organizar suporte, atendimento, incidentes, handoffs e comunicacao com clientes.",
        primary_categories=(NeedCategory.SUPPORT, NeedCategory.OPERATIONS, NeedCategory.EXPERIENCE),
        expected_profiles=("suporte", "atendimento_cliente", "gestor_operacional", "cliente_externo"),
        related_platforms=("business", "identity", "docs"),
        expected_surfaces=("tickets", "incidents", "diagnostics", "customer-status", "handoffs"),
    ),
    PlatformDefinition(
        platform_id="docs",
        # NOTE: "plataform" (sic) matches the actual repository name.
        repo_name="tudo-para-ia-docs-plataform",
        central_folder="04_repo_tudo-para-ia-docs-plataform",
        title="Docs Platform",
        mission="Ser fonte documental de contratos, guias, provas e leitura canonica do ecossistema.",
        primary_categories=(NeedCategory.DOCUMENTATION, NeedCategory.GOVERNANCE, NeedCategory.STRATEGY),
        expected_profiles=(
            "ceo",
            "juridico",
            "suporte",
            "atendimento_cliente",
            "planejamento_estrategico",
        ),
        related_platforms=("mcps", "ui", "compliance"),
        expected_surfaces=("canonical-docs", "contracts", "proofs", "help", "runbooks"),
        known_blockers=("catalogOnly precisa decisao formal ou leitura responseReady minima",),
    ),
    PlatformDefinition(
        platform_id="finance",
        repo_name="tudo-para-ia-finance-platform",
        central_folder="05_repo_tudo-para-ia-finance-platform",
        title="Finance Platform",
        mission="Materializar cobranca, fatura, consumo, custos, extratos e reconciliacao financeira.",
        primary_categories=(NeedCategory.FINANCE, NeedCategory.COMMERCIAL, NeedCategory.OBSERVABILITY),
        expected_profiles=("financeiro", "contador", "ceo", "administrador_empresa"),
        related_platforms=("business", "identity"),
        expected_surfaces=("invoices", "usage", "cost", "reconciliation", "quota"),
    ),
    PlatformDefinition(
        platform_id="gettys",
        repo_name="tudo-para-ia-gettys-platform",
        central_folder="06_repo_tudo-para-ia-gettys-platform",
        title="Gettys Platform",
        mission="Fornecer superficie operacional especifica do produto Gettys e sua visao administrativa.",
        primary_categories=(NeedCategory.OPERATIONS, NeedCategory.EXPERIENCE, NeedCategory.OBSERVABILITY),
        expected_profiles=("gestor_operacional", "usuario_final", "suporte", "cliente_externo"),
        related_platforms=("ui", "mcps", "business"),
        expected_surfaces=("gettys-overview", "admin-screen", "health", "product-readiness"),
    ),
    PlatformDefinition(
        platform_id="identity",
        repo_name="tudo-para-ia-identity-platform",
        central_folder="07_repo_tudo-para-ia-identity-platform",
        title="Identity Platform",
        mission="Centralizar usuario, organizacao, sessao, papel, escopo, tenant e autorizacao.",
        primary_categories=(NeedCategory.SECURITY, NeedCategory.ADMINISTRATION, NeedCategory.GOVERNANCE),
        expected_profiles=("administrador_empresa", "suporte", "tecnico", "usuario_final", "juridico"),
        related_platforms=("business", "integracoes", "customer_ops", "compliance"),
        expected_surfaces=("rbac", "sessions", "organizations", "incidents", "audit", "contracts"),
    ),
    PlatformDefinition(
        platform_id="integracoes",
        repo_name="tudo-para-ia-integracoes-plataform",
        central_folder="08_repo_tudo-para-ia-integracoes-plataform",
        title="Integracoes Platform",
        mission="Operar providers externos, BYOK, credenciais, smoke e produtos de integracao.",
        primary_categories=(NeedCategory.INTEGRATION, NeedCategory.SECURITY, NeedCategory.COMMERCIAL),
        expected_profiles=("tecnico", "administrador_empresa", "gestor_operacional", "cliente_externo"),
        related_platforms=("identity", "business", "mcps", "customer_ops"),
        expected_surfaces=("BYOK", "providers", "credentials", "smoke", "products", "tenant"),
        known_blockers=("tokens live por provider ainda variam por produto",),
    ),
    PlatformDefinition(
        platform_id="intelligence",
        repo_name="tudo-para-ia-intelligence-platform",
        central_folder="09_repo_tudo-para-ia-intelligence-platform",
        title="Intelligence Platform",
        mission="Planejar inteligencia analitica, recomendacao e leitura assistida do ecossistema.",
        primary_categories=(NeedCategory.STRATEGY, NeedCategory.OBSERVABILITY, NeedCategory.EXPERIENCE),
        expected_profiles=("ceo", "planejamento_estrategico", "gestor_operacional"),
        related_platforms=("mcps", "docs", "business"),
        expected_surfaces=("analytics", "recommendations", "risk", "prioritization"),
        known_blockers=("estado unsupported/configuracao incompleta precisa decisao formal",),
    ),
    PlatformDefinition(
        platform_id="mcps",
        repo_name="tudo-para-ia-mcps-internos-plataform",
        central_folder="10_repo_tudo-para-ia-mcps-internos-plataform",
        title="MCPs Internos Platform",
        mission="Ser control-plane MCP, backend de painel humano, catalogo, auditoria e descoberta assistida.",
        primary_categories=(NeedCategory.GOVERNANCE, NeedCategory.INTEGRATION, NeedCategory.OBSERVABILITY),
        expected_profiles=("tecnico", "gestor_operacional", "administrador_empresa", "planejamento_estrategico"),
        related_platforms=("ui", "docs", "integracoes", "identity", "business"),
        expected_surfaces=("admin_ui", "tools", "readiness", "sameSource", "evidence", "catalog"),
    ),
    PlatformDefinition(
        platform_id="platform_base",
        repo_name="tudo-para-ia-platform-base",
        central_folder="11_repo_tudo-para-ia-platform-base",
        title="Platform Base",
        mission="Oferecer padroes, base tecnica, contratos e referencias comuns para plataformas.",
        primary_categories=(NeedCategory.GOVERNANCE, NeedCategory.OPERATIONS, NeedCategory.DOCUMENTATION),
        expected_profiles=("tecnico", "gestor_operacional", "planejamento_estrategico"),
        related_platforms=("mcps", "docs", "ui"),
        expected_surfaces=("templates", "standards", "contracts", "shared-runtime"),
    ),
    PlatformDefinition(
        platform_id="public",
        repo_name="tudo-para-ia-public-platform",
        central_folder="12_repo_tudo-para-ia-public-platform",
        title="Public Platform",
        mission="Representar superficies publicas, onboarding, paginas e comunicacao externa.",
        primary_categories=(NeedCategory.EXPERIENCE, NeedCategory.COMMERCIAL, NeedCategory.DOCUMENTATION),
        expected_profiles=("cliente_externo", "usuario_final", "atendimento_cliente", "ceo"),
        related_platforms=("business", "docs", "ui"),
        expected_surfaces=("landing", "onboarding", "public-docs", "status"),
    ),
    PlatformDefinition(
        platform_id="stj",
        repo_name="tudo-para-ia-stj-platform",
        central_folder="13_repo_tudo-para-ia-stj-platform",
        title="STJ Platform",
        mission="Materializar produto juridico/processual e sua leitura operacional vendavel.",
        primary_categories=(NeedCategory.LEGAL, NeedCategory.OPERATIONS, NeedCategory.COMMERCIAL),
        expected_profiles=("juridico", "cliente_externo", "suporte", "administrador_empresa"),
        related_platforms=("business", "integracoes", "compliance", "customer_ops"),
        expected_surfaces=("process-query", "monitoring", "public-documents", "legal-readiness"),
    ),
    PlatformDefinition(
        platform_id="ui",
        repo_name="tudo-para-ia-ui-platform",
        central_folder="14_repo_tudo-para-ia-ui-platform",
        title="UI Platform",
        mission="Renderizar contratos MCP-ready, design system, telas-base e experiencia humana.",
        primary_categories=(NeedCategory.EXPERIENCE, NeedCategory.SELF_SERVICE, NeedCategory.GOVERNANCE),
        expected_profiles=("usuario_final", "administrador_empresa", "suporte", "atendimento_cliente"),
        related_platforms=("mcps", "docs", "business", "identity", "integracoes"),
        expected_surfaces=("design-system", "screen-contract", "PWA", "panelReady", "sameSource"),
    ),
)
# Fast id -> definition lookups derived from the canonical tuples above.
PROFILE_BY_ID: dict[str, HumanProfile] = {profile.profile_id: profile for profile in HUMAN_PROFILES}
PLATFORM_BY_ID: dict[str, PlatformDefinition] = {platform.platform_id: platform for platform in PLATFORMS}
# Keyword heuristics used by categories_for_text(): a category matches when
# any of its keywords appears (case-insensitively) in the scanned text.
# Some entries are deliberate stems ("jurid", "estrateg", "permiss") to match
# several inflected forms at once.
CATEGORY_KEYWORDS: dict[NeedCategory, tuple[str, ...]] = {
    NeedCategory.ADMINISTRATION: (
        "admin",
        "administrator",
        "organizacao",
        "organization",
        "tenant",
        "workspace",
        "operador",
        "gestao",
    ),
    NeedCategory.SUPPORT: (
        "support",
        "suporte",
        "ticket",
        "incident",
        "incidente",
        "diagnostic",
        "diagnostico",
        "handoff",
    ),
    NeedCategory.FINANCE: (
        "finance",
        "billing",
        "invoice",
        "fatura",
        "consumo",
        "quota",
        "franquia",
        "cobranca",
    ),
    NeedCategory.LEGAL: (
        "legal",
        "jurid",
        "privacy",
        "privacidade",
        "consent",
        "contrato",
        "processo",
        "compliance",
    ),
    NeedCategory.SECURITY: (
        "auth",
        "rbac",
        "identity",
        "secret",
        "token",
        "credential",
        "redaction",
        "permiss",
    ),
    NeedCategory.OPERATIONS: (
        "operation",
        "operacao",
        "health",
        "readiness",
        "smoke",
        "deploy",
        "runbook",
        "status",
    ),
    NeedCategory.STRATEGY: (
        "strategy",
        "estrateg",
        "roadmap",
        "maturity",
        "maturidade",
        "prioridade",
        "risk",
        "risco",
    ),
    NeedCategory.DOCUMENTATION: (
        "docs",
        "document",
        "readme",
        "manual",
        "guia",
        "evidence",
        "prova",
        "template",
    ),
    NeedCategory.SELF_SERVICE: (
        "self-service",
        "autosserv",
        "portal",
        "onboarding",
        "wizard",
        "form",
        "acao",
        "action",
    ),
    NeedCategory.COMMERCIAL: (
        "business",
        "commerce",
        "commercial",
        "produto",
        "plano",
        "checkout",
        "entitlement",
        "sellable",
    ),
    NeedCategory.EXPERIENCE: (
        "ui",
        "screen",
        "view",
        "panel",
        "pwa",
        "interface",
        "layout",
        "visual",
    ),
    NeedCategory.GOVERNANCE: (
        "governance",
        "governanca",
        "audit",
        "trace",
        "contract",
        "version",
        "schema",
        "policy",
    ),
    NeedCategory.INTEGRATION: (
        "integration",
        "integracao",
        "provider",
        "BYOK",
        "cloudflare",
        "gitlab",
        "stripe",
        "whatsapp",
    ),
    NeedCategory.OBSERVABILITY: (
        "observability",
        "metrics",
        "latency",
        "trace",
        "audit",
        "health",
        "readiness",
        "evidence",
    ),
}
def platform_ids() -> tuple[str, ...]:
    """Return every catalog platform id, in catalog order."""
    return tuple(entry.platform_id for entry in PLATFORMS)
def profile_ids() -> tuple[str, ...]:
    """Return every catalog human-profile id, in catalog order."""
    return tuple(entry.profile_id for entry in HUMAN_PROFILES)
def categories_for_text(text: str) -> tuple[NeedCategory, ...]:
    """Return the need categories whose keywords occur in *text*.

    Matching is case-insensitive on both sides; categories come back in the
    declaration order of CATEGORY_KEYWORDS.
    """
    haystack = text.lower()
    return tuple(
        category
        for category, keywords in CATEGORY_KEYWORDS.items()
        if any(keyword.lower() in haystack for keyword in keywords)
    )
def get_platform(platform_id: str) -> PlatformDefinition:
    """Look up a platform definition by id; raises KeyError when unknown."""
    return PLATFORM_BY_ID[platform_id]
def get_profile(profile_id: str) -> HumanProfile:
    """Look up a human profile by id; raises KeyError when unknown."""
    return PROFILE_BY_ID[profile_id]

153
src/mais_humana/charts.py Normal file
View File

@@ -0,0 +1,153 @@
"""SVG chart generation for human reports."""
from __future__ import annotations
from math import cos, pi, sin
from pathlib import Path
from typing import Sequence
from .catalog import HUMAN_PROFILES, PROFILE_BY_ID
from .models import MatrixCell, PlatformHumanReport
def svg_escape(value: object) -> str:
    """Return *value* as text with SVG/XML-special characters entity-escaped.

    Ampersand is escaped first so previously inserted entities are not
    double-escaped. Single quotes are left alone: all generated attributes
    use double quotes.
    """
    replacements = (
        ("&", "&amp;"),
        ("<", "&lt;"),
        (">", "&gt;"),
        ('"', "&quot;"),
    )
    escaped = str(value)
    for needle, entity in replacements:
        escaped = escaped.replace(needle, entity)
    return escaped
def color_for_score(score: int) -> str:
    """Map a 0-100 readiness score to a traffic-light-style hex color.

    Higher scores get greener colors; anything below 35 is red.
    """
    thresholds = (
        (85, "#1f9d72"),
        (70, "#43b881"),
        (55, "#e0a72e"),
        (35, "#d66b37"),
    )
    for floor, color in thresholds:
        if score >= floor:
            return color
    return "#bf3f4a"
def text(x: int | float, y: int | float, value: str, size: int = 12, anchor: str = "start", weight: str = "400") -> str:
    """Build an SVG <text> element with the project's standard font and fill."""
    content = svg_escape(value)
    return (
        f'<text x="{x}" y="{y}" font-family="Arial, sans-serif" font-size="{size}" '
        f'font-weight="{weight}" text-anchor="{anchor}" fill="#18202a">{content}</text>'
    )
def rect(x: int | float, y: int | float, width: int | float, height: int | float, fill: str, stroke: str = "none") -> str:
return f'<rect x="{x}" y="{y}" width="{width}" height="{height}" rx="4" fill="{fill}" stroke="{stroke}"/>'
def write_svg(path: Path, width: int, height: int, body: str) -> Path:
    """Write a complete SVG document wrapping *body* and return *path*.

    Parent directories are created as needed; the document always carries a
    light background rect and role="img" for accessibility.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    opening = (
        f'<svg xmlns="http://www.w3.org/2000/svg" width="{width}" height="{height}" '
        f'viewBox="0 0 {width} {height}" role="img">'
    )
    background = '<rect width="100%" height="100%" fill="#f8fafc"/>'
    path.write_text(opening + background + body + "</svg>", encoding="utf-8")
    return path
def matrix_heatmap_svg(path: Path, cells: Sequence[MatrixCell]) -> Path:
    """Render the platform x human-profile score matrix as a heatmap SVG.

    Rows are platforms (sorted by id), columns are the canonical human
    profiles; each cell is colored by its 0-100 score. Pairs missing from
    *cells* render as score 0. Returns the written file path.
    """
    platform_ids = sorted({cell.platform_id for cell in cells})
    profiles = HUMAN_PROFILES
    # Fixed cell geometry; canvas size grows with row/column count.
    cell_w = 74
    cell_h = 32
    left = 190
    top = 130
    width = left + cell_w * len(profiles) + 40
    height = top + cell_h * len(platform_ids) + 70
    by_pair = {(cell.platform_id, cell.profile_id): cell for cell in cells}
    parts: list[str] = []
    parts.append(text(30, 36, "Matriz plataforma x perfil humano", 20, weight="700"))
    parts.append(text(30, 58, "Score 0-100 calculado por evidencias locais, categorias e lacunas.", 12))
    # Column headers: profile ids rotated -45 degrees above the grid.
    for index, profile in enumerate(profiles):
        x = left + index * cell_w + cell_w / 2
        label = profile.profile_id.replace("_", " ")
        parts.append(
            f'<text x="{x}" y="{top - 16}" font-family="Arial, sans-serif" font-size="10" '
            'text-anchor="end" transform="rotate(-45 '
            f'{x},{top - 16})" fill="#334155">{svg_escape(label)}</text>'
        )
    # One row per platform: id label on the left, then one colored cell per profile.
    for row_index, platform_id in enumerate(platform_ids):
        y = top + row_index * cell_h
        parts.append(text(30, y + 21, platform_id, 12, weight="700"))
        for col_index, profile in enumerate(profiles):
            x = left + col_index * cell_w
            cell = by_pair.get((platform_id, profile.profile_id))
            # Absent platform/profile pair renders as score 0 (red).
            score = cell.score if cell else 0
            parts.append(rect(x, y, cell_w - 4, cell_h - 4, color_for_score(score), "#ffffff"))
            parts.append(text(x + (cell_w - 4) / 2, y + 19, str(score), 11, anchor="middle", weight="700"))
    parts.append(text(30, height - 28, "Verde indica maior prontidao humana; vermelho indica lacuna prioritaria.", 11))
    return write_svg(path, width, height, "".join(parts))
def platform_bar_svg(path: Path, reports: Sequence[PlatformHumanReport]) -> Path:
    """Render a horizontal bar chart of average human readiness per platform.

    Platforms are ranked by descending average score; each row shows the
    platform id, a 600px track, a colored bar scaled 6px per point (minimum
    2px), and the numeric score. Returns the written file path.
    """
    ranked = sorted(reports, key=lambda item: item.average_score, reverse=True)
    width = 980
    row_h = 38
    top = 72
    left = 245
    height = top + row_h * len(ranked) + 45
    fragments: list[str] = [text(30, 36, "Maturidade humana por plataforma", 20, weight="700")]
    for position, entry in enumerate(ranked):
        y = top + position * row_h
        fragments.append(text(30, y + 20, entry.platform.platform_id, 12, weight="700"))
        fragments.append(rect(left, y, 600, 22, "#e2e8f0"))
        bar_w = max(2, entry.average_score * 6)
        fragments.append(rect(left, y, bar_w, 22, color_for_score(entry.average_score)))
        fragments.append(text(left + bar_w + 10, y + 16, str(entry.average_score), 12, weight="700"))
    return write_svg(path, width, height, "".join(fragments))
def profile_radar_svg(path: Path, report: PlatformHumanReport) -> Path:
    """Render one platform's per-profile scores as a radar (spider) chart SVG.

    Axes are the report's matrix cells sorted by profile id; each data point
    sits at a distance proportional to the cell score (0-100). Returns the
    written file path.
    """
    cells = sorted(report.cells, key=lambda cell: cell.profile_id)
    if not cells:
        # Nothing to plot: emit a small placeholder document.
        return write_svg(path, 640, 480, text(30, 30, "Sem dados"))
    width = 760
    height = 680
    cx = width / 2
    cy = height / 2 + 20
    radius = 230
    parts: list[str] = []
    parts.append(text(30, 36, f"Radar humano: {report.platform.title}", 20, weight="700"))
    # Background grid: five concentric polygons at 20%..100% of the radius.
    for ring in range(1, 6):
        r = radius * ring / 5
        points = polygon_points(len(cells), cx, cy, r)
        parts.append(f'<polygon points="{points}" fill="none" stroke="#cbd5e1" stroke-width="1"/>')
    data_points = []
    for index, cell in enumerate(cells):
        # Axes start at 12 o'clock (-pi/2) and advance clockwise.
        angle = -pi / 2 + 2 * pi * index / len(cells)
        score_radius = radius * cell.score / 100
        data_points.append((cx + cos(angle) * score_radius, cy + sin(angle) * score_radius))
        # Axis label placed just outside the outer ring, truncated to 22 chars.
        label_radius = radius + 58
        profile = PROFILE_BY_ID[cell.profile_id]
        lx = cx + cos(angle) * label_radius
        ly = cy + sin(angle) * label_radius
        parts.append(text(round(lx, 2), round(ly, 2), profile.name[:22], 10, anchor="middle"))
    # Filled score polygon (translucent fill) plus a dot at each vertex.
    parts.append(
        '<polygon points="'
        + " ".join(f"{round(x, 2)},{round(y, 2)}" for x, y in data_points)
        + '" fill="#2f80ed55" stroke="#2f80ed" stroke-width="2"/>'
    )
    for x, y in data_points:
        parts.append(f'<circle cx="{round(x,2)}" cy="{round(y,2)}" r="4" fill="#2f80ed"/>')
    return write_svg(path, width, height, "".join(parts))
def polygon_points(count: int, cx: float, cy: float, radius: float) -> str:
    """Return space-separated "x,y" vertices of a regular polygon.

    The first vertex sits at 12 o'clock; vertices proceed clockwise around
    the center (cx, cy). Coordinates are rounded to two decimals.
    """
    vertices = []
    for step in range(count):
        theta = -pi / 2 + 2 * pi * step / count
        px = round(cx + cos(theta) * radius, 2)
        py = round(cy + sin(theta) * radius, 2)
        vertices.append(f"{px},{py}")
    return " ".join(vertices)

158
src/mais_humana/cli.py Normal file
View File

@@ -0,0 +1,158 @@
"""Command line interface for the Mais Humana platform."""
from __future__ import annotations
import argparse
import json
from pathlib import Path
from .models import as_plain_data
from .matrix import build_global_recommendations, build_matrix, build_platform_reports
from .operational_dossier import build_execution_round_dossier
from .governance_engine import build_governance_portfolio, compact_governance_payload
from .human_readiness_registry import build_readiness_registry
from .runtime_budget import build_round_line_budget
from .orders import build_exit_orders
from .reports import generate
from .scanner import environment_summary, scan_ecosystem
from .storage import table_counts
def build_parser() -> argparse.ArgumentParser:
    """Create the mais-humana argparse CLI with all subcommands."""
    parser = argparse.ArgumentParser(prog="mais-humana", description="Generate human-centered ecosystem reports.")
    commands = parser.add_subparsers(dest="command", required=True)

    scan_cmd = commands.add_parser("scan", help="Scan ecosystem repositories and print compact JSON summary.")
    scan_cmd.add_argument("--ecosystem-root", default="G:/_codex-git")

    generate_cmd = commands.add_parser("generate", help="Generate DOCX, SVG, JSON, matrices, and service orders.")
    generate_cmd.add_argument("--ecosystem-root", default="G:/_codex-git")
    generate_cmd.add_argument("--project-root", default="G:/_codex-git/tudo-para-ia-mais-humana")
    generate_cmd.add_argument("--central-platform-folder", default="")
    generate_cmd.add_argument("--push-status", default="")

    sql_cmd = commands.add_parser("sql-counts", help="Print semantic SQLite table counts.")
    sql_cmd.add_argument("--sqlite", required=True)

    env_cmd = commands.add_parser("env", help="Print local environment summary.")
    env_cmd.add_argument("--ecosystem-root", default="G:/_codex-git")

    dossier_cmd = commands.add_parser("dossier", help="Print operational dossier JSON without writing artifacts.")
    dossier_cmd.add_argument("--ecosystem-root", default="G:/_codex-git")
    dossier_cmd.add_argument("--project-root", default="G:/_codex-git/tudo-para-ia-mais-humana")

    governance_cmd = commands.add_parser("governance", help="Print compact governance portfolio JSON.")
    governance_cmd.add_argument("--ecosystem-root", default="G:/_codex-git")
    governance_cmd.add_argument("--project-root", default="G:/_codex-git/tudo-para-ia-mais-humana")

    budget_cmd = commands.add_parser("line-budget", help="Print round line-budget JSON.")
    budget_cmd.add_argument("--ecosystem-root", default="G:/_codex-git")
    budget_cmd.add_argument("--project-root", default="G:/_codex-git/tudo-para-ia-mais-humana")

    return parser
def command_scan(args: argparse.Namespace) -> int:
    """Scan every ecosystem repository and print a compact JSON summary."""
    scans = scan_ecosystem(Path(args.ecosystem_root))
    platforms: list[dict[str, object]] = []
    total_lines = 0
    total_evidence = 0
    # Single pass: accumulate totals while building the per-platform rows.
    for scan in scans:
        total_lines += scan.code_lines
        total_evidence += len(scan.evidence)
        platforms.append(
            {
                "platform_id": scan.platform.platform_id,
                "exists": scan.exists,
                "git_present": scan.git_present,
                "code_lines": scan.code_lines,
                "evidence": len(scan.evidence),
                "warnings": scan.warnings,
            }
        )
    payload = {
        "platforms": platforms,
        "total_code_lines": total_lines,
        "total_evidence": total_evidence,
    }
    print(json.dumps(payload, ensure_ascii=False, indent=2))
    return 0
def command_generate(args: argparse.Namespace) -> int:
    """Run the full artifact generation pipeline and print the bundle as JSON."""
    # An empty string means "no central mirror folder"; normalize to None.
    central_folder = Path(args.central_platform_folder) if args.central_platform_folder else None
    status = args.push_status or None
    bundle = generate(
        ecosystem_root=Path(args.ecosystem_root),
        project_root=Path(args.project_root),
        central_platform_folder=central_folder,
        push_status=status,
    )
    print(json.dumps(as_plain_data(bundle), ensure_ascii=False, indent=2))
    return 0
def command_sql_counts(args: argparse.Namespace) -> int:
    """Print per-table row counts of the semantic SQLite database as JSON."""
    print(json.dumps(table_counts(Path(args.sqlite)), ensure_ascii=False, indent=2))
    return 0
def command_env(args: argparse.Namespace) -> int:
    """Print the local environment summary for the ecosystem root as JSON."""
    print(json.dumps(environment_summary(Path(args.ecosystem_root)), ensure_ascii=False, indent=2))
    return 0
def command_dossier(args: argparse.Namespace) -> int:
    """Build and print the operational round dossier as JSON.

    Runs the in-memory pipeline (scan -> matrix -> platform reports ->
    recommendations -> exit orders -> dossier) without writing artifacts.
    """
    scans = scan_ecosystem(Path(args.ecosystem_root))
    cells = build_matrix(scans)
    reports = build_platform_reports(scans, cells)
    recommendations = build_global_recommendations(reports)
    orders = build_exit_orders(recommendations)
    dossier = build_execution_round_dossier(
        project_root=Path(args.project_root),
        platform_reports=reports,
        recommendations=recommendations,
        output_orders=orders,
        total_code_lines_analyzed=sum(scan.code_lines for scan in scans),
    )
    print(json.dumps(as_plain_data(dossier), ensure_ascii=False, indent=2))
    return 0
def command_governance(args: argparse.Namespace) -> int:
    """Print a compact governance portfolio JSON for the ecosystem.

    The first half mirrors command_dossier's pipeline (scan through dossier);
    the dossier is then fed into the governance portfolio and readiness
    registry, and only a compact payload with readiness counters is printed.
    """
    scans = scan_ecosystem(Path(args.ecosystem_root))
    cells = build_matrix(scans)
    reports = build_platform_reports(scans, cells)
    recommendations = build_global_recommendations(reports)
    orders = build_exit_orders(recommendations)
    dossier = build_execution_round_dossier(
        project_root=Path(args.project_root),
        platform_reports=reports,
        recommendations=recommendations,
        output_orders=orders,
        total_code_lines_analyzed=sum(scan.code_lines for scan in scans),
    )
    portfolio = build_governance_portfolio(reports, recommendations=recommendations, round_dossier=dossier)
    registry = build_readiness_registry(reports, portfolio)
    payload = compact_governance_payload(portfolio)
    # Enrich the compact payload with readiness counters from the registry.
    payload["readiness_entries"] = len(registry.entries)
    payload["weak_readiness_entries"] = len(registry.weak_entries)
    print(json.dumps(payload, ensure_ascii=False, indent=2))
    return 0
def command_line_budget(args: argparse.Namespace) -> int:
    """Print the round line-budget as JSON."""
    budget = build_round_line_budget(Path(args.ecosystem_root), Path(args.project_root))
    print(json.dumps(as_plain_data(budget), ensure_ascii=False, indent=2))
    return 0
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: parse *argv* and dispatch to the selected command."""
    parser = build_parser()
    args = parser.parse_args(argv)
    handlers = {
        "scan": command_scan,
        "generate": command_generate,
        "sql-counts": command_sql_counts,
        "env": command_env,
        "dossier": command_dossier,
        "governance": command_governance,
        "line-budget": command_line_budget,
    }
    handler = handlers.get(args.command)
    if handler is not None:
        return handler(args)
    # Defensive: the subparser is required, so an unknown command should not
    # reach this point; parser.error() raises SystemExit before the return.
    parser.error(f"unknown command: {args.command}")
    return 2
# Script entry point: propagate main()'s integer status as the exit code.
if __name__ == "__main__":
    raise SystemExit(main())

191
src/mais_humana/commands.py Normal file
View File

@@ -0,0 +1,191 @@
"""Human-safe command catalog for validating generated artifacts."""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Sequence
from .models import PlatformHumanReport, as_plain_data
@dataclass(slots=True)
class CommandSpec:
    """A human-runnable validation command plus its context and expectations."""
    command_id: str  # stable identifier used for lookup and filtering
    title: str  # short human title rendered in generated documents
    shell: str  # PowerShell command line to execute
    cwd: str  # working directory the command must run from
    purpose: str  # why a human would run this command
    expected_result: str  # what success looks like
    risk: str = "low"  # informal risk label (observed values: "low", "medium")
    def to_dict(self) -> dict[str, object]:
        """Return this spec as plain data via as_plain_data."""
        return as_plain_data(self)
def quote_path(path: Path | str) -> str:
    """Return *path* as a single-quoted PowerShell string literal."""
    # PowerShell escapes an embedded single quote by doubling it.
    escaped = str(path).replace("'", "''")
    return "'" + escaped + "'"
def base_validation_commands(project_root: Path, central_folder: Path | None = None) -> tuple[CommandSpec, ...]:
    """Return the fixed validation catalog for the platform itself.

    Always emits compile, unit-test, regenerate, and line-count commands;
    when *central_folder* is given, adds a semantic-SQLite count command
    pointing at that folder's database.
    """
    commands = [
        CommandSpec(
            command_id="python-compileall",
            title="Compilar pacote Python",
            shell=f"$env:PYTHONPATH={quote_path(project_root / 'src')}; $env:MAIS_HUMANA_TEMP={quote_path(project_root / '.test-tmp')}; & $env:CODEX_PYTHON -m compileall {quote_path(project_root / 'src')}",
            cwd=str(project_root),
            purpose="Detectar erro de sintaxe nos modulos da plataforma.",
            expected_result="compileall finaliza sem erro.",
        ),
        CommandSpec(
            command_id="python-unittest",
            title="Executar testes unitarios",
            shell=f"$env:PYTHONPATH={quote_path(project_root / 'src')}; $env:MAIS_HUMANA_TEMP={quote_path(project_root / '.test-tmp')}; & $env:CODEX_PYTHON -m unittest discover -s {quote_path(project_root / 'tests')}",
            cwd=str(project_root),
            purpose="Validar scanner, matriz, DOCX, SQLite, ordens, contratos e redaction.",
            expected_result="todos os testes passam.",
        ),
        CommandSpec(
            command_id="generate-reports",
            title="Regenerar relatorios humanos",
            shell=(
                f"$env:PYTHONPATH={quote_path(project_root / 'src')}; & $env:CODEX_PYTHON -m mais_humana.cli generate --ecosystem-root "
                f"{quote_path(project_root.parent)} --project-root {quote_path(project_root)}"
            ),
            cwd=str(project_root),
            purpose="Gerar relatorios, graficos, matrizes e dados.",
            expected_result="JSON de bundle emitido e artefatos atualizados.",
        ),
        CommandSpec(
            command_id="count-code-lines",
            title="Contar linhas de codigo da plataforma",
            shell="rg --files -g '*.py' | ForEach-Object { Get-Content -LiteralPath $_ | Measure-Object -Line }",
            cwd=str(project_root),
            purpose="Comprovar volume de codigo util produzido.",
            expected_result="soma de linhas Python acima da meta da rodada.",
        ),
    ]
    if central_folder is not None:
        # Optional: only meaningful when a central management folder exists.
        commands.append(
            CommandSpec(
                command_id="semantic-sql-counts",
                title="Contar registros do SQL semantico",
                shell=(
                    f"& $env:CODEX_PYTHON -m mais_humana.cli sql-counts --sqlite "
                    f"{quote_path(central_folder / 'controle-semantico.sqlite')}"
                ),
                cwd=str(project_root),
                purpose="Verificar memoria semantica compacta da pasta gerencial.",
                expected_result="contagens de files, service_orders, platform_reports e recommendations.",
            )
        )
    return tuple(commands)
def platform_validation_commands(reports: Sequence[PlatformHumanReport]) -> tuple[CommandSpec, ...]:
    """Derive npm test/build commands from scripts declared by each scanned repo.

    Only emits a command when the repository itself declares a "test" or
    "build" script; both are labeled medium risk since they run repo code.
    """
    commands: list[CommandSpec] = []
    for report in reports:
        scripts = {script.name: script.command for script in report.scan.scripts}
        if "test" in scripts:
            commands.append(
                CommandSpec(
                    command_id=f"{report.platform.platform_id}-test",
                    title=f"Testar {report.platform.title}",
                    shell="npm test",
                    cwd=report.scan.repo_path,
                    purpose="Executar teste canonico declarado pelo repositorio real.",
                    expected_result="suite do repositorio finaliza sem erro.",
                    risk="medium",
                )
            )
        if "build" in scripts:
            commands.append(
                CommandSpec(
                    command_id=f"{report.platform.platform_id}-build",
                    title=f"Build {report.platform.title}",
                    shell="npm run build",
                    cwd=report.scan.repo_path,
                    purpose="Validar empacotamento ou typecheck do repositorio real.",
                    expected_result="build finaliza sem erro.",
                    risk="medium",
                )
            )
    return tuple(commands)
def commands_markdown(commands: Sequence[CommandSpec]) -> str:
    """Render *commands* as a human-readable Markdown catalog."""
    lines: list[str] = ["# Comandos humanos equivalentes", ""]
    for spec in commands:
        lines.extend(
            [
                f"## {spec.title}",
                "",
                f"- id: `{spec.command_id}`",
                f"- cwd: `{spec.cwd}`",
                f"- risco: `{spec.risk}`",
                f"- finalidade: {spec.purpose}",
                f"- esperado: {spec.expected_result}",
                "",
                "```powershell",
                spec.shell,
                "```",
                "",
            ]
        )
    return "\n".join(lines).strip() + "\n"
def commands_by_risk(commands: Sequence[CommandSpec]) -> dict[str, int]:
    """Count commands per risk label; keys come back in sorted order."""
    tally: dict[str, int] = {}
    for spec in commands:
        tally[spec.risk] = tally.get(spec.risk, 0) + 1
    return {risk: tally[risk] for risk in sorted(tally)}
def command_ids(commands: Sequence[CommandSpec]) -> tuple[str, ...]:
    """Return each command's id, preserving input order."""
    return tuple(command.command_id for command in commands)
def command_summary(commands: Sequence[CommandSpec]) -> str:
    """Summarize *commands* as ``"<n> comandos; risk=count, ..."``.

    Fix: with no commands, return just ``"0 comandos"`` instead of the
    previous dangling ``"0 comandos; "`` (empty join after the separator).
    """
    counts = commands_by_risk(commands)
    if not counts:
        return "0 comandos"
    parts = [f"{risk}={count}" for risk, count in counts.items()]
    return f"{len(commands)} comandos; " + ", ".join(parts)
def filter_commands(commands: Sequence[CommandSpec], risk: str | None = None, text: str | None = None) -> tuple[CommandSpec, ...]:
    """Filter by exact risk label and/or case-insensitive substring of title/purpose/expected result."""
    needle = text.lower() if text else None

    def keep(spec: CommandSpec) -> bool:
        if risk and spec.risk != risk:
            return False
        if needle is None:
            return True
        haystack = f"{spec.title} {spec.purpose} {spec.expected_result}".lower()
        return needle in haystack

    return tuple(spec for spec in commands if keep(spec))
def first_command(commands: Sequence[CommandSpec], command_id: str) -> CommandSpec | None:
    """Return the first command whose id matches *command_id*, else None."""
    return next((spec for spec in commands if spec.command_id == command_id), None)
def require_command(commands: Sequence[CommandSpec], command_id: str) -> CommandSpec:
    """Return the command with *command_id*; raise KeyError when missing."""
    found = first_command(commands, command_id)
    if found is None:
        raise KeyError(command_id)
    return found
def command_titles(commands: Sequence[CommandSpec]) -> tuple[str, ...]:
    """Return each command's title, preserving input order."""
    return tuple(command.title for command in commands)
def has_command(commands: Sequence[CommandSpec], command_id: str) -> bool:
    """Return True when a command with *command_id* exists."""
    return first_command(commands, command_id) is not None
def command_count(commands: Sequence[CommandSpec]) -> int:
    """Return the number of commands."""
    return len(commands)
def command_count_label(commands: Sequence[CommandSpec]) -> str:
    """Return a short Portuguese label like ``"3 comandos"``."""
    return f"{command_count(commands)} comandos"

137
src/mais_humana/contract.py Normal file
View File

@@ -0,0 +1,137 @@
"""Public contract export for the Mais Humana platform."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence
from .catalog import HUMAN_NEEDS, HUMAN_PROFILES, PLATFORMS
from .models import PlatformHumanReport, ReportBundle, as_plain_data, utc_now
@dataclass(slots=True)
class ContractSurface:
    """One externally consumable output surface of the platform."""
    surface_id: str  # stable identifier of the surface
    title: str  # short human title
    description: str  # what this surface provides, in one sentence
    output_files: tuple[str, ...]  # project-relative paths or glob patterns
    consumers: tuple[str, ...]  # roles/systems that read this surface
    def to_dict(self) -> dict[str, object]:
        """Return this surface as plain data via as_plain_data."""
        return as_plain_data(self)
@dataclass(slots=True)
class MaisHumanaContract:
    """Versioned public contract describing the platform's outputs and scope."""
    contract_version: str  # e.g. "mais-humana.contract.v1"
    generated_at: str  # UTC timestamp string (via utc_now)
    platform_id: str  # owning platform identifier
    mission: str  # mission statement in Portuguese
    surfaces: tuple[ContractSurface, ...]  # declared output surfaces
    profile_ids: tuple[str, ...]  # catalog profile ids covered
    platform_ids: tuple[str, ...]  # catalog platform ids covered
    need_ids: tuple[str, ...]  # catalog need ids covered
    report_count: int  # number of per-platform reports generated
    matrix_cells: int  # number of platform x profile matrix cells
    total_code_lines: int  # total code lines analyzed in the round
    def to_dict(self) -> dict[str, object]:
        """Return this contract as plain data via as_plain_data."""
        return as_plain_data(self)
def default_surfaces() -> tuple[ContractSurface, ...]:
    """Return the five fixed output surfaces the platform commits to."""
    return (
        ContractSurface(
            surface_id="ecosystem_report",
            title="Relatorio geral humano",
            description="Leitura executiva do ecossistema por pessoas, plataformas, lacunas e riscos.",
            output_files=(
                "ecossistema/RELATORIO-GERAL-DO-ECOSSISTEMA-humana.md",
                "relatorios-docx/RELATORIO-GERAL-DO-ECOSSISTEMA-humana.docx",
            ),
            consumers=("CEO", "planejamento estrategico", "central de OS"),
        ),
        ContractSurface(
            surface_id="platform_reports",
            title="Relatorios por plataforma",
            description="Leitura por plataforma com perfis atendidos, gaps, evidencias e recomendacoes.",
            output_files=("plataformas/*.md", "relatorios-docx/plataformas/*.docx"),
            consumers=("gestor operacional", "suporte", "juridico", "financeiro"),
        ),
        ContractSurface(
            surface_id="human_matrix",
            title="Matriz plataforma x perfil",
            description="Tabela de score que mostra quem e atendido por qual plataforma.",
            output_files=("matrizes/matriz-plataforma-perfil.csv", "graficos/matriz-plataforma-perfil.svg"),
            consumers=("planejamento estrategico", "MCP", "UI Platform"),
        ),
        ContractSurface(
            surface_id="quality_gate",
            title="Quality Gate Mais Humano",
            description="Separacao entre readiness tecnico e prontidao para leitura humana.",
            output_files=("ecossistema/QUALITY-GATE-MAIS-HUMANO.md", "dados/quality-gates.json"),
            consumers=("gestor operacional", "auditoria", "central de OS"),
        ),
        ContractSurface(
            surface_id="service_orders",
            title="Ordens orientadoras",
            description="Ordens executivas e gerenciais criadas a partir de lacunas reais.",
            output_files=("os-orientadoras/ordens-de-saida.json", "central/orders/**/*.md"),
            consumers=("Codex", "nucleo de gestao operacional"),
        ),
    )
def build_contract(bundle: ReportBundle, reports: Sequence[PlatformHumanReport]) -> MaisHumanaContract:
    """Assemble the versioned public contract from a report bundle and reports.

    Catalog ids come from the static HUMAN_PROFILES/PLATFORMS/HUMAN_NEEDS
    tables; counts come from the generated *bundle* and *reports*.
    """
    return MaisHumanaContract(
        contract_version="mais-humana.contract.v1",
        generated_at=utc_now(),
        platform_id="tudo-para-ia-mais-humana",
        mission=(
            "Traduzir o estado tecnico do ecossistema Tudo Para IA em compreensao humana, "
            "metas de atendimento, visao por perfil, relatorios executivos e orientacao de OS."
        ),
        surfaces=default_surfaces(),
        profile_ids=tuple(profile.profile_id for profile in HUMAN_PROFILES),
        platform_ids=tuple(platform.platform_id for platform in PLATFORMS),
        need_ids=tuple(need.need_id for need in HUMAN_NEEDS),
        report_count=len(reports),
        matrix_cells=bundle.matrix_cells,
        total_code_lines=bundle.total_code_lines_analyzed,
    )
def contract_markdown(contract: MaisHumanaContract) -> str:
    """Render the public contract as a Markdown document."""
    lines: list[str] = [
        "# Contrato publico - tudo-para-ia-mais-humana",
        "",
        f"- contract_version: `{contract.contract_version}`",
        f"- generated_at: `{contract.generated_at}`",
        f"- platform_id: `{contract.platform_id}`",
        f"- report_count: `{contract.report_count}`",
        f"- matrix_cells: `{contract.matrix_cells}`",
        f"- total_code_lines: `{contract.total_code_lines}`",
        "",
        "## Missao",
        "",
        contract.mission,
        "",
        "## Surfaces",
        "",
    ]
    for surface in contract.surfaces:
        lines.extend([f"### {surface.title}", "", f"- surface_id: `{surface.surface_id}`", "", surface.description, ""])
        lines.append("Arquivos:")
        lines.extend(f"- `{output}`" for output in surface.output_files)
        lines.append("")
        lines.append("Consumidores:")
        lines.extend(f"- {consumer}" for consumer in surface.consumers)
        lines.append("")
    return "\n".join(lines).strip() + "\n"

View File

@@ -0,0 +1,270 @@
"""Small DOCX writer built with the Python standard library.
The writer intentionally supports only the structures this platform needs:
headings, paragraphs, bullet lists, simple tables, and page breaks. That keeps
the reporting pipeline portable inside operational mirrors where optional DOCX
libraries may not be installed.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import Iterable, Sequence
from zipfile import ZIP_DEFLATED, ZipFile
import html
# OOXML namespace URIs used when rendering the .docx document parts.
WORD_NS = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
REL_NS = "http://schemas.openxmlformats.org/package/2006/relationships"
DOC_REL_NS = "http://schemas.openxmlformats.org/officeDocument/2006/relationships"
def esc(value: object) -> str:
    """Return *value* stringified and XML-escaped, including quotes."""
    return html.escape(str(value), quote=True)
def tag(name: str, content: str = "", attrs: dict[str, str] | None = None) -> str:
    """Wrap *content* in element *name*; attribute values are escaped.

    Note: *content* is inserted verbatim — callers pass already-built XML.
    """
    attr_text = ""
    if attrs:
        attr_text = " " + " ".join(f'{key}="{esc(value)}"' for key, value in attrs.items())
    return f"<{name}{attr_text}>{content}</{name}>"
def empty_tag(name: str, attrs: dict[str, str] | None = None) -> str:
    """Render a self-closing element *name* with escaped attribute values."""
    attr_text = ""
    if attrs:
        attr_text = " " + " ".join(f'{key}="{esc(value)}"' for key, value in attrs.items())
    return f"<{name}{attr_text}/>"
def text_run(text: str, bold: bool = False, italic: bool = False) -> str:
    """Render a WordprocessingML run (w:r) with optional bold/italic props."""
    props = ""
    if bold or italic:
        inner = ""
        if bold:
            inner += empty_tag("w:b")
        if italic:
            inner += empty_tag("w:i")
        props = tag("w:rPr", inner)
    # xml:space="preserve" keeps leading/trailing spaces from being dropped.
    preserve = {"xml:space": "preserve"} if text.startswith(" ") or text.endswith(" ") else None
    return tag("w:r", props + tag("w:t", esc(text), preserve))
def paragraph_xml(text: str = "", style: str | None = None, bullet: bool = False) -> str:
    """Render a paragraph (w:p), optionally styled or as a bullet list item."""
    props = ""
    if style:
        props += tag("w:pStyle", "", {"w:val": style})
    if bullet:
        # numId 1 references the single bullet list defined in numbering.xml.
        props += tag("w:numPr", tag("w:ilvl", "", {"w:val": "0"}) + tag("w:numId", "", {"w:val": "1"}))
    prop_xml = tag("w:pPr", props) if props else ""
    return tag("w:p", prop_xml + text_run(text))
def heading_xml(text: str, level: int) -> str:
    """Render a heading paragraph; *level* is clamped to 1..4."""
    level = max(1, min(4, int(level)))
    return paragraph_xml(text, style=f"Heading{level}")
def page_break_xml() -> str:
    """Render an empty paragraph containing an explicit page break."""
    return tag("w:p", tag("w:r", empty_tag("w:br", {"w:type": "page"})))
def cell_xml(value: str) -> str:
    """Render a table cell (w:tc) with a fixed 2400-dxa width."""
    props = tag("w:tcPr", tag("w:tcW", "", {"w:w": "2400", "w:type": "dxa"}))
    return tag("w:tc", props + paragraph_xml(value))
def row_xml(values: Sequence[str]) -> str:
    """Render a table row (w:tr) from a sequence of cell values."""
    return tag("w:tr", "".join(cell_xml(value) for value in values))
def table_xml(headers: Sequence[str], rows: Sequence[Sequence[str]]) -> str:
    """Render a bordered table (w:tbl) with a header row plus data rows."""
    # Same thin gray single border on all six edges (outer + inner).
    border = empty_tag("w:top", {"w:val": "single", "w:sz": "4", "w:space": "0", "w:color": "B8C0CC"})
    border += empty_tag("w:left", {"w:val": "single", "w:sz": "4", "w:space": "0", "w:color": "B8C0CC"})
    border += empty_tag("w:bottom", {"w:val": "single", "w:sz": "4", "w:space": "0", "w:color": "B8C0CC"})
    border += empty_tag("w:right", {"w:val": "single", "w:sz": "4", "w:space": "0", "w:color": "B8C0CC"})
    border += empty_tag("w:insideH", {"w:val": "single", "w:sz": "4", "w:space": "0", "w:color": "B8C0CC"})
    border += empty_tag("w:insideV", {"w:val": "single", "w:sz": "4", "w:space": "0", "w:color": "B8C0CC"})
    props = tag("w:tblPr", tag("w:tblBorders", border))
    body = row_xml(headers) + "".join(row_xml([str(cell) for cell in row]) for row in rows)
    return tag("w:tbl", props + body)
@dataclass(slots=True)
class DocxElement:
    """One body element of a DocxDocument, discriminated by *kind*."""
    kind: str  # "heading" | "paragraph" | "bullet" | "table" | "page_break"
    text: str = ""  # text content for heading/paragraph/bullet kinds
    level: int = 1  # heading level (heading kind only)
    headers: tuple[str, ...] = ()  # header row (table kind only)
    rows: tuple[tuple[str, ...], ...] = ()  # data rows (table kind only)
@dataclass
class DocxDocument:
    """Minimal document model that can be written to a .docx file."""
    title: str  # rendered as the first Heading1 and as dc:title
    subject: str = "Tudo Para IA Mais Humana"  # dc:subject core property
    creator: str = "mais_humana"  # dc:creator core property
    elements: list[DocxElement] = field(default_factory=list)  # body elements in order
    def heading(self, text: str, level: int = 1) -> None:
        """Append a heading; *level* is clamped to 1..4 at render time."""
        self.elements.append(DocxElement(kind="heading", text=text, level=level))
    def paragraph(self, text: str = "") -> None:
        """Append a plain paragraph (empty by default)."""
        self.elements.append(DocxElement(kind="paragraph", text=text))
    def bullet(self, text: str) -> None:
        """Append a bulleted list item."""
        self.elements.append(DocxElement(kind="bullet", text=text))
    def table(self, headers: Sequence[str], rows: Sequence[Sequence[object]]) -> None:
        """Append a table; every cell value is stringified."""
        normalized_rows = tuple(tuple(str(value) for value in row) for row in rows)
        self.elements.append(DocxElement(kind="table", headers=tuple(headers), rows=normalized_rows))
    def page_break(self) -> None:
        """Append an explicit page break."""
        self.elements.append(DocxElement(kind="page_break"))
    def extend_paragraphs(self, lines: Iterable[str]) -> None:
        """Append *lines* as paragraphs, promoting short ':'-terminated lines to headings."""
        for line in lines:
            stripped = str(line).strip()
            if not stripped:
                self.paragraph("")
            elif stripped.endswith(":") and len(stripped) < 80:
                # Heuristic: a short line ending in ":" reads as a section label.
                self.heading(stripped[:-1], 2)
            else:
                self.paragraph(stripped)
    def document_body(self) -> str:
        """Render the word/document.xml part for all elements."""
        body = [heading_xml(self.title, 1)]
        for element in self.elements:
            if element.kind == "heading":
                body.append(heading_xml(element.text, element.level))
            elif element.kind == "paragraph":
                body.append(paragraph_xml(element.text))
            elif element.kind == "bullet":
                body.append(paragraph_xml(element.text, bullet=True))
            elif element.kind == "table":
                body.append(table_xml(element.headers, element.rows))
            elif element.kind == "page_break":
                body.append(page_break_xml())
        # A4 page size with margins, in twentieths of a point (dxa).
        section_props = tag(
            "w:sectPr",
            empty_tag("w:pgSz", {"w:w": "11906", "w:h": "16838"})
            + empty_tag("w:pgMar", {"w:top": "1440", "w:right": "1080", "w:bottom": "1440", "w:left": "1080"}),
        )
        return (
            '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
            f'<w:document xmlns:w="{WORD_NS}">'
            + tag("w:body", "".join(body) + section_props)
            + "</w:document>"
        )
    def core_properties(self) -> str:
        """Render the docProps/core.xml part with title/subject/creator."""
        return (
            '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
            '<cp:coreProperties xmlns:cp="http://schemas.openxmlformats.org/package/2006/metadata/core-properties" '
            'xmlns:dc="http://purl.org/dc/elements/1.1/" '
            'xmlns:dcterms="http://purl.org/dc/terms/" '
            'xmlns:dcmitype="http://purl.org/dc/dcmitype/" '
            'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
            f"<dc:title>{esc(self.title)}</dc:title>"
            f"<dc:subject>{esc(self.subject)}</dc:subject>"
            f"<dc:creator>{esc(self.creator)}</dc:creator>"
            "<cp:lastModifiedBy>mais_humana</cp:lastModifiedBy>"
            "</cp:coreProperties>"
        )
    def styles(self) -> tuple[str, str]:
        """Return the (styles.xml, numbering.xml) payloads.

        The annotation previously said ``-> str`` but the method has always
        returned a two-tuple, as unpacked by :meth:`write`.
        """
        styles = [
            style_xml("Normal", "paragraph", "Normal", "Calibri", 22),
            style_xml("Heading1", "paragraph", "heading 1", "Aptos Display", 32, bold=True),
            style_xml("Heading2", "paragraph", "heading 2", "Aptos Display", 26, bold=True),
            style_xml("Heading3", "paragraph", "heading 3", "Aptos Display", 23, bold=True),
            style_xml("Heading4", "paragraph", "heading 4", "Aptos Display", 21, bold=True),
        ]
        numbering = (
            '<w:numbering xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
            '<w:abstractNum w:abstractNumId="1">'
            '<w:lvl w:ilvl="0"><w:start w:val="1"/><w:numFmt w:val="bullet"/>'
            '<w:lvlText w:val="*"/><w:lvlJc w:val="left"/></w:lvl>'
            "</w:abstractNum><w:num w:numId=\"1\"><w:abstractNumId w:val=\"1\"/></w:num></w:numbering>"
        )
        return (
            '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
            f'<w:styles xmlns:w="{WORD_NS}">' + "".join(styles) + "</w:styles>"
        ), numbering
    def write(self, path: Path) -> Path:
        """Write the document to *path* as a .docx zip archive; return *path*."""
        path.parent.mkdir(parents=True, exist_ok=True)
        styles, numbering = self.styles()
        with ZipFile(path, "w", compression=ZIP_DEFLATED) as archive:
            archive.writestr("[Content_Types].xml", content_types_xml())
            archive.writestr("_rels/.rels", package_relationships_xml())
            archive.writestr("docProps/core.xml", self.core_properties())
            archive.writestr("word/document.xml", self.document_body())
            archive.writestr("word/styles.xml", styles)
            archive.writestr("word/numbering.xml", numbering)
            archive.writestr("word/_rels/document.xml.rels", document_relationships_xml())
        return path
def style_xml(style_id: str, style_type: str, name: str, font: str, size: int, bold: bool = False) -> str:
    """Render a w:style definition; *size* is in half-points."""
    run_props = tag("w:rFonts", "", {"w:ascii": font, "w:hAnsi": font})
    run_props += tag("w:sz", "", {"w:val": str(size)})
    if bold:
        run_props += empty_tag("w:b")
    return tag(
        "w:style",
        tag("w:name", "", {"w:val": name}) + tag("w:rPr", run_props),
        {"w:type": style_type, "w:styleId": style_id},
    )
def content_types_xml() -> str:
    """Return the constant [Content_Types].xml part for the .docx package."""
    return (
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">'
        '<Default Extension="rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>'
        '<Default Extension="xml" ContentType="application/xml"/>'
        '<Override PartName="/word/document.xml" ContentType="application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml"/>'
        '<Override PartName="/word/styles.xml" ContentType="application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml"/>'
        '<Override PartName="/word/numbering.xml" ContentType="application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml"/>'
        '<Override PartName="/docProps/core.xml" ContentType="application/vnd.openxmlformats-package.core-properties+xml"/>'
        "</Types>"
    )
def package_relationships_xml() -> str:
    """Return the constant package-level _rels/.rels part."""
    return (
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
        f'<Relationships xmlns="{REL_NS}">'
        f'<Relationship Id="rId1" Type="{DOC_REL_NS}/officeDocument" Target="word/document.xml"/>'
        '<Relationship Id="rId2" Type="http://schemas.openxmlformats.org/package/2006/relationships/metadata/core-properties" Target="docProps/core.xml"/>'
        "</Relationships>"
    )
def document_relationships_xml() -> str:
    """Return the constant word/_rels/document.xml.rels part."""
    return (
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
        f'<Relationships xmlns="{REL_NS}">'
        f'<Relationship Id="rId1" Type="{DOC_REL_NS}/styles" Target="styles.xml"/>'
        f'<Relationship Id="rId2" Type="{DOC_REL_NS}/numbering" Target="numbering.xml"/>'
        "</Relationships>"
    )
def write_lines_docx(path: Path, title: str, lines: Sequence[str]) -> Path:
    """Write *lines* to a DOCX file at *path*, classifying each line.

    Classification order: blank lines become empty paragraphs, ``"- "``
    prefixed lines become bullets, short sentence-less lines (under 90
    chars, no trailing period, no colon) become level-2 headings, and
    everything else becomes a plain paragraph. Returns *path*.
    """
    doc = DocxDocument(title=title)
    for line in lines:
        clean = str(line).strip()
        if not clean:
            doc.paragraph("")
        elif clean.startswith("- "):
            # Bug fix: bullets must be detected before the heading heuristic.
            # Previously a short bullet such as "- item" matched the heading
            # branch first and was rendered as a heading with the dash kept.
            doc.bullet(clean[2:])
        elif len(clean) < 90 and not clean.endswith(".") and ":" not in clean:
            doc.heading(clean, 2)
        else:
            doc.paragraph(clean)
    return doc.write(path)

View File

@@ -0,0 +1,239 @@
"""Evidence graph derived from governance, workflows, registry, and orders."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Sequence
from .exit_order_compiler import CompiledOrderSet
from .governance_models import EcosystemGovernancePortfolio
from .human_readiness_registry import ReadinessRegistry
from .models import as_plain_data, merge_unique, slugify
from .workflow_registry import WorkflowPortfolio
@dataclass(slots=True)
class EvidenceNode:
    """One node of the evidence graph."""
    node_id: str  # slugified stable identifier
    label: str  # human-readable label
    node_type: str  # e.g. "platform", "check", "evidence", "workflow"
    status: str  # status or kind string, depending on node_type
    weight: int  # relevance weight used for ranking in renders
    def to_dict(self) -> dict[str, object]:
        """Return this node as plain data via as_plain_data."""
        return as_plain_data(self)
@dataclass(slots=True)
class EvidenceEdge:
    """One directed, labeled edge of the evidence graph."""
    source: str  # source node id
    target: str  # target node id
    relation: str  # relation label, e.g. "supported_by"
    weight: int  # relevance weight used for ranking in renders
    reason: str  # human explanation of why the edge exists
    def to_dict(self) -> dict[str, object]:
        """Return this edge as plain data via as_plain_data."""
        return as_plain_data(self)
@dataclass(slots=True)
class EvidenceGraph:
    """Deduplicated, sorted evidence graph plus a short textual summary."""
    nodes: tuple[EvidenceNode, ...]  # sorted by node_id
    edges: tuple[EvidenceEdge, ...]  # deduplicated, sorted by (source, relation, target)
    summary: tuple[str, ...]  # one-line counters (nodes, edges, ...)
    def to_dict(self) -> dict[str, object]:
        """Return this graph as plain data via as_plain_data."""
        return as_plain_data(self)
def node_id(*parts: str) -> str:
    """Build a stable node id by slugifying the dot-joined *parts*."""
    return slugify(".".join(parts))
def add_node(nodes: dict[str, EvidenceNode], node: EvidenceNode) -> None:
    """Register *node* by id, keeping the first node seen for each id."""
    nodes.setdefault(node.node_id, node)
def add_edge(edges: list[EvidenceEdge], source: str, target: str, relation: str, weight: int, reason: str) -> None:
    """Append a new edge; duplicates are allowed here and collapsed later by dedupe_edges."""
    edges.append(EvidenceEdge(source=source, target=target, relation=relation, weight=weight, reason=reason))
def platform_nodes(portfolio: EcosystemGovernancePortfolio, nodes: dict[str, EvidenceNode]) -> None:
    """Add one "platform" node per governance card, weighted by its score."""
    for card in portfolio.cards:
        add_node(
            nodes,
            EvidenceNode(
                node_id=node_id("platform", card.platform_id),
                label=card.platform_id,
                node_type="platform",
                status=card.status_label,
                weight=card.governance_score,
            ),
        )
def governance_nodes_edges(portfolio: EcosystemGovernancePortfolio, nodes: dict[str, EvidenceNode], edges: list[EvidenceEdge]) -> None:
    """Add check nodes/edges per platform, plus their supporting evidence.

    Checks marked not_applicable are skipped, and at most the first four
    evidence items per check are linked to keep the graph compact.
    """
    for card in portfolio.cards:
        platform_node = node_id("platform", card.platform_id)
        for check in card.checks:
            if check.status.value == "not_applicable":
                continue
            check_node = node_id("check", card.platform_id, check.check_id)
            add_node(
                nodes,
                EvidenceNode(
                    node_id=check_node,
                    label=check.title,
                    node_type="check",
                    status=check.status.value,
                    weight=check.score,
                ),
            )
            add_edge(edges, platform_node, check_node, f"has_check:{check.domain.value}", check.score, check.reason)
            for evidence in check.evidence[:4]:
                evidence_node = node_id("evidence", evidence.reference)
                add_node(
                    nodes,
                    EvidenceNode(
                        node_id=evidence_node,
                        label=evidence.reference,
                        node_type="evidence",
                        status=evidence.kind.value,
                        weight=round(evidence.confidence * 100),
                    ),
                )
                add_edge(edges, check_node, evidence_node, "supported_by", round(evidence.confidence * 100), evidence.summary)
def registry_nodes_edges(registry: ReadinessRegistry, nodes: dict[str, EvidenceNode], edges: list[EvidenceEdge]) -> None:
    """Add profile/readiness nodes and edges for every non-"pronto" entry.

    Entries already marked "pronto" (ready) are skipped, so the graph only
    shows platform/profile pairs that still need action.
    """
    for entry in registry.entries:
        if entry.status == "pronto":
            continue
        profile_node = node_id("profile", entry.profile_id)
        platform_node = node_id("platform", entry.platform_id)
        entry_node = node_id("readiness", entry.platform_id, entry.profile_id)
        add_node(nodes, EvidenceNode(profile_node, entry.profile_name, "profile", "tracked", entry.human_score))
        add_node(nodes, EvidenceNode(entry_node, f"{entry.platform_id}/{entry.profile_id}", "readiness", entry.status, entry.human_score))
        add_edge(edges, platform_node, entry_node, "serves_profile", entry.human_score, entry.recommended_action)
        add_edge(edges, entry_node, profile_node, "for_profile", entry.human_score, entry.recommended_action)
def workflow_nodes_edges(workflows: WorkflowPortfolio, nodes: dict[str, EvidenceNode], edges: list[EvidenceEdge]) -> None:
    """Add workflow and workflow-step nodes, linking platforms and step owners."""
    by_workflow = {workflow.workflow_id: workflow for workflow in workflows.workflows}
    for evaluation in workflows.evaluations:
        workflow = by_workflow[evaluation.workflow_id]
        workflow_node = node_id("workflow", workflow.workflow_id)
        add_node(nodes, EvidenceNode(workflow_node, workflow.title, "workflow", evaluation.status, evaluation.score))
        for platform_id in workflow.platforms:
            platform_node = node_id("platform", platform_id)
            add_edge(edges, workflow_node, platform_node, "depends_on_platform", evaluation.score, workflow.purpose)
        for step in workflow.steps:
            step_node = node_id("workflow-step", workflow.workflow_id, step.step_id)
            add_node(nodes, EvidenceNode(step_node, step.title, "workflow_step", "tracked", evaluation.score))
            add_edge(edges, workflow_node, step_node, "has_step", evaluation.score, step.validation)
            add_edge(edges, step_node, node_id("platform", step.owner_platform), "owned_by", evaluation.score, step.human_output)
def order_nodes_edges(compiled: CompiledOrderSet | None, nodes: dict[str, EvidenceNode], edges: list[EvidenceEdge]) -> None:
    """Add service-order nodes, guessing the owning platform from order text.

    The platform hint is a best-effort token scan over the order's scope and
    reason; unmatched orders fall back to the synthetic "ecosystem" node.
    """
    if compiled is None:
        return
    for order in compiled.service_orders:
        order_node = node_id("order", order.order_id)
        platform_hint = "ecosystem"
        lowered = f"{order.object_scope} {order.reason}".lower()
        # First matching token wins; tokens double as platform-id fragments.
        for token in ("business", "compliance", "customer_ops", "docs", "finance", "gettys", "identity", "integracoes", "intelligence", "mcps", "platform_base", "public", "stj", "ui"):
            if token in lowered or token.replace("_", "-") in lowered:
                platform_hint = token
                break
        add_node(nodes, EvidenceNode(order_node, order.title, "service_order", order.status.value, 50))
        add_edge(edges, node_id("platform", platform_hint), order_node, f"creates_{order.order_type.value}", 50, order.reason)
def relation_edges(portfolio: EcosystemGovernancePortfolio, edges: list[EvidenceEdge]) -> None:
    """Add platform-to-platform edges from the portfolio relation matrix."""
    for source, target, relation in portfolio.relation_matrix:
        add_edge(edges, node_id("platform", source), node_id("platform", target), relation, 40, "relacao declarada ou inferida pelo dominio")
def dedupe_edges(edges: Iterable[EvidenceEdge]) -> tuple[EvidenceEdge, ...]:
    """Drop duplicate (source, target, relation) edges, keep the first seen, then sort."""
    unique: dict[tuple[str, str, str], EvidenceEdge] = {}
    for edge in edges:
        # setdefault keeps the first edge registered under each key.
        unique.setdefault((edge.source, edge.target, edge.relation), edge)
    ordered = sorted(unique.values(), key=lambda edge: (edge.source, edge.relation, edge.target))
    return tuple(ordered)
def build_evidence_graph(
    portfolio: EcosystemGovernancePortfolio,
    registry: ReadinessRegistry,
    workflows: WorkflowPortfolio,
    compiled_orders: CompiledOrderSet | None = None,
) -> EvidenceGraph:
    """Assemble the full evidence graph from all governance sources.

    Collects nodes and edges from the governance portfolio, readiness
    registry, workflow portfolio, and (optionally) compiled service orders,
    then deduplicates edges and sorts nodes by id.
    """
    nodes: dict[str, EvidenceNode] = {}
    edges: list[EvidenceEdge] = []
    platform_nodes(portfolio, nodes)
    governance_nodes_edges(portfolio, nodes, edges)
    registry_nodes_edges(registry, nodes, edges)
    workflow_nodes_edges(workflows, nodes, edges)
    order_nodes_edges(compiled_orders, nodes, edges)
    relation_edges(portfolio, edges)
    final_edges = dedupe_edges(edges)
    summary = (
        f"Nos: {len(nodes)}",
        f"Arestas: {len(final_edges)}",
        f"Plataformas: {sum(1 for node in nodes.values() if node.node_type == 'platform')}",
        f"Checks: {sum(1 for node in nodes.values() if node.node_type == 'check')}",
        f"Workflows: {sum(1 for node in nodes.values() if node.node_type == 'workflow')}",
    )
    return EvidenceGraph(nodes=tuple(sorted(nodes.values(), key=lambda item: item.node_id)), edges=final_edges, summary=summary)
def graph_markdown(graph: EvidenceGraph, limit: int = 160) -> str:
    """Render the graph as Markdown, listing up to *limit* top-weight nodes and edges."""
    lines = ["# Grafo de evidencias Mais Humana", ""]
    lines.extend(f"- {item}" for item in graph.summary)
    lines.extend(["", "## Nos principais", ""])
    # Heaviest nodes first; ties broken by type then id for stable output.
    for node in sorted(graph.nodes, key=lambda item: (-item.weight, item.node_type, item.node_id))[:limit]:
        lines.append(f"- `{node.node_id}` [{node.node_type}] {node.label} status `{node.status}` peso `{node.weight}`")
    lines.extend(["", "## Arestas principais", ""])
    for edge in sorted(graph.edges, key=lambda item: (-item.weight, item.source, item.target))[:limit]:
        lines.append(f"- `{edge.source}` -> `{edge.target}` ({edge.relation}) peso `{edge.weight}` - {edge.reason}")
    return "\n".join(lines).strip() + "\n"
def graph_dot(graph: EvidenceGraph, limit_edges: int = 260) -> str:
    """Render the evidence graph in Graphviz DOT syntax.

    Node color encodes status (red = blocked/critical, orange = attention,
    gray otherwise); labels are truncated and double quotes neutralized.
    """
    blocked_states = {"blocked", "bloqueado", "critico"}
    warning_states = {"attention", "atencao"}
    out = ["digraph MaisHumanaEvidence {", " rankdir=LR;", " node [shape=box, style=rounded];"]
    for node in graph.nodes:
        safe_label = node.label.replace('"', "'")[:80]
        if node.status in blocked_states:
            color = "red"
        elif node.status in warning_states:
            color = "orange"
        else:
            color = "gray"
        out.append(f' "{node.node_id}" [label="{safe_label}", color="{color}"];')
    for edge in graph.edges[:limit_edges]:
        safe_relation = edge.relation.replace('"', "'")[:60]
        out.append(f' "{edge.source}" -> "{edge.target}" [label="{safe_relation}"];')
    out.append("}")
    return "\n".join(out) + "\n"
def graph_rows(graph: EvidenceGraph) -> list[list[str]]:
    """Flatten nodes and edges into CSV-ready rows, header first."""
    header = ["kind", "source", "target", "relation", "status", "weight", "label"]
    node_rows = [
        ["node", node.node_id, "", node.node_type, node.status, str(node.weight), node.label]
        for node in graph.nodes
    ]
    edge_rows = [
        ["edge", edge.source, edge.target, edge.relation, "", str(edge.weight), edge.reason]
        for edge in graph.edges
    ]
    return [header, *node_rows, *edge_rows]
def graph_focus(graph: EvidenceGraph, token: str) -> tuple[str, ...]:
    """Return up to 80 unique node/edge lines mentioning *token* (case-insensitive)."""
    needle = token.lower()
    matches: list[str] = []
    for node in graph.nodes:
        node_fields = (node.node_id, node.label, node.status)
        if any(needle in value.lower() for value in node_fields):
            matches.append(f"node {node.node_id}: {node.label} ({node.status})")
    for edge in graph.edges:
        edge_fields = (edge.source, edge.target, edge.relation, edge.reason)
        if any(needle in value.lower() for value in edge_fields):
            matches.append(f"edge {edge.source}->{edge.target}: {edge.relation}")
    return merge_unique(matches)[:80]

View File

@@ -0,0 +1,146 @@
"""Searchable evidence index for generated human reports."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Sequence
from .models import Evidence, EvidenceKind, PlatformScan, as_plain_data
@dataclass(slots=True)
class EvidenceRecord:
    """Flattened, searchable view of one evidence item tied to a platform."""
    evidence_id: str  # synthetic id "<platform>-<index>-<kind>" built by record_from_evidence
    platform_id: str  # owning platform identifier
    kind: str  # evidence kind already normalized to a plain string
    reference: str  # where the evidence lives (path, URL, artifact name)
    summary: str  # short human-readable description
    confidence: float  # score used for strongest/weakest ranking and filtering
    tags: tuple[str, ...]  # free-form labels used by tag/text queries
    def to_dict(self) -> dict[str, object]:
        """Return a plain-data representation suitable for JSON export."""
        return as_plain_data(self)
@dataclass(slots=True)
class EvidenceQuery:
    """Filter criteria for query_evidence; unset (None / 0.0) fields match everything."""
    platform_id: str | None = None  # exact platform match when set
    kind: str | None = None  # exact kind match when set
    min_confidence: float = 0.0  # records with lower confidence are dropped
    tag: str | None = None  # record must carry this tag when set
    text: str | None = None  # case-insensitive substring over summary/reference/tags
def normalize_kind(kind: EvidenceKind | str) -> str:
    """Return the plain string value of *kind*, whether enum member or string."""
    if isinstance(kind, EvidenceKind):
        return kind.value
    return str(kind)
def record_from_evidence(platform_id: str, evidence: Evidence, index: int) -> EvidenceRecord:
    """Build an indexed EvidenceRecord from one raw Evidence entry.

    The id embeds the zero-padded *index* so records from the same platform
    keep a stable, sortable identity.
    """
    kind_value = normalize_kind(evidence.kind)
    record_id = f"{platform_id}-{index:04d}-{kind_value}"
    return EvidenceRecord(
        evidence_id=record_id,
        platform_id=platform_id,
        kind=kind_value,
        reference=evidence.reference,
        summary=evidence.summary,
        confidence=evidence.confidence,
        tags=tuple(evidence.tags),
    )
def build_evidence_index(scans: Sequence[PlatformScan]) -> tuple[EvidenceRecord, ...]:
    """Flatten all platform scans into a stable, sorted tuple of records.

    Sort order: platform, kind, descending confidence, then reference.
    """
    flattened = [
        record_from_evidence(scan.platform.platform_id, evidence, position)
        for scan in scans
        for position, evidence in enumerate(scan.evidence, start=1)
    ]
    flattened.sort(key=lambda record: (record.platform_id, record.kind, -record.confidence, record.reference))
    return tuple(flattened)
def query_evidence(records: Sequence[EvidenceRecord], query: EvidenceQuery) -> tuple[EvidenceRecord, ...]:
    """Return every record satisfying all populated fields of *query*.

    All criteria are AND-ed; the text filter is a case-insensitive substring
    search over summary, reference and tags.
    """
    needle = query.text.lower() if query.text else None

    def matches(record: EvidenceRecord) -> bool:
        if query.platform_id and record.platform_id != query.platform_id:
            return False
        if query.kind and record.kind != query.kind:
            return False
        if record.confidence < query.min_confidence:
            return False
        if query.tag and query.tag not in record.tags:
            return False
        if needle is not None:
            searchable = f"{record.summary} {record.reference} {' '.join(record.tags)}".lower()
            if needle not in searchable:
                return False
        return True

    return tuple(record for record in records if matches(record))
def evidence_counts_by_platform(records: Sequence[EvidenceRecord]) -> dict[str, int]:
    """Count evidence records per platform, keys sorted alphabetically."""
    tally: dict[str, int] = {}
    for record in records:
        tally.setdefault(record.platform_id, 0)
        tally[record.platform_id] += 1
    return dict(sorted(tally.items()))
def evidence_counts_by_kind(records: Sequence[EvidenceRecord]) -> dict[str, int]:
    """Count records per kind, ordered by descending count then kind name."""
    tally: dict[str, int] = {}
    for record in records:
        tally[record.kind] = 1 + tally.get(record.kind, 0)
    ordered = sorted(tally.items(), key=lambda pair: (-pair[1], pair[0]))
    return dict(ordered)
def strongest_evidence(records: Sequence[EvidenceRecord], limit: int = 25) -> tuple[EvidenceRecord, ...]:
    """Return the top *limit* records by confidence (ties: platform, then kind)."""
    ranked = sorted(records, key=lambda record: (-record.confidence, record.platform_id, record.kind))
    return tuple(ranked[:limit])
def weakest_evidence(records: Sequence[EvidenceRecord], limit: int = 25) -> tuple[EvidenceRecord, ...]:
    """Return the bottom *limit* records by confidence (ties: platform, then kind)."""
    ranked = sorted(records, key=lambda record: (record.confidence, record.platform_id, record.kind))
    return tuple(ranked[:limit])
def evidence_markdown(records: Sequence[EvidenceRecord]) -> str:
    """Render the evidence index as a markdown report.

    Sections: total, counts per platform, counts per kind, top-30 strongest
    records and bottom-20 weakest records for review.
    """
    def bullet(record: EvidenceRecord) -> str:
        return (
            f"- `{record.platform_id}` `{record.kind}` {record.confidence:.2f}: "
            f"{record.reference} - {record.summary}"
        )

    sections: list[str] = ["# Indice de evidencias humanas", ""]
    sections.append(f"Total de evidencias: `{len(records)}`")
    sections += ["", "## Por plataforma", ""]
    sections += [f"- {platform}: {total}" for platform, total in evidence_counts_by_platform(records).items()]
    sections += ["", "## Por tipo", ""]
    sections += [f"- {kind}: {total}" for kind, total in evidence_counts_by_kind(records).items()]
    sections += ["", "## Evidencias fortes", ""]
    sections += [bullet(record) for record in strongest_evidence(records, limit=30)]
    sections += ["", "## Evidencias fracas para revisar", ""]
    sections += [bullet(record) for record in weakest_evidence(records, limit=20)]
    return "\n".join(sections).strip() + "\n"
def evidence_records_for_platform(records: Sequence[EvidenceRecord], platform_id: str) -> tuple[EvidenceRecord, ...]:
    """Shortcut: all records belonging to a single platform."""
    platform_filter = EvidenceQuery(platform_id=platform_id)
    return query_evidence(records, platform_filter)
def evidence_records_for_human_surface(records: Sequence[EvidenceRecord]) -> tuple[EvidenceRecord, ...]:
    """Keep only evidence kinds that describe human-facing surfaces."""
    human_kinds = frozenset({"ui_surface", "mcp_tool", "observability", "business_rule", "security"})
    selected = [record for record in records if record.kind in human_kinds]
    return tuple(selected)
def evidence_gap_summary(
    records: Sequence[EvidenceRecord],
    expected_platforms: Iterable[str],
    minimum: int = 5,
) -> tuple[str, ...]:
    """List platforms with missing or thin evidence coverage.

    Args:
        records: flattened evidence records (see build_evidence_index).
        expected_platforms: platform ids that should appear in the index.
        minimum: smallest record count considered sufficient; defaults to 5,
            preserving the previously hard-coded threshold.

    Returns:
        One human-readable gap line per under-covered platform, in the order
        of *expected_platforms*; platforms at or above *minimum* are omitted.
    """
    counts = evidence_counts_by_platform(records)
    gaps: list[str] = []
    for platform_id in expected_platforms:
        count = counts.get(platform_id, 0)
        if count == 0:
            gaps.append(f"{platform_id}: nenhuma evidencia")
        elif count < minimum:
            gaps.append(f"{platform_id}: poucas evidencias ({count})")
    return tuple(gaps)

View File

@@ -0,0 +1,234 @@
"""Compile governance findings into service-order continuity."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Sequence
from .governance_models import EcosystemGovernancePortfolio, GovernanceOrderCandidate
from .models import OrderStatus, OrderType, ServiceOrder, incrementing_id, merge_unique, slugify
@dataclass(slots=True)
class CompiledOrderSet:
    """Service-order set produced from recommendations and governance checks."""
    service_orders: tuple[ServiceOrder, ...]  # deduplicated orders ready for the queue
    source_candidates: tuple[GovernanceOrderCandidate, ...]  # candidates the orders came from
    executive_count: int  # orders with OrderType.EXECUTIVE after dedupe
    managerial_count: int  # orders with OrderType.MANAGERIAL after dedupe
    notes: tuple[str, ...]  # shortfall explanations when minimums were not met
    def to_dict(self) -> dict[str, object]:
        """Return a plain-data representation; nested objects use their own to_dict."""
        return {
            "service_orders": [order.to_dict() for order in self.service_orders],
            "source_candidates": [candidate.to_dict() for candidate in self.source_candidates],
            "executive_count": self.executive_count,
            "managerial_count": self.managerial_count,
            "notes": self.notes,
        }
def priority_rank(value: str) -> int:
    """Map a priority label (pt or en, any case) to a numeric rank.

    Unknown labels rank 0 so they sort after every recognized priority.
    """
    ranks = {
        "critica": 4,
        "critico": 4,
        "critical": 4,
        "alta": 3,
        "high": 3,
        "media": 2,
        "medium": 2,
        "baixa": 1,
        "low": 1,
    }
    return ranks.get(value.lower(), 0)
def candidate_sort_key(candidate: GovernanceOrderCandidate) -> tuple[int, str, str]:
    """Sort key: highest priority first, then platform and title alphabetically."""
    rank = priority_rank(candidate.priority)
    return (-rank, candidate.platform_id, candidate.title)
def candidate_to_service_order(candidate: GovernanceOrderCandidate, index: int, project_id: str = "tudo-para-ia-mais-humana") -> ServiceOrder:
    """Materialize a governance candidate as a planned ServiceOrder.

    The order id prefix reflects the order type; the scope embeds the related
    platform and the governance checks that originated the candidate.
    """
    is_executive = candidate.order_type == OrderType.EXECUTIVE
    prefix = "EXECUTIVA" if is_executive else "GERENCIAL"
    scope = (
        f"Plataforma relacionada: {candidate.platform_id}. "
        "Checks de origem: " + ", ".join(candidate.source_check_ids)
    )
    # Fixed "done" criteria applied to every governance-derived order.
    ready = (
        "check de governanca reavaliado",
        "evidencia registrada",
        "validacao executada ou pendencia real declarada",
        "SQL semantico atualizado",
    )
    return ServiceOrder(
        order_id=incrementing_id(prefix, index, candidate.title),
        order_type=candidate.order_type,
        project_id=project_id,
        title=candidate.title,
        purpose=candidate.purpose,
        object_scope=scope,
        reason=candidate.reason,
        expected_result=candidate.expected_result,
        affected_paths=candidate.affected_paths,
        validations=candidate.validations,
        ready_criteria=ready,
        status=OrderStatus.PLANNED,
        priority=candidate.priority,
    )
def dedupe_service_orders(orders: Iterable[ServiceOrder]) -> tuple[ServiceOrder, ...]:
    """Drop repeated orders, keeping the first occurrence.

    Two orders are duplicates when they share type, slugified title and the
    first 80 characters of scope.
    """
    kept: list[ServiceOrder] = []
    fingerprints: set[tuple[str, str, str]] = set()
    for order in orders:
        fingerprint = (order.order_type.value, slugify(order.title), order.object_scope[:80])
        if fingerprint not in fingerprints:
            fingerprints.add(fingerprint)
            kept.append(order)
    return tuple(kept)
def select_candidates(candidates: Sequence[GovernanceOrderCandidate], order_type: OrderType, limit: int) -> tuple[GovernanceOrderCandidate, ...]:
    """Pick up to *limit* candidates of the given type, best-ranked first."""
    matching = sorted(
        (candidate for candidate in candidates if candidate.order_type == order_type),
        key=candidate_sort_key,
    )
    return tuple(matching[:limit])
def compile_governance_orders(
    portfolio: EcosystemGovernancePortfolio,
    min_executive: int = 5,
    min_managerial: int = 5,
    project_id: str = "tudo-para-ia-mais-humana",
) -> CompiledOrderSet:
    """Turn portfolio candidates into deduplicated executive/managerial orders.

    Never fabricates artificial orders: when fewer real candidates exist than
    the requested minimum, the shortfall is recorded as a note instead.
    """
    executives = list(select_candidates(portfolio.order_candidates, OrderType.EXECUTIVE, min_executive))
    managers = list(select_candidates(portfolio.order_candidates, OrderType.MANAGERIAL, min_managerial))

    notes: list[str] = []
    if len(executives) < min_executive:
        notes.append(
            f"Somente {len(executives)} candidatas executivas reais foram encontradas; "
            "nao foram criadas ordens artificiais."
        )
    if len(managers) < min_managerial:
        notes.append(
            f"Somente {len(managers)} candidatas gerenciais reais foram encontradas; "
            "nao foram criadas ordens artificiais."
        )

    # Each type gets its own 1-based numbering, matching the id prefixes.
    drafted = [
        candidate_to_service_order(candidate, position, project_id)
        for position, candidate in enumerate(executives, start=1)
    ]
    drafted += [
        candidate_to_service_order(candidate, position, project_id)
        for position, candidate in enumerate(managers, start=1)
    ]
    final_orders = dedupe_service_orders(drafted)
    return CompiledOrderSet(
        service_orders=final_orders,
        source_candidates=tuple(executives + managers),
        executive_count=sum(1 for order in final_orders if order.order_type == OrderType.EXECUTIVE),
        managerial_count=sum(1 for order in final_orders if order.order_type == OrderType.MANAGERIAL),
        notes=tuple(notes),
    )
def merge_order_sets(primary: Sequence[ServiceOrder], governance: Sequence[ServiceOrder], min_executive: int = 5, min_managerial: int = 5) -> tuple[ServiceOrder, ...]:
    """Top up *primary* with governance orders until per-type minimums are met.

    Governance orders beyond a minimum are skipped; the combined list is
    deduplicated before returning.
    """
    combined: list[ServiceOrder] = list(primary)
    exec_total = sum(1 for order in combined if order.order_type == OrderType.EXECUTIVE)
    mgr_total = sum(1 for order in combined if order.order_type == OrderType.MANAGERIAL)
    for extra in governance:
        is_executive = extra.order_type == OrderType.EXECUTIVE
        if is_executive and exec_total >= min_executive:
            continue
        if extra.order_type == OrderType.MANAGERIAL and mgr_total >= min_managerial:
            continue
        combined.append(extra)
        # Mirrors the original accounting: anything non-executive counts as managerial.
        if is_executive:
            exec_total += 1
        else:
            mgr_total += 1
    return dedupe_service_orders(combined)
def compiled_orders_markdown(compiled: CompiledOrderSet) -> str:
    """Render the compiled order set as a markdown dossier.

    Header counts, optional notes section, then one subsection per order.
    """
    doc: list[str] = [
        "# Ordens compiladas por governanca",
        "",
        f"- executivas: `{compiled.executive_count}`",
        f"- gerenciais: `{compiled.managerial_count}`",
        f"- candidatas de origem: `{len(compiled.source_candidates)}`",
        "",
    ]
    if compiled.notes:
        doc += ["## Observacoes", ""]
        doc += [f"- {note}" for note in compiled.notes]
        doc.append("")
    doc += ["## Ordens", ""]
    for order in compiled.service_orders:
        doc += [
            f"### {order.order_id}",
            "",
            f"- tipo: `{order.order_type.value}`",
            f"- prioridade: `{order.priority}`",
            f"- titulo: {order.title}",
            f"- motivo: {order.reason}",
            f"- resultado: {order.expected_result}",
            "- validacoes:",
        ]
        doc += [f"  - {validation}" for validation in order.validations]
        doc.append("")
    return "\n".join(doc).strip() + "\n"
def order_coverage_rows(compiled: CompiledOrderSet) -> list[list[str]]:
    """Tabulate each compiled order for CSV export, header first."""
    header = ["order_id", "type", "priority", "title", "paths", "validations"]
    body = [
        [
            order.order_id,
            order.order_type.value,
            order.priority,
            order.title,
            " | ".join(order.affected_paths),
            " | ".join(order.validations),
        ]
        for order in compiled.service_orders
    ]
    return [header, *body]
def source_candidate_rows(compiled: CompiledOrderSet) -> list[list[str]]:
    """Tabulate the originating governance candidates for CSV export."""
    header = ["candidate_id", "platform", "type", "priority", "title", "source_checks"]
    body = [
        [
            candidate.candidate_id,
            candidate.platform_id,
            candidate.order_type.value,
            candidate.priority,
            candidate.title,
            " | ".join(candidate.source_check_ids),
        ]
        for candidate in compiled.source_candidates
    ]
    return [header, *body]
def active_queue_from_orders(orders: Sequence[ServiceOrder]) -> tuple[str, ...]:
    """Return ids of every order still in PLANNED status, preserving order."""
    queue = [order.order_id for order in orders if order.status == OrderStatus.PLANNED]
    return tuple(queue)
def explain_order_gap(compiled: CompiledOrderSet, order_type: OrderType, minimum: int = 5) -> str:
    """Describe whether the per-type order minimum was met for *compiled*."""
    if order_type == OrderType.EXECUTIVE:
        actual, label = compiled.executive_count, "executivas"
    else:
        actual, label = compiled.managerial_count, "gerenciais"
    if actual >= minimum:
        return f"Minimo de ordens {label} cumprido: {actual}/{minimum}."
    return f"Minimo de ordens {label} parcial: {actual}/{minimum}; faltam checks reais suficientes."
def order_set_summary(compiled: CompiledOrderSet) -> tuple[str, ...]:
    """Four-line status digest: both gap checks, candidate and queue counts."""
    active = active_queue_from_orders(compiled.service_orders)
    return (
        explain_order_gap(compiled, OrderType.EXECUTIVE),
        explain_order_gap(compiled, OrderType.MANAGERIAL),
        f"Candidatas usadas: {len(compiled.source_candidates)}.",
        f"Fila ativa compilada: {len(active)}.",
    )
def combine_order_notes(*sets: CompiledOrderSet) -> tuple[str, ...]:
    """Merge notes and summaries from several compiled sets, deduplicated in order."""
    collected: list[str] = []
    for compiled in sets:
        collected.extend(compiled.notes)
        collected.extend(order_set_summary(compiled))
    return merge_unique(collected)

View File

@@ -0,0 +1,856 @@
"""Governance check catalog for the Mais Humana platform.
The catalog is intentionally declarative. Each check is a small operational
contract that can be evaluated against a repository scan, a human report, and a
round dossier. The checks are not meant to replace project-specific tests.
They create a common language for deciding whether a platform is merely
technical, actually explainable to people, ready for the panel/GPT same-source
model, or blocked by an external/credential/decision dependency.
"""
from __future__ import annotations
from typing import Iterable
from .governance_models import (
CheckTemplate,
GovernanceAxis,
GovernanceDomain,
GovernanceSeverity,
)
from .models import OrderType
def check(
    check_id: str,
    axis: GovernanceAxis,
    domain: GovernanceDomain,
    title: str,
    purpose: str,
    positive_terms: Iterable[str],
    negative_terms: Iterable[str],
    required_terms: Iterable[str],
    evidence_hints: Iterable[str],
    applies_to: Iterable[str],
    not_applicable_reason: str,
    pass_summary: str,
    attention_summary: str,
    fail_summary: str,
    suggested_action: str,
    validation_steps: Iterable[str],
    severity: GovernanceSeverity = GovernanceSeverity.MEDIUM,
    required_for_human_ready: bool = False,
    creates_order_type: OrderType = OrderType.MANAGERIAL,
    order_title: str = "",
    order_priority: str = "media",
) -> CheckTemplate:
    """Build a CheckTemplate, coercing every iterable argument to a tuple.

    Thin declarative factory used by CHECK_TEMPLATES below: positional/keyword
    arguments map one-to-one onto CheckTemplate fields, so check definitions
    stay compact. The *_terms iterables are matching vocabularies (positive,
    negative and required), *applies_to* lists the platform ids the check
    targets, and the remaining strings are the human summaries and the
    follow-up order metadata attached when the check fails.
    """
    return CheckTemplate(
        check_id=check_id,
        axis=axis,
        domain=domain,
        title=title,
        purpose=purpose,
        positive_terms=tuple(positive_terms),
        negative_terms=tuple(negative_terms),
        required_terms=tuple(required_terms),
        evidence_hints=tuple(evidence_hints),
        applies_to=tuple(applies_to),
        not_applicable_reason=not_applicable_reason,
        pass_summary=pass_summary,
        attention_summary=attention_summary,
        fail_summary=fail_summary,
        suggested_action=suggested_action,
        validation_steps=tuple(validation_steps),
        severity=severity,
        required_for_human_ready=required_for_human_ready,
        creates_order_type=creates_order_type,
        order_title=order_title,
        order_priority=order_priority,
    )
# Every platform in the current operational catalog; the default applicability
# set for checks declared below.
ALL_PLATFORMS = (
    "business",
    "compliance",
    "customer_ops",
    "docs",
    "finance",
    "gettys",
    "identity",
    "integracoes",
    "intelligence",
    "mcps",
    "platform_base",
    "public",
    "stj",
    "ui",
)
# Platforms targeted by the panel/GPT same-source and regression checks.
CORE_PLATFORMS = ("business", "identity", "integracoes", "docs", "mcps", "ui")
# Platforms with a sellable offering; targeted by the Business entitlement checks.
SELLABLE_PLATFORMS = ("business", "integracoes", "stj", "public", "finance")
# Grouping for support-oriented checks (not referenced in the visible templates — verify usage).
SUPPORT_PLATFORMS = ("customer_ops", "integracoes", "business", "identity", "compliance")
# Platforms whose checks depend on Docs canonicality.
DOCS_RELATED = ("docs", "mcps", "ui", "platform_base", "compliance", "public")
# Grouping for cloud/provider-related checks (presumably BYOK-adjacent — verify usage).
CLOUD_RELATED = ("integracoes", "mcps", "ui", "public", "gettys", "stj")
CHECK_TEMPLATES: tuple[CheckTemplate, ...] = (
check(
"repository.exists",
GovernanceAxis.LOCAL_TRACEABILITY,
GovernanceDomain.REPOSITORY,
"Repositorio local existe",
"Confirma que ha espelho local para leitura material antes de gerar avaliacao humana.",
("repositorio real encontrado", "repo.exists", "git local", "readme"),
("repositorio real nao encontrado", "caminho nao encontrado", "not_found"),
("repo_path",),
("repo_path", "README.md", ".git"),
ALL_PLATFORMS,
"A plataforma nao faz parte do catalogo operacional desta rodada.",
"Repositorio local foi encontrado e pode ser analisado.",
"Repositorio existe, mas ainda precisa de evidencias complementares.",
"Repositorio local nao foi encontrado; nao ha base material suficiente.",
"criar ou clonar o repositorio real sem numero da pasta gerencial",
("verificar caminho local", "confirmar .git", "registrar hash final"),
GovernanceSeverity.CRITICAL,
True,
OrderType.EXECUTIVE,
"Criar ou recuperar espelho local do repositorio real",
"alta",
),
check(
"repository.git-traceability",
GovernanceAxis.LOCAL_TRACEABILITY,
GovernanceDomain.REPOSITORY,
"Git local e remoto rastreaveis",
"Garante que a rodada possa informar status, commit, push e hash final.",
("git local detectado", "remote_origin", "branch", "head"),
("sem .git", "git local nao detectado", "dubious ownership", "index.lock"),
("branch", "head"),
(".git", "git status", "remote"),
ALL_PLATFORMS,
"A plataforma nao possui repositorio operacional nesta leitura.",
"Git local foi detectado e possui metadados uteis.",
"Git existe parcialmente ou requer validacao de remote/branch.",
"Git local esta ausente ou inacessivel para fechamento da OS.",
"corrigir permissao, safe.directory ou inicializar Git com origin correto",
("git status --short --branch", "git remote -v", "git rev-parse HEAD"),
GovernanceSeverity.HIGH,
True,
OrderType.EXECUTIVE,
"Restaurar rastreabilidade Git e sincronizacao",
"alta",
),
check(
"documentation.human-readme",
GovernanceAxis.HUMAN_DOCUMENTATION,
GovernanceDomain.DOCUMENTATION,
"README tecnico-humano minimo",
"Verifica se a plataforma explica missao, comandos e criterio humano sem exigir leitura do codigo.",
("readme tecnico", "missao", "execucao", "validacao", "papel no ecossistema"),
("readme tecnico nao encontrado", "sem documentacao inicial"),
("readme",),
("README.md", "docs/README.md"),
ALL_PLATFORMS,
"A plataforma nao precisa expor README separado neste contexto.",
"README ou documentacao principal foi encontrada.",
"README existe, mas precisa explicitar comandos, pessoas e evidencias.",
"README tecnico-humano nao foi encontrado.",
"criar README com missao, comandos, validacoes, relacao MCP e criterio de pronto",
("abrir README", "conferir comandos", "comparar com reports gerados"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Criar README tecnico-humano reconciliado",
),
check(
"documentation.runbook",
GovernanceAxis.HUMAN_DOCUMENTATION,
GovernanceDomain.DOCUMENTATION,
"Runbook operacional para humanos",
"Confirma que suporte, tecnico e gestor possuem proxima acao operacional clara.",
("runbook", "playbook", "proxima acao", "diagnostico", "comandos humanos"),
("sem runbook", "sem proxima acao", "erro bruto"),
(),
("runbook", "COMANDOS-HUMANOS-EQUIVALENTES", "playbooks"),
ALL_PLATFORMS,
"Nao ha runbook esperado para esta plataforma.",
"Runbook ou playbook operacional foi detectado.",
"Ha sinais de operacao, mas sem runbook humano completo.",
"A plataforma nao oferece caminho operacional claro para humanos.",
"criar playbook por perfil com diagnostico, acao, evidencia e limite seguro",
("validar playbook", "executar comando de smoke", "registrar evidencia"),
GovernanceSeverity.MEDIUM,
False,
OrderType.MANAGERIAL,
"Consolidar runbooks e comandos humanos equivalentes",
),
check(
"contract.openapi-or-equivalent",
GovernanceAxis.CONTRACT_VERSIONING,
GovernanceDomain.CONTRACT,
"Contrato OpenAPI ou equivalente",
"Evita que painel, GPT e suporte dependam de implementacao implícita sem contrato audivel.",
("openapi", "swagger", "schema", "contract", "contrato", "surfaceVersion"),
("openapi nao encontrado", "sem contrato", "contract missing"),
(),
("openapi.json", "schema", "contract"),
ALL_PLATFORMS,
"Esta plataforma nao expoe API ou surface operacional nesta etapa.",
"Contrato ou schema auditavel foi detectado.",
"Contrato existe parcialmente ou precisa de versionamento.",
"Contrato OpenAPI/equivalente nao foi encontrado.",
"publicar contrato minimo versionado ou declarar contrato alternativo na central",
("validar JSON/schema", "comparar rotas", "registrar contractVersion"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Publicar contrato operacional versionado",
),
check(
"contract.version-policy",
GovernanceAxis.CONTRACT_VERSIONING,
GovernanceDomain.CONTRACT,
"Politica de versao e compatibilidade",
"Garante que alteracoes futuras tenham schemaVersion, deprecacao e migracao controlada.",
("schemaVersion", "contractVersion", "compatibilityVersion", "deprecated", "migration"),
("breaking change sem registro", "sem versionamento", "versao ausente"),
("version",),
("contractVersion", "schemaVersion", "migrationNotes"),
ALL_PLATFORMS,
"Nao ha contrato vivo nesta plataforma ainda.",
"Versionamento de contrato aparece na evidencia.",
"Ha contrato, mas a politica de compatibilidade ainda precisa ser formalizada.",
"Sem versionamento, a plataforma acumula risco de regressao silenciosa.",
"criar politica de versionamento, deprecacao, substituicao e notas de migracao",
("exportar contrato", "validar versao", "registrar breakingChanges"),
GovernanceSeverity.MEDIUM,
False,
OrderType.MANAGERIAL,
"Formalizar politica de versao e compatibilidade",
),
check(
"tests.detected",
GovernanceAxis.TESTABLE_BEHAVIOR,
GovernanceDomain.TESTS,
"Testes ou smoke detectaveis",
"Confirma que a plataforma possui validacao automatizada ou smoke local rastreavel.",
("test", "spec", "smoke", "unittest", "vitest", "pytest", "node --test"),
("testes nao encontrados", "sem teste", "no tests"),
(),
("tests", "test.spec", "package.json", "pyproject.toml"),
ALL_PLATFORMS,
"Esta plataforma ainda esta somente planejada.",
"Teste ou smoke detectavel foi encontrado.",
"Ha sinais de teste, mas falta suite canonica do contrato humano.",
"Nenhum teste/smoke detectavel foi encontrado.",
"criar smoke canonico para health, readiness, contrato e relatorio humano",
("executar suite", "registrar saida", "incluir comando em README"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Criar testes canonicos de prontidao humana",
),
check(
"tests.regression",
GovernanceAxis.TESTABLE_BEHAVIOR,
GovernanceDomain.TESTS,
"Regressao de contrato e painel",
"Detecta se uma plataforma que era panelReady ou sameSource perdeu prontidao.",
("regression", "snapshot", "delta", "sameSource", "panelReady", "sourceHash"),
("regressao sem alerta", "perdeu panelReady", "mismatch"),
(),
("snapshot", "DELTA-MATURIDADE", "quality-gates"),
CORE_PLATFORMS,
"Regressao de painel nao se aplica a plataforma fora do nucleo de surfaces.",
"Sinais de regressao e snapshot foram encontrados.",
"Snapshots existem, mas precisam virar falha automatica quando houver divergencia.",
"Nao ha mecanismo claro de regressao para contrato/painel.",
"criar teste de regressao para listagem, detalhe, validacao, diagnostico e contrato",
("gerar snapshot", "comparar delta", "falhar se sourceHash divergir"),
GovernanceSeverity.HIGH,
False,
OrderType.EXECUTIVE,
"Implementar regressao de panelReady e sameSource",
"alta",
),
check(
"panel.backend-mcp",
GovernanceAxis.PANEL_BACKEND,
GovernanceDomain.MCP,
"MCP como backend do painel humano",
"Garante que painel nao crie backend paralelo para dados que o GPT tambem explica.",
("admin_ui", "screenData", "screenAction", "viewInstance", "panelReady"),
("backend paralelo", "fonte paralela", "dados divergentes"),
("panelReady",),
("admin_ui", "screen", "viewInstance"),
CORE_PLATFORMS,
"A plataforma nao entrega surface de painel nesta etapa.",
"Ha evidencia de backend MCP ou contrato admin_ui.",
"Ha sinais de painel, mas o backend MCP precisa ser explicitado.",
"A surface humana nao esta ligada ao MCP como fonte operacional.",
"formalizar admin_ui, screenData, screenAction, diagnostics e evidence no MCP",
("listar telas", "detalhar tela", "validar panelReady"),
GovernanceSeverity.HIGH,
True,
OrderType.MANAGERIAL,
"Consolidar MCP como backend oficial do painel humano",
"alta",
),
check(
"panel.same-source",
GovernanceAxis.GPT_PANEL_EQUIVALENCE,
GovernanceDomain.MCP,
"Mesma fonte para GPT e painel",
"A UI humana e a explicacao GPT devem usar o mesmo contrato, hashes e evidencia.",
("sameSource", "same source", "sourcePayloadHash", "sourceRecordsHash", "dataTruth"),
("sameSource false", "source mismatch", "divergente", "truthIssues"),
("source", "hash"),
("sourcePayloadHash", "sourceRecordsHash", "sameSource"),
CORE_PLATFORMS,
"A plataforma nao possui surface compartilhada GPT/painel.",
"Mesma fonte ou hashes de fonte foram detectados.",
"Ha painel, mas faltam hashes ou prova da mesma fonte.",
"Painel e GPT podem explicar estados diferentes.",
"reconciliar sourceEndpoint, sourceToolId, payload hash e records hash",
("comparar payload", "validar sourceHash", "gerar evidencia HTTP"),
GovernanceSeverity.HIGH,
True,
OrderType.EXECUTIVE,
"Reconciliar mesma fonte entre GPT e painel",
"alta",
),
check(
"panel.compact-response",
GovernanceAxis.PANEL_BACKEND,
GovernanceDomain.MCP,
"Resposta compacta para auditoria humana",
"Evita estouro de contexto e torna o painel/gpt auditavel em escala.",
("summaryOnly", "failuresOnly", "limit", "cursor", "mode=compact", "compact"),
("resposta grande demais", "sem paginacao", "timeout por payload"),
(),
("limit", "cursor", "summaryOnly", "failuresOnly"),
CORE_PLATFORMS,
"Nao ha ferramenta de auditoria em escala nesta plataforma.",
"Ha suporte a resposta compacta, filtros ou paginacao.",
"Ha dados de painel, mas faltam filtros operacionais.",
"Resposta grande demais reduz auditabilidade humana.",
"implementar summaryOnly, failuresOnly, limit, cursor e includeRaw=false",
("chamar modo compact", "comparar com modo full", "validar limites"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Adicionar resposta compacta e paginada para auditoria",
),
check(
"identity.actor-scope",
GovernanceAxis.IDENTITY_SCOPE,
GovernanceDomain.IDENTITY,
"Ator, organizacao e escopo claros",
"A acao humana precisa saber quem chamou, em nome de quem e com qual permissao.",
("actorId", "organizationId", "tenant", "scope", "role", "identity"),
("usuario desconhecido", "test_user_not_found", "scope ausente"),
("actor", "organization"),
("identity", "rbac", "organizationId", "tenant"),
("identity", "business", "integracoes", "customer_ops", "compliance"),
"A plataforma nao executa acao por usuario ou organizacao nesta etapa.",
"Ator, organizacao ou escopo aparecem no contrato.",
"Ha identidade parcial, mas falta cadeia completa de ator/tenant/role.",
"Sem ator e escopo, a acao humana fica insegura.",
"amarrar user, organization, role, scope, tenant e impersonation controlado",
("criar usuario teste", "vincular organizacao", "validar negacao RBAC"),
GovernanceSeverity.HIGH,
True,
OrderType.EXECUTIVE,
"Consolidar ator, organizacao e escopo Identity",
"alta",
),
check(
"identity.rbac-deny",
GovernanceAxis.IDENTITY_SCOPE,
GovernanceDomain.IDENTITY,
"Matriz RBAC com negacao provada",
"Maturidade real exige provar permissoes negadas, nao apenas caminhos felizes.",
("rbac", "deny", "forbidden", "403", "permission", "matriz de negacao"),
("sem negacao", "permissao aberta", "allow all"),
(),
("rbac", "403", "permission"),
("identity", "business", "integracoes", "ui", "mcps"),
"A plataforma nao possui acao sensivel nesta etapa.",
"Sinais de RBAC e negacao foram detectados.",
"RBAC aparece, mas faltam cenarios de negacao.",
"Sem negacao provada, o controle de acesso nao e auditavel.",
"criar matriz de permissao com allowed/denied e testes por perfil",
("executar deny case", "registrar payload sanitizado", "validar auditId"),
GovernanceSeverity.HIGH,
False,
OrderType.EXECUTIVE,
"Adicionar matriz RBAC de negacao e evidencia",
"alta",
),
check(
"business.entitlement",
GovernanceAxis.BUSINESS_GATE,
GovernanceDomain.BUSINESS,
"Entitlement e plano como fonte comercial",
"Produto vendavel precisa consultar Business para plano, limite, franquia e bloqueio.",
("entitlement", "plano", "quota", "franquia", "bloqueio", "checkout", "billing"),
("sem entitlement", "regra comercial propria", "plano divergente"),
("entitlement",),
("Business", "entitlement", "billing", "quota"),
SELLABLE_PLATFORMS,
"A plataforma nao tem oferta vendavel direta nesta leitura.",
"Sinais de entitlement/plano foram detectados.",
"Ha readiness comercial parcial, mas falta fonte unica Business.",
"Sem entitlement, produto pode ser vendido ou liberado incorretamente.",
"conectar plano, produto, usage, quota, bloqueio e liberacao via Business",
("consultar entitlement", "simular bloqueio", "validar consumo"),
GovernanceSeverity.HIGH,
True,
OrderType.MANAGERIAL,
"Consolidar Business como fonte comercial unica",
"alta",
),
check(
"business.blocker-isolation",
GovernanceAxis.BUSINESS_GATE,
GovernanceDomain.BUSINESS,
"Blocker isolado por produto",
"Um token pendente de Stripe ou provider nao deve contaminar telas e produtos independentes.",
("productId", "providerId", "blockerId", "isolado", "stage", "readiness por produto"),
("blocker global indevido", "contamina", "global blocker sem produto"),
(),
("productId", "blockerId", "providerId"),
SELLABLE_PLATFORMS,
"Nao ha produto vendavel com blocker comercial nesta plataforma.",
"Ha sinais de isolamento por produto/provider.",
"Blocker existe, mas falta separar impacto por produto.",
"Blocker global indevido reduz maturidade e confianca humana.",
"classificar blockers por productId/providerId/stage e impacto comercial",
("listar blockers", "validar produto independente", "gerar matriz de impacto"),
GovernanceSeverity.MEDIUM,
False,
OrderType.MANAGERIAL,
"Isolar blockers comerciais por produto e provider",
),
check(
"docs.canonical-read",
GovernanceAxis.DOCS_CANONICALITY,
GovernanceDomain.DOCS,
"Leitura Docs canonica ou excecao formal",
"Docs nao pode ficar como blocker ambiguo entre catalogOnly e fonte operacional.",
("responseReady", "canonical", "leitura canonica", "excecao formal", "catalogOnly deliberado"),
("catalogOnly precisa decisao", "catalogOnly blocker", "docs catalogOnly"),
("docs",),
("Docs", "catalogOnly", "responseReady"),
DOCS_RELATED,
"Docs nao e dependencia primaria desta plataforma.",
"Docs tem leitura canonica ou excecao formal detectada.",
"Docs aparece, mas a decisao catalogOnly/responseReady precisa ser formalizada.",
"Docs permanece como blocker documental ambiguo.",
"promover leitura Docs responseReady minima ou registrar excecao catalogOnly deliberada",
("consultar Docs", "registrar decisao", "atualizar readiness global"),
GovernanceSeverity.HIGH,
True,
OrderType.EXECUTIVE,
"Resolver Docs catalogOnly por leitura minima ou excecao formal",
"alta",
),
check(
"docs.contract-reconciliation",
GovernanceAxis.DOCS_CANONICALITY,
GovernanceDomain.DOCS,
"Contrato documentado reconciliado com operacao",
"Docs deve explicar contratos vivos, evidencias e guias sem virar copia solta.",
("contrato", "schema", "guia", "evidencia", "hash", "documento canonico"),
("documentacao divergente", "sem hash", "docs desatualizado"),
(),
("contrato", "hash", "guia", "evidencia"),
DOCS_RELATED,
"Nao ha dependencia documental explicita.",
"Contrato/documentacao aparece reconciliado.",
"Documentacao existe, mas falta hash ou reconciliacao com operacao.",
"Sem reconciliacao, Docs deixa de ser fonte confiavel.",
"registrar contrato, hash, guia e evidencia por plataforma em Docs",
("comparar contrato", "gerar hash", "atualizar guia"),
GovernanceSeverity.MEDIUM,
False,
OrderType.MANAGERIAL,
"Reconciliar Docs com contratos e evidencias operacionais",
),
check(
"integrations.byok-chain",
GovernanceAxis.BYOK_CREDENTIALS,
GovernanceDomain.INTEGRATIONS,
"Jornada BYOK ponta a ponta",
"Integrações precisam de usuario, organizacao, entitlement, credentialRef, smoke e consumo.",
("BYOK", "credentialRef", "organization", "entitlement", "smoke", "usage", "consumo"),
("test_user_not_found", "needs_token", "credential missing", "sem credentialRef"),
("credentialRef", "smoke"),
("BYOK", "credentialRef", "smoke", "usage"),
("integracoes", "business", "identity", "mcps"),
"A plataforma nao opera provider externo por tenant nesta etapa.",
"Cadeia BYOK possui sinais fortes.",
"BYOK aparece, mas faltam uma ou mais etapas da cadeia.",
"BYOK incompleto impede autosservico real.",
"executar fluxo organizacao, usuario, entitlement, credentialRef, smoke, consumo e auditoria",
("criar organizacao", "criar usuario", "gerar credentialRef", "executar smoke readonly"),
GovernanceSeverity.HIGH,
True,
OrderType.EXECUTIVE,
"Provar jornada BYOK ponta a ponta sem vazamento",
"alta",
),
check(
"integrations.credential-redaction",
GovernanceAxis.SECRET_REDACTION,
GovernanceDomain.SECURITY,
"Segredo nunca exposto em relatorios",
"A plataforma deve usar refs e redaction para impedir vazamento em trace, audit e Markdown.",
("credentialRef", "redaction", "masked", "sanitized", "sem segredo", "secretRef"),
("secret=", "api_key", "token bruto", "password", "vazamento"),
("redaction",),
("REDACTION-CHECK", "credentialRef", "masked"),
("integracoes", "identity", "mcps", "compliance", "business"),
"Nao ha segredo operacional nesta plataforma.",
"Sinais de redaction/credentialRef foram detectados.",
"Ha refs seguras, mas falta prova automatica de nao vazamento.",
"Possivel segredo bruto ou ausencia de redaction.",
"executar varredura de segredo, mascaramento e politica de campos proibidos",
("rodar redaction check", "validar JSON", "inspecionar relatorios"),
GovernanceSeverity.CRITICAL,
True,
OrderType.EXECUTIVE,
"Blindar redaction e referencias de credencial",
"alta",
),
check(
"integrations.provider-stage",
GovernanceAxis.BYOK_CREDENTIALS,
GovernanceDomain.INTEGRATIONS,
"Stage operacional por provider",
"Cada provider precisa ficar em catalogOnly, sandbox, pilot, sellable ou blocked com criterio claro.",
("providerId", "stage", "catalogOnly", "sandbox", "pilot", "sellable", "blocked"),
("simulated_ready sem stage", "provider sem status", "maturidade indefinida"),
("provider", "stage"),
("providerId", "readiness", "stage"),
("integracoes", "business", "mcps"),
"Nao ha provider externo ligado a esta plataforma.",
"Stage por provider foi detectado.",
"Providers existem, mas falta uma regua de maturidade comercial/tecnica.",
"Provider sem stage claro pode ser vendido antes da hora.",
"classificar cada provider em lifecycle comercial e tecnico unico",
("listar providers", "atribuir stage", "validar blockers por provider"),
GovernanceSeverity.HIGH,
False,
OrderType.MANAGERIAL,
"Normalizar maturidade comercial por provider",
"alta",
),
check(
"support.diagnostic-next-action",
GovernanceAxis.SUPPORT_DIAGNOSTICS,
GovernanceDomain.SUPPORT,
"Diagnostico de suporte com proxima acao",
"Quando falha, suporte precisa saber origem, impacto e proximo passo sem expor segredo.",
("diagnostic", "diagnostico", "nextAction", "ticket", "incident", "support", "handoff"),
("erro bruto", "stack trace para cliente", "sem proxima acao"),
("diagnostic",),
("diagnostic", "ticket", "nextAction"),
SUPPORT_PLATFORMS,
"Esta plataforma nao possui fluxo de suporte direto nesta etapa.",
"Diagnostico ou nextAction foi detectado.",
"Ha suporte parcial, mas falta proxima acao padronizada.",
"Sem diagnostico, suporte humano fica bloqueado.",
"criar diagnostico sanitizado com causa, impacto, proxima acao e evidencia",
("simular falha", "validar nextAction", "abrir ticket de exemplo"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Adicionar diagnostico sanitizado para suporte",
),
check(
"support.incident-lifecycle",
GovernanceAxis.INCIDENT_LIFECYCLE,
GovernanceDomain.CUSTOMER_OPS,
"Ciclo de incidente rastreavel",
"Incidentes devem abrir, atualizar, resolver, auditar e virar aprendizagem operacional.",
("incident", "incidente", "status", "resolved", "handoff", "sla", "timeline"),
("incidente sem fechamento", "sem historico", "sem status"),
(),
("incident", "timeline", "SLA"),
("customer_ops", "identity", "integracoes", "business", "mcps"),
"Nao ha fluxo de incidente esperado nesta plataforma.",
"Ciclo de incidente foi detectado.",
"Ha incidentes, mas falta timeline ou fechamento.",
"Incidente sem ciclo completo prejudica atendimento e auditoria.",
"implementar ciclo aberto/em andamento/resolvido com evidenceId e owner",
("abrir incidente", "atualizar status", "fechar com evidencia"),
GovernanceSeverity.MEDIUM,
False,
OrderType.MANAGERIAL,
"Consolidar lifecycle de incidentes e handoffs",
),
check(
"observability.audit-trace",
GovernanceAxis.AUDIT_EVIDENCE,
GovernanceDomain.OBSERVABILITY,
"Trace, audit e evidenceId",
"Toda acao relevante precisa de trace, audit e evidencia sem conteudo sensivel.",
("traceId", "auditId", "evidenceId", "audit", "trace", "evidencia"),
("sem audit", "sem trace", "trace bruto", "audit ausente"),
("audit", "trace"),
("traceId", "auditId", "evidenceId"),
ALL_PLATFORMS,
"A plataforma nao executa acao auditavel nesta leitura.",
"Trace/audit/evidencia aparecem nos sinais.",
"Observabilidade existe, mas falta padrao auditId/evidenceId.",
"Sem trace e audit a entrega nao fica verificavel.",
"padronizar traceId, auditId, evidenceId, latency e status por chamada",
("executar smoke", "capturar auditId", "validar redaction"),
GovernanceSeverity.HIGH,
True,
OrderType.EXECUTIVE,
"Padronizar trace, audit e evidenceId",
"alta",
),
check(
"observability.health-readiness",
GovernanceAxis.READINESS_HEALTH,
GovernanceDomain.OBSERVABILITY,
"Health e readiness vivos",
"A pessoa precisa saber se a plataforma esta pronta, parcial ou bloqueada agora.",
("health", "readiness", "ready", "status", "ok", "blocked"),
("health missing", "readiness ausente", "status desconhecido"),
("health", "readiness"),
("health", "readiness", "status"),
ALL_PLATFORMS,
"Nao ha runtime vivo esperado nesta plataforma.",
"Health/readiness foram detectados.",
"Ha health ou readiness parcial, mas falta classificacao de blocker.",
"Sem health/readiness a leitura humana vira inferencia.",
"publicar health/readiness com blockers classificados e timestamp",
("chamar health", "chamar readiness", "registrar status"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Publicar health/readiness com blockers classificados",
),
check(
"observability.error-classification",
GovernanceAxis.AUDIT_EVIDENCE,
GovernanceDomain.OBSERVABILITY,
"Erro classificado e sanitizado",
"Falhas devem ser classificadas por dominio e expostas sem segredo bruto.",
("errorCode", "classification", "classified", "sanitized", "safeError", "nextAction"),
("erro bruto", "unhandled", "stack trace", "sem classificacao"),
(),
("errorCode", "safeError", "classification"),
ALL_PLATFORMS,
"Nao ha chamada operacional com erro esperado.",
"Classificacao de erro aparece na evidencia.",
"Erro e diagnosticado parcialmente, mas falta taxonomia.",
"Erro sem classificacao reduz suporte e auditoria.",
"criar taxonomia de erro por dominio com mensagem humana e payload redigido",
("forcar erro", "validar safeError", "checar ausencia de token"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Classificar erros e mensagens humanas seguras",
),
check(
"cloud.wrangler-reference",
GovernanceAxis.CLOUD_OPERATION,
GovernanceDomain.CLOUD,
"Wrangler como referencia operacional Cloudflare",
"Deploy, rotas, secrets, bindings, logs e health checks devem usar wrangler, nao o plugin.",
("wrangler", "workers", "routes", "bindings", "secrets", "deploy"),
("plugin Cloudflare como bloqueio", "usar plugin para deploy", "plugin substitui wrangler"),
(),
("wrangler.toml", "wrangler deploy", "wrangler secret"),
CLOUD_RELATED,
"A plataforma nao possui Worker/Cloudflare nesta leitura.",
"Wrangler ou Worker aparecem como referencia operacional.",
"Cloudflare aparece, mas wrangler precisa ser declarado como via operacional.",
"Sem wrangler, diagnostico Cloudflare fica fraco ou dependente do plugin.",
"documentar e validar rotas, deploy, secrets, bindings, logs e health por wrangler",
("wrangler whoami quando aplicavel", "wrangler deploy dry run", "wrangler tail/route check"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Formalizar wrangler como via operacional Cloudflare",
),
check(
"cloud.plugin-exception",
GovernanceAxis.CLOUD_OPERATION,
GovernanceDomain.CLOUD,
"Falha do plugin Cloudflare tratada como excecao esperada",
"A negativa do plugin nao pode virar bloqueio nem desculpa para parar OS.",
("plugin Cloudflare", "user rejected MCP tool call", "negado", "esperada", "nao e blocker"),
("plugin cloudflare bloqueou", "parar por plugin", "diagnostico relevante do plugin"),
(),
("AUDITORIA", "PENDENCIAS", "EXECUTADO"),
CLOUD_RELATED,
"A plataforma nao usa Cloudflare nesta leitura.",
"A excecao do plugin foi tratada corretamente.",
"Plugin foi mencionado, mas precisa ficar claro que nao bloqueia.",
"Plugin Cloudflare foi tratado como blocker indevido.",
"registrar tentativa do plugin apenas como premissa e seguir trabalho por wrangler quando houver acao real",
("registrar tentativa", "validar que pendencias nao citam plugin como blocker", "usar wrangler para trabalho real"),
GovernanceSeverity.MEDIUM,
False,
OrderType.MANAGERIAL,
"Auditar tratamento correto da falha esperada do plugin Cloudflare",
),
check(
"data.truth-state",
GovernanceAxis.DATA_TRUTH,
GovernanceDomain.GOVERNANCE,
"truthState e fonte da verdade explicitos",
"O painel humano e o GPT precisam saber se dado e real, simulado, fixture, stale ou parcial.",
("truthState", "dataTruth", "realData", "simulated", "stale", "partial", "sourceOfTruth"),
("truthIssues", "fonte da verdade ausente", "sampleData indefinido"),
("truth",),
("truthState", "sourceOfTruth", "dataTruth"),
ALL_PLATFORMS,
"Nao ha dado operacional exposto nesta plataforma.",
"Fonte da verdade ou truthState foi detectado.",
"Ha dados, mas falta classificar real/simulado/stale/partial.",
"Sem truthState, pessoas podem confundir dado simulado com real.",
"classificar cada resposta como real, simulatedInstitutional, fixture, stale, partial ou blocked",
("validar payload", "comparar estado", "registrar truthState"),
GovernanceSeverity.HIGH,
True,
OrderType.EXECUTIVE,
"Classificar truthState e fonte da verdade dos dados",
"alta",
),
check(
"data.stale-safe",
GovernanceAxis.DATA_TRUTH,
GovernanceDomain.OBSERVABILITY,
"Snapshot stale-safe para leitura humana",
"Painel e GPT devem poder ler ultimo estado valido se uma dependencia estiver sobrecarregada.",
("snapshot", "stale", "expiresAt", "staleAfter", "lastSuccessful", "cache"),
("d1 overload sem fallback", "sem snapshot", "sem stale"),
(),
("snapshot", "staleAfter", "expiresAt"),
("mcps", "ui", "business", "integracoes", "identity"),
"Nao ha dependencia pesada ou tela de leitura nesta plataforma.",
"Snapshot/stale-safe foi detectado.",
"Ha snapshot, mas falta TTL ou marcacao stale.",
"Sem stale-safe, painel pode cair por dependencia lenta.",
"materializar ultimo snapshot valido com TTL, stale=true e evidenceId",
("simular dependencia lenta", "verificar stale=true", "comparar hash"),
GovernanceSeverity.MEDIUM,
False,
OrderType.EXECUTIVE,
"Adicionar snapshot stale-safe para telas humanas",
),
check(
"release.gates",
GovernanceAxis.RELEASE_GOVERNANCE,
GovernanceDomain.GOVERNANCE,
"Gates de release e rollback",
"Projetos duradouros precisam de gate, rollback e criterio de promocao.",
("release", "rollback", "gate", "promotion", "migration", "breakingChanges"),
("sem rollback", "sem gate", "deploy sem criterio"),
(),
("release", "rollback", "gate"),
ALL_PLATFORMS,
"Nao ha release operacional nesta plataforma.",
"Gates de release/rollback foram detectados.",
"Ha release parcial, mas falta rollback ou criterio formal.",
"Sem gate de release, mudancas podem quebrar contrato humano.",
"criar gate de release com smoke, contrato, rollback, compatibilidade e evidencia",
("validar smoke", "validar contrato", "registrar rollback"),
GovernanceSeverity.MEDIUM,
False,
OrderType.MANAGERIAL,
"Formalizar gates de release e rollback",
),
check(
"commercial.sellable-stage",
GovernanceAxis.COMMERCIAL_MATURITY,
GovernanceDomain.BUSINESS,
"Stage vendavel controlado",
"Produto humano precisa separar catalogado, sandbox, piloto controlado e venda geral.",
("sellable", "pilot", "controlled", "catalog-ready", "commercialReady", "produto vendavel"),
("vendavel sem smoke", "sellable simulated", "stage comercial indefinido"),
("stage",),
("commercialReady", "sellable", "pilot"),
SELLABLE_PLATFORMS,
"Nao ha produto vendavel direto nesta plataforma.",
"Stage comercial aparece classificado.",
"Produto tem valor comercial, mas falta stage operacional claro.",
"Produto pode ser vendido sem prontidao real.",
"criar regua catalog-ready, credential-ready, smoke-ready, pilot-controlled e sellable",
("classificar produto", "validar smoke", "registrar limite comercial"),
GovernanceSeverity.HIGH,
False,
OrderType.MANAGERIAL,
"Separar maturidade comercial e tecnica por produto",
"alta",
),
check(
"human.profile-value",
GovernanceAxis.HUMAN_VALUE,
GovernanceDomain.HUMAN_EXPERIENCE,
"Valor humano por perfil explicitado",
"Cada plataforma deve dizer quem atende, como atende, o que falta e qual proxima acao.",
("perfil humano", "quem atende", "matriz", "playbook", "perguntas humanas", "metas humanas"),
("tecnico demais", "sem perfil", "sem matriz"),
("perfil", "matriz"),
("matriz-plataforma-perfil", "playbooks-humanos", "perguntas-humanas"),
ALL_PLATFORMS,
"Nao ha relatorio humano esperado para esta plataforma.",
"Valor por perfil foi detectado.",
"Ha leitura humana, mas falta conectar a perfil/pergunta/acao.",
"Sem valor por perfil, a plataforma parece apenas tecnica.",
"gerar matriz plataforma x perfil com perguntas, lacunas, evidencias e OS",
("regenerar matriz", "comparar perfis fracos", "criar OS vinculada"),
GovernanceSeverity.MEDIUM,
False,
OrderType.MANAGERIAL,
"Elevar valor humano por perfil e pergunta",
),
)
# Fast lookup table: governance check template keyed by its unique check_id.
CHECK_BY_ID = {tpl.check_id: tpl for tpl in CHECK_TEMPLATES}
# Routes each governance domain to the platform that primarily owns it, so
# domain-level findings can be attached to a concrete platform id.  Several
# domains deliberately share an owner (e.g. CLOUD -> "integracoes",
# SECURITY -> "identity", OBSERVABILITY/GOVERNANCE -> "mcps") — presumably the
# platform best placed to act on them; confirm against order-routing callers.
DOMAIN_TO_PRIMARY_PLATFORM: dict[GovernanceDomain, str] = {
    GovernanceDomain.MCP: "mcps",
    GovernanceDomain.IDENTITY: "identity",
    GovernanceDomain.BUSINESS: "business",
    GovernanceDomain.DOCS: "docs",
    GovernanceDomain.INTEGRATIONS: "integracoes",
    GovernanceDomain.COMPLIANCE: "compliance",
    GovernanceDomain.FINANCE: "finance",
    GovernanceDomain.CUSTOMER_OPS: "customer_ops",
    GovernanceDomain.UI: "ui",
    GovernanceDomain.CLOUD: "integracoes",
    GovernanceDomain.SECURITY: "identity",
    GovernanceDomain.OBSERVABILITY: "mcps",
    GovernanceDomain.SUPPORT: "customer_ops",
    GovernanceDomain.GOVERNANCE: "mcps",
    GovernanceDomain.HUMAN_EXPERIENCE: "ui",
    GovernanceDomain.REPOSITORY: "platform_base",
    GovernanceDomain.DOCUMENTATION: "docs",
    GovernanceDomain.CONTRACT: "platform_base",
    GovernanceDomain.TESTS: "platform_base",
    GovernanceDomain.RUNTIME: "platform_base",
}
def templates_for_platform(platform_id: str) -> tuple[CheckTemplate, ...]:
    """Return every governance check template applicable to *platform_id*."""
    selected = [candidate for candidate in CHECK_TEMPLATES if candidate.applies_to_platform(platform_id)]
    return tuple(selected)
def templates_for_domain(domain: GovernanceDomain) -> tuple[CheckTemplate, ...]:
    """Return every governance check template registered for *domain*."""
    matches: list[CheckTemplate] = []
    for candidate in CHECK_TEMPLATES:
        if candidate.domain == domain:
            matches.append(candidate)
    return tuple(matches)
def core_template_ids() -> tuple[str, ...]:
    """IDs of the checks that are mandatory for a platform to count as human-ready."""
    mandatory = (tpl for tpl in CHECK_TEMPLATES if tpl.required_for_human_ready)
    return tuple(tpl.check_id for tpl in mandatory)

View File

@@ -0,0 +1,203 @@
"""Snapshot and diff helpers for governance portfolios."""
from __future__ import annotations
import json
from dataclasses import dataclass
from pathlib import Path
from typing import Mapping, Sequence
from .governance_models import EcosystemGovernancePortfolio
from .models import as_plain_data, utc_now
@dataclass(slots=True)
class GovernancePlatformSnapshot:
    """Frozen per-platform governance numbers captured at snapshot time."""

    platform_id: str  # stable platform identifier (e.g. "mcps")
    status_label: str  # human-readable status label at capture time
    governance_score: int  # aggregate governance score for the platform
    human_score: int  # aggregate human-value score for the platform
    maturity: str  # maturity enum serialized as its string value
    blocker_count: int  # number of open blockers at capture time
    warning_count: int  # number of open warnings at capture time
    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data for JSON persistence."""
        return as_plain_data(self)
@dataclass(slots=True)
class GovernanceSnapshot:
    """Point-in-time capture of governance state across all platforms."""

    generated_at: str  # timestamp string produced by utc_now() at capture
    average_governance_score: int  # ecosystem-wide average at capture time
    platforms: tuple[GovernancePlatformSnapshot, ...]  # sorted by platform_id
    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data for JSON persistence."""
        return as_plain_data(self)
@dataclass(slots=True)
class GovernanceDelta:
    """Change record for one platform between two governance snapshots."""

    platform_id: str  # platform the change refers to
    before_status: str  # previous status label; "novo" when platform is new
    after_status: str  # current status label; "removido" when platform left
    score_delta: int  # governance score change (after - before)
    blocker_delta: int  # blocker count change (after - before)
    warning_delta: int  # warning count change (after - before)
    summary: str  # one-line human reading of the change
    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data for JSON persistence."""
        return as_plain_data(self)
def snapshot_from_portfolio(portfolio: EcosystemGovernancePortfolio) -> GovernanceSnapshot:
    """Freeze the current governance portfolio into a comparable snapshot."""
    # Timestamp first, matching the original field-evaluation order.
    captured_at = utc_now()
    frozen: list[GovernancePlatformSnapshot] = []
    # Deterministic ordering so snapshot diffs stay stable across runs.
    for card in sorted(portfolio.cards, key=lambda item: item.platform_id):
        frozen.append(
            GovernancePlatformSnapshot(
                platform_id=card.platform_id,
                status_label=card.status_label,
                governance_score=card.governance_score,
                human_score=card.human_score,
                maturity=card.maturity.value,
                blocker_count=len(card.blockers),
                warning_count=len(card.warnings),
            )
        )
    return GovernanceSnapshot(
        generated_at=captured_at,
        average_governance_score=portfolio.average_governance_score,
        platforms=tuple(frozen),
    )
def write_governance_snapshot(path: Path, snapshot: GovernanceSnapshot) -> Path:
    """Serialize *snapshot* as stable, human-diffable JSON and return *path*."""
    # Sorted keys + fixed indent keep the file diff-friendly between rounds.
    payload = json.dumps(snapshot.to_dict(), ensure_ascii=False, indent=2, sort_keys=True)
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(payload, encoding="utf-8")
    return path
def load_governance_snapshot(path: Path) -> GovernanceSnapshot | None:
    """Load a previously written snapshot; return None when missing or unreadable.

    Hardened against malformed-but-parseable JSON: a non-object top level
    returns None, and non-numeric score/count fields degrade to 0 instead of
    raising ValueError/TypeError (the original raised on such input, breaking
    the return-None-on-bad-input contract implied by the except clause).
    """
    if not path.exists():
        return None
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        return None
    if not isinstance(data, dict):
        # A valid snapshot is always a JSON object; anything else is corrupt.
        return None

    def _as_int(value: object) -> int:
        # Malformed numeric fields degrade to 0 instead of aborting the load.
        try:
            return int(value)  # type: ignore[arg-type]
        except (TypeError, ValueError):
            return 0

    platforms = []
    for item in data.get("platforms", []):
        if not isinstance(item, dict):
            continue
        platforms.append(
            GovernancePlatformSnapshot(
                platform_id=str(item.get("platform_id", "")),
                status_label=str(item.get("status_label", "")),
                governance_score=_as_int(item.get("governance_score", 0)),
                human_score=_as_int(item.get("human_score", 0)),
                maturity=str(item.get("maturity", "")),
                blocker_count=_as_int(item.get("blocker_count", 0)),
                warning_count=_as_int(item.get("warning_count", 0)),
            )
        )
    return GovernanceSnapshot(
        generated_at=str(data.get("generated_at", "")),
        average_governance_score=_as_int(data.get("average_governance_score", 0)),
        platforms=tuple(platforms),
    )
def platform_map(snapshot: GovernanceSnapshot | None) -> Mapping[str, GovernancePlatformSnapshot]:
    """Index a snapshot's platforms by id; empty mapping when there is no snapshot."""
    if snapshot is None:
        return {}
    indexed: dict[str, GovernancePlatformSnapshot] = {}
    for entry in snapshot.platforms:
        indexed[entry.platform_id] = entry
    return indexed
def diff_governance_snapshots(before: GovernanceSnapshot | None, after: GovernanceSnapshot) -> tuple[GovernanceDelta, ...]:
    """Compute per-platform governance deltas between two snapshots.

    Emits one delta for every platform that is new, changed, or removed;
    unchanged platforms are skipped entirely. *before* may be None (first
    run), in which case every platform in *after* is reported as new.
    Result is sorted by (platform_id, summary) for stable output.
    """
    before_map = platform_map(before)
    deltas: list[GovernanceDelta] = []
    for current in after.platforms:
        previous = before_map.get(current.platform_id)
        if previous is None:
            # Platform absent from the previous snapshot: report it as new,
            # with its absolute score/blocker/warning counts as the delta.
            deltas.append(
                GovernanceDelta(
                    platform_id=current.platform_id,
                    before_status="novo",
                    after_status=current.status_label,
                    score_delta=current.governance_score,
                    blocker_delta=current.blocker_count,
                    warning_delta=current.warning_count,
                    summary="Plataforma entrou no snapshot de governanca.",
                )
            )
            continue
        score_delta = current.governance_score - previous.governance_score
        blocker_delta = current.blocker_count - previous.blocker_count
        warning_delta = current.warning_count - previous.warning_count
        # Any movement in status, score, blockers or warnings counts as change.
        changed = (
            previous.status_label != current.status_label
            or score_delta != 0
            or blocker_delta != 0
            or warning_delta != 0
        )
        if not changed:
            continue
        # "melhorou" requires a score gain without new blockers; any score drop
        # or blocker gain reads as "piorou"; everything else is just "mudou".
        direction = "melhorou" if score_delta > 0 and blocker_delta <= 0 else "piorou" if score_delta < 0 or blocker_delta > 0 else "mudou"
        deltas.append(
            GovernanceDelta(
                platform_id=current.platform_id,
                before_status=previous.status_label,
                after_status=current.status_label,
                score_delta=score_delta,
                blocker_delta=blocker_delta,
                warning_delta=warning_delta,
                summary=f"Governanca {direction}: score {score_delta}, blockers {blocker_delta}, warnings {warning_delta}.",
            )
        )
    before_ids = set(before_map)
    after_ids = {platform.platform_id for platform in after.platforms}
    # Removed platforms get negative deltas so aggregates remain consistent.
    for removed in sorted(before_ids - after_ids):
        previous = before_map[removed]
        deltas.append(
            GovernanceDelta(
                platform_id=removed,
                before_status=previous.status_label,
                after_status="removido",
                score_delta=-previous.governance_score,
                blocker_delta=-previous.blocker_count,
                warning_delta=-previous.warning_count,
                summary="Plataforma saiu do snapshot de governanca.",
            )
        )
    return tuple(sorted(deltas, key=lambda item: (item.platform_id, item.summary)))
def governance_delta_markdown(deltas: Sequence[GovernanceDelta]) -> str:
    """Render the governance deltas as a human-readable Markdown report."""
    header = ["# Delta de governanca operacional", ""]
    if not deltas:
        empty_note = "- Nenhuma alteracao de governanca detectada contra snapshot anterior."
        return "\n".join(header + [empty_note]) + "\n"
    body: list[str] = []
    for delta in deltas:
        # One section per platform: heading, field bullets, trailing blank line.
        body.extend(
            [
                f"## {delta.platform_id}",
                "",
                f"- status anterior: `{delta.before_status}`",
                f"- status atual: `{delta.after_status}`",
                f"- delta score: `{delta.score_delta}`",
                f"- delta blockers: `{delta.blocker_delta}`",
                f"- delta warnings: `{delta.warning_delta}`",
                f"- leitura: {delta.summary}",
                "",
            ]
        )
    return "\n".join(header + body).strip() + "\n"
def governance_delta_rows(deltas: Sequence[GovernanceDelta]) -> list[list[str]]:
    """Flatten deltas into CSV-ready string rows, header row first."""
    header = ["platform", "before_status", "after_status", "score_delta", "blocker_delta", "warning_delta", "summary"]
    data_rows = [
        [
            delta.platform_id,
            delta.before_status,
            delta.after_status,
            str(delta.score_delta),
            str(delta.blocker_delta),
            str(delta.warning_delta),
            delta.summary,
        ]
        for delta in deltas
    ]
    return [header, *data_rows]

View File

@@ -0,0 +1,775 @@
"""Governance evaluation engine for the Mais Humana platform."""
from __future__ import annotations
import csv
import io
from pathlib import Path
from typing import Iterable, Mapping, Sequence
from .catalog import PLATFORM_BY_ID
from .governance_catalog import CHECK_TEMPLATES, DOMAIN_TO_PRIMARY_PLATFORM, templates_for_platform
from .governance_models import (
CheckTemplate,
EcosystemGovernancePortfolio,
GovernanceAxis,
GovernanceCheckResult,
GovernanceDomain,
GovernanceEvidence,
GovernanceEvidenceKind,
GovernanceMaturity,
GovernanceOrderCandidate,
GovernanceSeverity,
GovernanceStatus,
PlatformGovernanceCard,
dedupe_candidates,
score_to_maturity,
severity_rank,
status_score,
)
from .models import EvidenceKind, OrderType, PlatformHumanReport, Recommendation, as_plain_data, merge_unique, slugify, utc_now
from .operational_models import ExecutionRoundDossier, PlatformOperationalDossier, ReadinessGate, SourceReference
def lower_terms(values: Iterable[str]) -> tuple[str, ...]:
    """Lower-case every value, dropping entries that are blank after stripping."""
    kept: list[str] = []
    for value in values:
        text = str(value)
        # Whitespace-only entries are skipped; kept entries keep their spacing.
        if text.strip():
            kept.append(text.lower())
    return tuple(kept)
def contains_term(text: str, term: str) -> bool:
    """True when *term* (lower-cased) occurs in *text*; empty terms never match.

    NOTE(review): only the term is lowered here — *text* appears to be
    pre-lowered by callers; confirm against corpus_for_report usage.
    """
    return bool(term) and term.lower() in text
def count_terms(text: str, terms: Iterable[str]) -> int:
    """Count how many normalized terms occur in *text*."""
    haystack = text.lower()
    hits = 0
    for term in lower_terms(terms):
        if contains_term(haystack, term):
            hits += 1
    return hits
def matching_terms(text: str, terms: Iterable[str]) -> tuple[str, ...]:
    """Return the normalized terms found in *text*, preserving input order."""
    haystack = text.lower()
    found: list[str] = []
    for term in lower_terms(terms):
        if contains_term(haystack, term):
            found.append(term)
    return tuple(found)
def corpus_for_report(
    report: PlatformHumanReport,
    recommendations: Sequence[Recommendation] = (),
    dossier: PlatformOperationalDossier | None = None,
    extra_text: Sequence[str] = (),
) -> str:
    """Flatten a platform report (plus optional context) into one lowercase
    newline-joined corpus used by the governance term-matching checks.

    Only recommendations addressed to this report's platform are included;
    the dossier (stage, signals, sources, gates) and *extra_text* are
    appended when provided. Empty parts are dropped before joining.
    """
    # Core platform/scan identity and free-text fields come first.
    parts: list[str] = [
        report.platform.platform_id,
        report.platform.title,
        report.platform.mission,
        report.scan.repo_path,
        report.scan.readme_excerpt,
        " ".join(report.platform.expected_surfaces),
        " ".join(report.platform.known_blockers),
        " ".join(report.scan.warnings),
    ]
    # Cap scan evidence at 400 entries to bound corpus size on large repos.
    for evidence in report.scan.evidence[:400]:
        parts.append(evidence.path)
        parts.append(evidence.summary)
        parts.append(" ".join(evidence.tags))
        parts.append(evidence.kind.value)
    # Only recommendations targeting this platform contribute text.
    for recommendation in recommendations:
        if recommendation.platform_id == report.platform.platform_id:
            parts.extend(
                [
                    recommendation.recommendation_id,
                    recommendation.title,
                    recommendation.reason,
                    recommendation.expected_impact,
                    " ".join(path for path in recommendation.affected_paths),
                ]
            )
    if dossier is not None:
        parts.extend([dossier.stage.value, dossier.status_label, dossier.primary_action])
        # Signals carry their own text plus the text of each backing source.
        for signal in dossier.signals:
            parts.extend(
                [
                    signal.signal_id,
                    signal.title,
                    signal.summary,
                    signal.next_action,
                    signal.kind.value,
                    signal.domain.value,
                    " ".join(signal.tags),
                ]
            )
            for source in signal.sources:
                parts.append(source.path)
                parts.append(source.summary)
        for gate in dossier.gates:
            parts.extend([gate.gate_id, gate.title, gate.outcome.value, gate.reason, gate.next_action])
    parts.extend(extra_text)
    # Drop falsy parts, then lower the whole corpus for case-insensitive search.
    return "\n".join(part for part in parts if part).lower()
def evidence_kind_from_scan(kind: EvidenceKind) -> GovernanceEvidenceKind:
    """Translate a scan evidence kind into its governance evidence category."""
    translation = {
        EvidenceKind.TEST: GovernanceEvidenceKind.TEST,
        EvidenceKind.OPENAPI: GovernanceEvidenceKind.CONTRACT,
        EvidenceKind.README: GovernanceEvidenceKind.DOCUMENTATION,
        EvidenceKind.DOC: GovernanceEvidenceKind.DOCUMENTATION,
        EvidenceKind.CONFIG: GovernanceEvidenceKind.CONFIG,
        EvidenceKind.WORKER: GovernanceEvidenceKind.CONFIG,
        EvidenceKind.STORAGE: GovernanceEvidenceKind.CONFIG,
        EvidenceKind.ROUTE: GovernanceEvidenceKind.CODE,
        EvidenceKind.MCP_TOOL: GovernanceEvidenceKind.CODE,
        EvidenceKind.UI_SURFACE: GovernanceEvidenceKind.CODE,
        EvidenceKind.SECURITY: GovernanceEvidenceKind.RUNTIME,
        EvidenceKind.OBSERVABILITY: GovernanceEvidenceKind.RUNTIME,
        EvidenceKind.BUSINESS_RULE: GovernanceEvidenceKind.RUNTIME,
    }
    # Anything not explicitly mapped is treated as derived/inferred evidence.
    return translation.get(kind, GovernanceEvidenceKind.DERIVED)
def evidence_from_source(source: SourceReference) -> GovernanceEvidence:
    """Wrap an operational source reference as derived governance evidence."""
    # Derived evidence always carries a fixed, conservative confidence of 0.7.
    fields = {
        "path": source.path,
        "line": source.line,
        "summary": source.summary,
        "kind": GovernanceEvidenceKind.DERIVED,
        "confidence": 0.7,
    }
    return GovernanceEvidence(**fields)
def evidence_from_gate(gate: ReadinessGate) -> tuple[GovernanceEvidence, ...]:
    """Convert every source attached to a readiness gate into evidence items."""
    converted = [evidence_from_source(source) for source in gate.evidence]
    return tuple(converted)
def evidence_from_report_terms(report: PlatformHumanReport, terms: Sequence[str], limit: int = 8) -> tuple[GovernanceEvidence, ...]:
    """Collect up to *limit* evidence items whose text mentions any of *terms*."""
    needles = lower_terms(terms)
    if not needles:
        return ()

    def _mentions(haystack: str) -> bool:
        # Plain substring matching against the normalized term list.
        return any(needle in haystack for needle in needles)

    collected: list[GovernanceEvidence] = []
    # First pass: structured scan evidence (path, summary, tags and kind).
    for scan_item in report.scan.evidence:
        haystack = f"{scan_item.path} {scan_item.summary} {' '.join(scan_item.tags)} {scan_item.kind.value}".lower()
        if _mentions(haystack):
            collected.append(
                GovernanceEvidence(
                    path=scan_item.path,
                    line=scan_item.line,
                    summary=scan_item.summary,
                    kind=evidence_kind_from_scan(scan_item.kind),
                    confidence=scan_item.confidence,
                )
            )
        if len(collected) >= limit:
            return tuple(collected)
    # Second pass: free-form warnings, attached to the repo path with a
    # slightly lower confidence because they are derived, not structured.
    for warning in report.scan.warnings:
        if _mentions(warning.lower()):
            collected.append(
                GovernanceEvidence(
                    path=report.scan.repo_path,
                    summary=warning,
                    kind=GovernanceEvidenceKind.DERIVED,
                    confidence=0.65,
                )
            )
        if len(collected) >= limit:
            break
    return tuple(collected)
def direct_status_for_template(
    report: PlatformHumanReport,
    template: CheckTemplate,
    dossier: PlatformOperationalDossier | None,
) -> GovernanceStatus | None:
    """Return a definitive status for checks with hard structural signals.

    Returns None when no direct verdict applies, letting the caller fall
    back to term-based scoring (see choose_status). Note the branches that
    test a check_id but may still fall through without returning — that
    fall-through to None is intentional.
    """
    check_id = template.check_id
    # Repository/docs/tests/contract checks map 1:1 onto scan booleans.
    if check_id == "repository.exists":
        return GovernanceStatus.PASS if report.scan.exists else GovernanceStatus.BLOCKED
    if check_id == "repository.git-traceability":
        return GovernanceStatus.PASS if report.scan.git_present else GovernanceStatus.FAIL
    if check_id == "documentation.human-readme":
        return GovernanceStatus.PASS if report.scan.readme_excerpt else GovernanceStatus.ATTENTION
    if check_id == "tests.detected":
        return GovernanceStatus.PASS if report.scan.has_tests else GovernanceStatus.ATTENTION
    if check_id == "contract.openapi-or-equivalent":
        return GovernanceStatus.PASS if report.scan.has_openapi else GovernanceStatus.ATTENTION
    # Docs platform only: an explicit catalogOnly blocker is a hard block.
    if check_id == "docs.canonical-read" and report.platform.platform_id == "docs":
        blocker_text = " ".join(report.platform.known_blockers).lower()
        if "catalogonly" in blocker_text or "catalog_only" in blocker_text:
            return GovernanceStatus.BLOCKED
    # Panel checks read structural readiness flags from the dossier when present.
    if check_id == "panel.same-source" and dossier is not None:
        if dossier.same_source_ready:
            return GovernanceStatus.PASS
        if dossier.panel_ready:
            return GovernanceStatus.ATTENTION
    if check_id == "panel.backend-mcp" and dossier is not None:
        if dossier.panel_ready:
            return GovernanceStatus.PASS
    # Audit/trace passes when any observability or governance gate passed.
    if check_id == "observability.audit-trace" and dossier is not None:
        if any(gate.domain.value in {"observability", "governance"} and gate.passed for gate in dossier.gates):
            return GovernanceStatus.PASS
    return None
def choose_status(
    template: CheckTemplate,
    positive_hits: int,
    negative_hits: int,
    required_hits: int,
    direct: GovernanceStatus | None,
) -> GovernanceStatus:
    """Resolve a governance status from term hits and an optional direct verdict.

    Precedence: an explicit *direct* verdict wins; the Cloudflare-plugin check
    has bespoke semantics (negative evidence means the plugin was wrongly
    treated as a blocker -> FAIL, positive evidence means the expected
    exception was handled -> EXCEPTION); otherwise negative terms escalate,
    missing required terms degrade, positive terms pass, and no signal at all
    yields ATTENTION. (Also collapses the original's duplicated trailing
    ATTENTION branch, which returned the same value on both paths.)
    """
    if direct is not None:
        return direct
    # Bespoke rule: the plugin's rejection is an *expected* exception, so
    # positive evidence maps to EXCEPTION rather than PASS.
    if template.check_id == "cloud.plugin-exception":
        if negative_hits:
            return GovernanceStatus.FAIL
        if positive_hits:
            return GovernanceStatus.EXCEPTION
    if negative_hits:
        # Mandatory or high-severity checks block outright on negative signals.
        if template.required_for_human_ready or template.severity in {GovernanceSeverity.HIGH, GovernanceSeverity.CRITICAL}:
            return GovernanceStatus.BLOCKED
        return GovernanceStatus.FAIL
    if template.required_terms and required_hits < len(template.required_terms):
        # Required terms incomplete: partial positives only warrant attention;
        # a fully-required check with no positives at all fails.
        if positive_hits:
            return GovernanceStatus.ATTENTION
        if template.required_for_human_ready:
            return GovernanceStatus.FAIL
        return GovernanceStatus.ATTENTION
    if positive_hits:
        return GovernanceStatus.PASS
    # No signal either way: flag for human attention regardless of requirement.
    return GovernanceStatus.ATTENTION
def result_severity(template: CheckTemplate, status: GovernanceStatus) -> GovernanceSeverity:
    """Derive the effective severity of a check result from its status."""
    harmless = {GovernanceStatus.PASS, GovernanceStatus.EXCEPTION, GovernanceStatus.NOT_APPLICABLE}
    if status in harmless:
        return GovernanceSeverity.INFO
    if status == GovernanceStatus.BLOCKED:
        # Blocked findings are never reported below HIGH.
        already_high = template.severity in {GovernanceSeverity.HIGH, GovernanceSeverity.CRITICAL}
        return template.severity if already_high else GovernanceSeverity.HIGH
    if status == GovernanceStatus.FAIL:
        return template.severity
    if status == GovernanceStatus.ATTENTION:
        # Attention softens CRITICAL templates down to HIGH.
        if template.severity == GovernanceSeverity.CRITICAL:
            return GovernanceSeverity.HIGH
        return template.severity
    return GovernanceSeverity.INFO
def reason_for_status(template: CheckTemplate, status: GovernanceStatus, positive: Sequence[str], negative: Sequence[str]) -> str:
    """Build the human-facing reason text for a check result.

    PASS and EXCEPTION share the pass summary; ATTENTION appends up to five
    matched terms; FAIL/BLOCKED append up to five negative signals.
    """
    if status in {GovernanceStatus.PASS, GovernanceStatus.EXCEPTION}:
        return template.pass_summary
    if status == GovernanceStatus.NOT_APPLICABLE:
        return template.not_applicable_reason
    if status == GovernanceStatus.ATTENTION:
        suffix = f" Termos encontrados: {', '.join(positive[:5])}." if positive else ""
        return template.attention_summary + suffix
    if status in {GovernanceStatus.FAIL, GovernanceStatus.BLOCKED}:
        suffix = f" Sinais negativos: {', '.join(negative[:5])}." if negative else ""
        return template.fail_summary + suffix
    # Defensive fallback for any status not handled above.
    return template.attention_summary
def check_score(status: GovernanceStatus, positive_hits: int, required_hits: int, negative_hits: int) -> int:
    """Score a check result on a 0..100 scale from its status and term hits."""
    base = status_score(status)
    if status == GovernanceStatus.NOT_APPLICABLE:
        # Not-applicable checks never penalize a platform.
        return 100
    # Positive/required matches add up to +10; negative matches cost up to -30.
    bonus = min(10, positive_hits * 2 + required_hits * 3)
    penalty = min(30, negative_hits * 8)
    adjusted = base + bonus - penalty
    return min(100, max(0, adjusted))
def evaluate_template(
    report: PlatformHumanReport,
    template: CheckTemplate,
    recommendations: Sequence[Recommendation] = (),
    dossier: PlatformOperationalDossier | None = None,
    extra_text: Sequence[str] = (),
) -> GovernanceCheckResult:
    """Evaluate one check template against a platform report.

    Returns a NOT_APPLICABLE result immediately when the template does not
    apply to the platform; otherwise matches positive/negative/required
    terms against the report corpus, combines them with any direct status,
    and attaches evidence (including dossier gates and a synthetic
    "absence" evidence entry for FAIL/BLOCKED results with none).
    """
    if not template.applies_to_platform(report.platform.platform_id):
        # Not applicable: perfect score, informational severity, no order.
        return GovernanceCheckResult(
            check_id=template.check_id,
            platform_id=report.platform.platform_id,
            axis=template.axis,
            domain=template.domain,
            title=template.title,
            status=GovernanceStatus.NOT_APPLICABLE,
            severity=GovernanceSeverity.INFO,
            maturity=GovernanceMaturity.INSTITUTIONAL,
            score=100,
            reason=template.not_applicable_reason,
            next_action="manter monitoramento se a relacao passar a existir",
            evidence=(),
            validation_steps=(),
            order_title=template.normalized_order_title,
            order_type=template.creates_order_type,
            order_priority=template.order_priority,
            required_for_human_ready=False,
        )
    # Term matching over the combined report corpus.
    corpus = corpus_for_report(report, recommendations, dossier, extra_text)
    positive = matching_terms(corpus, template.positive_terms)
    negative = matching_terms(corpus, template.negative_terms)
    required = matching_terms(corpus, template.required_terms)
    # A direct status (if any) can override pure term heuristics.
    direct = direct_status_for_template(report, template, dossier)
    status = choose_status(template, len(positive), len(negative), len(required), direct)
    severity = result_severity(template, status)
    score = check_score(status, len(positive), len(required), len(negative))
    evidence_terms = tuple(template.evidence_hints) + positive + negative + required
    evidence = list(evidence_from_report_terms(report, evidence_terms))
    if dossier is not None:
        # Pull in dossier gates whose text mentions this domain or any term.
        for gate in dossier.gates:
            gate_text = f"{gate.gate_id} {gate.title} {gate.reason} {gate.next_action}".lower()
            if template.domain.value in gate_text or any(term in gate_text for term in lower_terms(evidence_terms)):
                evidence.extend(evidence_from_gate(gate))
    if not evidence and status in {GovernanceStatus.FAIL, GovernanceStatus.BLOCKED}:
        # Failing checks must carry at least one evidence entry; record the
        # absence itself as low-confidence evidence.
        evidence.append(
            GovernanceEvidence(
                path=report.scan.repo_path,
                summary="Ausencia ou sinal negativo inferido pelo check de governanca.",
                kind=GovernanceEvidenceKind.ABSENCE,
                confidence=0.55,
            )
        )
    reason = reason_for_status(template, status, positive, negative)
    return GovernanceCheckResult(
        check_id=template.check_id,
        platform_id=report.platform.platform_id,
        axis=template.axis,
        domain=template.domain,
        title=template.title,
        status=status,
        severity=severity,
        maturity=score_to_maturity(score),
        score=score,
        reason=reason,
        next_action=template.suggested_action if status != GovernanceStatus.PASS else "manter evidencia e regressao",
        evidence=tuple(evidence[:10]),  # cap attached evidence at 10 entries
        validation_steps=template.validation_steps,
        order_title=template.normalized_order_title,
        order_type=template.creates_order_type,
        order_priority=template.order_priority,
        required_for_human_ready=template.required_for_human_ready,
    )
def card_score(checks: Sequence[GovernanceCheckResult]) -> int:
    """Return the weighted mean score of all applicable checks.

    Human-ready-required checks weigh double, and HIGH/CRITICAL severity
    adds one more point of weight. Returns 0 when no check applies.
    """
    weighted_sum = 0
    weight_sum = 0
    for check in checks:
        if check.status == GovernanceStatus.NOT_APPLICABLE:
            continue
        weight = 2 if check.required_for_human_ready else 1
        if check.severity in {GovernanceSeverity.HIGH, GovernanceSeverity.CRITICAL}:
            weight += 1
        weighted_sum += check.score * weight
        weight_sum += weight
    if not weight_sum:
        return 0
    return round(weighted_sum / weight_sum)
def relation_summary(report: PlatformHumanReport) -> tuple[str, ...]:
    """Summarize declared platform relations and primary categories (max 8)."""
    platform = report.platform
    entries: list[str] = [
        f"{platform.platform_id} depende ou conversa com {related}"
        for related in platform.related_platforms
        if related in PLATFORM_BY_ID
    ]
    entries.extend(f"categoria primaria: {category.value}" for category in platform.primary_categories)
    return merge_unique(entries)[:8]
def build_strengths(checks: Sequence[GovernanceCheckResult]) -> tuple[str, ...]:
    """List up to ten deduplicated strengths (checks in PASS or EXCEPTION)."""
    lines: list[str] = []
    for check in checks:
        if check.status in {GovernanceStatus.PASS, GovernanceStatus.EXCEPTION}:
            lines.append(f"{check.domain.value}: {check.title}")
    return merge_unique(lines)[:10]
def build_next_actions(checks: Sequence[GovernanceCheckResult], limit: int = 10) -> tuple[str, ...]:
    """Collect next actions from problematic checks, worst severity first."""
    ordered = sorted(checks, key=lambda check: (-severity_rank(check.severity), check.score, check.title))
    actions = [
        check.next_action
        for check in ordered
        if check.status in {GovernanceStatus.ATTENTION, GovernanceStatus.FAIL, GovernanceStatus.BLOCKED}
    ]
    return merge_unique(actions)[:limit]
def build_platform_governance_card(
    report: PlatformHumanReport,
    recommendations: Sequence[Recommendation] = (),
    dossier: PlatformOperationalDossier | None = None,
    extra_text: Sequence[str] = (),
) -> PlatformGovernanceCard:
    """Evaluate every applicable template for one platform and assemble its card."""
    evaluated: list[GovernanceCheckResult] = []
    for template in templates_for_platform(report.platform.platform_id):
        evaluated.append(evaluate_template(report, template, recommendations, dossier, extra_text))
    checks = tuple(evaluated)
    governance_score = card_score(checks)
    # Partition problematic checks into blockers and non-blocking warnings.
    blocking: list[GovernanceCheckResult] = []
    warning: list[GovernanceCheckResult] = []
    for check in checks:
        if check.is_blocking:
            blocking.append(check)
        elif check.status in {GovernanceStatus.ATTENTION, GovernanceStatus.FAIL}:
            warning.append(check)
    return PlatformGovernanceCard(
        platform_id=report.platform.platform_id,
        title=report.platform.title,
        repo_path=report.scan.repo_path,
        checks=checks,
        human_score=report.average_score,
        governance_score=governance_score,
        maturity=score_to_maturity(governance_score),
        blockers=tuple(blocking),
        warnings=tuple(warning),
        strengths=build_strengths(checks),
        next_actions=build_next_actions(checks),
        relation_summary=relation_summary(report),
    )
def candidate_from_check(check: GovernanceCheckResult, repo_path: str, index: int) -> GovernanceOrderCandidate:
    """Turn a governance check into an order candidate with a stable id."""
    if check.order_type == OrderType.EXECUTIVE:
        prefix = "EXECUTIVA"
    else:
        prefix = "GERENCIAL"
    purpose_text = (
        "Transformar check de governanca em continuidade executavel, "
        "com evidencia, validacao e criterio humano de pronto."
    )
    return GovernanceOrderCandidate(
        candidate_id=f"{index:04d}_{prefix}__{slugify(check.order_title)}",
        platform_id=check.platform_id,
        order_type=check.order_type,
        title=check.order_title,
        purpose=purpose_text,
        reason=check.reason,
        expected_result=check.next_action,
        priority=check.order_priority,
        affected_paths=(repo_path,),
        validations=check.validation_steps,
        source_check_ids=(check.check_id,),
    )
def build_order_candidates(cards: Sequence[PlatformGovernanceCard]) -> tuple[GovernanceOrderCandidate, ...]:
    """Derive deduplicated order candidates, worst-governed platforms first.

    At most five candidates per platform, taken from the most severe /
    lowest-scoring actionable checks.
    """
    collected: list[GovernanceOrderCandidate] = []
    counter = 1
    for card in sorted(cards, key=lambda entry: (entry.governance_score, entry.platform_id)):
        pending = sorted(
            (
                check
                for check in card.checks
                if check.needs_order and check.status != GovernanceStatus.NOT_APPLICABLE
            ),
            key=lambda entry: (-severity_rank(entry.severity), entry.score, entry.title),
        )
        for check in pending[:5]:
            collected.append(candidate_from_check(check, card.repo_path, counter))
            counter += 1
    return dedupe_candidates(collected)
def relation_matrix(cards: Sequence[PlatformGovernanceCard]) -> tuple[tuple[str, str, str], ...]:
    """Build sorted (source, target, relation) edges for the portfolio.

    Edges come from declared platform relations and from check domains whose
    primary owner is another platform. Duplicates are removed with a set:
    the original's order-preserving dedupe pass was redundant because the
    final result is sorted anyway.
    """
    edges: set[tuple[str, str, str]] = set()
    ids = {card.platform_id for card in cards}
    for card in cards:
        platform = PLATFORM_BY_ID.get(card.platform_id)
        if platform is None:
            continue
        for related in platform.related_platforms:
            relation = "declared" if related in ids else "external_or_missing"
            edges.add((card.platform_id, related, relation))
        for check in card.checks:
            if check.status == GovernanceStatus.NOT_APPLICABLE:
                continue
            owner = DOMAIN_TO_PRIMARY_PLATFORM.get(check.domain)
            if owner and owner != card.platform_id:
                edges.add((card.platform_id, owner, f"domain:{check.domain.value}"))
    return tuple(sorted(edges))
def executive_summary(cards: Sequence[PlatformGovernanceCard]) -> tuple[str, ...]:
    """Six headline counters for the whole governance portfolio."""
    total = len(cards)
    score_sum = 0
    blocked = 0
    controlled = 0
    panel_related = 0
    docs_blockers = 0
    for card in cards:
        score_sum += card.governance_score
        if card.blockers:
            blocked += 1
        # "Controlled" means a high score with no blockers at all.
        if card.governance_score >= 82 and not card.blockers:
            controlled += 1
        if any(check.axis == GovernanceAxis.PANEL_BACKEND and check.status == GovernanceStatus.PASS for check in card.checks):
            panel_related += 1
        if any(check.domain == GovernanceDomain.DOCS and check.is_blocking for check in card.checks):
            docs_blockers += 1
    average = round(score_sum / total) if total else 0
    return (
        f"Plataformas avaliadas: {total}",
        f"Score medio de governanca: {average}",
        f"Plataformas bloqueadas por governanca: {blocked}",
        f"Plataformas controladas/institucionais: {controlled}",
        f"Plataformas com backend de painel detectado: {panel_related}",
        f"Checks Docs bloqueantes: {docs_blockers}",
    )
def managerial_summary(cards: Sequence[PlatformGovernanceCard]) -> tuple[str, ...]:
    """One status line per platform, worst governance score first."""
    ordered = sorted(cards, key=lambda card: (card.governance_score, card.platform_id))
    lines: list[str] = []
    for card in ordered:
        top_blockers = ", ".join(check.title for check in card.blockers[:3]) or "sem blocker"
        next_action = card.next_actions[0] if card.next_actions else "manter regressao"
        lines.append(
            f"{card.platform_id}: status={card.status_label}; score={card.governance_score}; "
            f"maturidade={card.maturity.value}; blockers={top_blockers}; acao={next_action}"
        )
    return tuple(lines)
def blockers_summary(cards: Sequence[PlatformGovernanceCard]) -> tuple[str, ...]:
    """Up to twenty deduplicated blocker lines across all platforms."""
    entries = [
        f"{card.platform_id}: {check.title} - {check.next_action}"
        for card in cards
        for check in card.blockers
    ]
    return merge_unique(entries)[:20]
def dossier_by_platform(round_dossier: ExecutionRoundDossier | None) -> Mapping[str, PlatformOperationalDossier]:
    """Index platform dossiers by platform id; empty mapping when absent."""
    if round_dossier is None:
        return {}
    index: dict[str, PlatformOperationalDossier] = {}
    for dossier in round_dossier.platform_dossiers:
        index[dossier.platform_id] = dossier
    return index
def build_governance_portfolio(
    platform_reports: Sequence[PlatformHumanReport],
    recommendations: Sequence[Recommendation] = (),
    round_dossier: ExecutionRoundDossier | None = None,
    extra_text: Sequence[str] = (),
) -> EcosystemGovernancePortfolio:
    """Assemble the full ecosystem governance portfolio from per-platform reports."""
    dossiers = dossier_by_platform(round_dossier)
    card_list: list[PlatformGovernanceCard] = []
    for report in platform_reports:
        card_list.append(
            build_platform_governance_card(
                report,
                recommendations=recommendations,
                dossier=dossiers.get(report.platform.platform_id),
                extra_text=extra_text,
            )
        )
    cards = tuple(card_list)
    return EcosystemGovernancePortfolio(
        project_id="tudo-para-ia-mais-humana",
        cards=cards,
        order_candidates=build_order_candidates(cards),
        executive_summary=executive_summary(cards),
        managerial_summary=managerial_summary(cards),
        blockers_summary=blockers_summary(cards),
        relation_matrix=relation_matrix(cards),
    )
def governance_portfolio_markdown(portfolio: EcosystemGovernancePortfolio) -> str:
    """Render the governance portfolio as a Markdown report."""
    out: list[str] = [
        "# Governanca operacional Mais Humana",
        "",
        f"- project_id: `{portfolio.project_id}`",
        f"- generated_at: `{portfolio.generated_at}`",
        f"- score_medio: `{portfolio.average_governance_score}`",
    ]
    out += ["", "## Sumario executivo", ""]
    out += [f"- {entry}" for entry in portfolio.executive_summary]
    out += ["", "## Sumario gerencial", ""]
    out += [f"- {entry}" for entry in portfolio.managerial_summary]
    out += ["", "## Blockers", ""]
    if portfolio.blockers_summary:
        out += [f"- {entry}" for entry in portfolio.blockers_summary]
    else:
        out.append("- Nenhum blocker de governanca consolidado.")
    out += ["", "## Plataformas", ""]
    for card in sorted(portfolio.cards, key=lambda entry: entry.platform_id):
        out += [
            f"### {card.platform_id}",
            "",
            f"- status: `{card.status_label}`",
            f"- score_governanca: `{card.governance_score}`",
            f"- score_humano: `{card.human_score}`",
            f"- maturidade: `{card.maturity.value}`",
            f"- blockers: `{len(card.blockers)}`",
            f"- warnings: `{len(card.warnings)}`",
        ]
        if card.next_actions:
            out.append(f"- proxima_acao: {card.next_actions[0]}")
        out += ["", "Checks prioritarios:"]
        ranked = sorted(card.checks, key=lambda entry: (-severity_rank(entry.severity), entry.score, entry.title))
        # Top twelve checks; not-applicable entries are skipped.
        for check in ranked[:12]:
            if check.status == GovernanceStatus.NOT_APPLICABLE:
                continue
            out.append(
                f"- `{check.status.value}` `{check.domain.value}` {check.title}: "
                f"{check.reason} Proxima acao: {check.next_action}"
            )
        out.append("")
    out += ["", "## Matriz de relacoes", ""]
    for source, target, relation in portfolio.relation_matrix[:120]:
        out.append(f"- `{source}` -> `{target}` ({relation})")
    return "\n".join(out).strip() + "\n"
def governance_candidates_markdown(portfolio: EcosystemGovernancePortfolio, limit: int = 20) -> str:
    """Render governance order candidates as Markdown (up to ``limit``)."""
    out = [
        "# Candidatas de ordem por governanca",
        "",
        "As candidatas abaixo nascem de checks reais de governanca e devem virar OS apenas quando a pendencia nao puder ser resolvida nesta rodada.",
        "",
    ]
    if not portfolio.order_candidates:
        out.append("- Nenhuma candidata de ordem de governanca.")
        return "\n".join(out) + "\n"
    for candidate in portfolio.order_candidates[:limit]:
        out += [
            f"## {candidate.candidate_id}",
            "",
            f"- tipo: `{candidate.order_type.value}`",
            f"- plataforma: `{candidate.platform_id}`",
            f"- prioridade: `{candidate.priority}`",
            f"- titulo: {candidate.title}",
            f"- motivo: {candidate.reason}",
            f"- resultado esperado: {candidate.expected_result}",
            "- validacoes:",
        ]
        out += [f"  - {validation}" for validation in candidate.validations]
        out.append("")
    return "\n".join(out).strip() + "\n"
def governance_cards_rows(portfolio: EcosystemGovernancePortfolio) -> list[list[str]]:
    """Tabular summary of governance cards: header row plus one row per platform."""
    header = ["platform", "status", "governance_score", "human_score", "maturity", "blockers", "warnings", "next_action"]
    rows = [header]
    for card in sorted(portfolio.cards, key=lambda entry: entry.platform_id):
        first_action = card.next_actions[0] if card.next_actions else ""
        rows.append(
            [
                card.platform_id,
                card.status_label,
                str(card.governance_score),
                str(card.human_score),
                card.maturity.value,
                str(len(card.blockers)),
                str(len(card.warnings)),
                first_action,
            ]
        )
    return rows
def governance_checks_rows(portfolio: EcosystemGovernancePortfolio) -> list[list[str]]:
    """Detailed table: one row per applicable check of every platform."""
    rows = [["platform", "check_id", "axis", "domain", "status", "severity", "score", "title", "next_action"]]
    for card in sorted(portfolio.cards, key=lambda entry: entry.platform_id):
        ordered = sorted(card.checks, key=lambda entry: (entry.domain.value, entry.axis.value, entry.check_id))
        for check in ordered:
            if check.status == GovernanceStatus.NOT_APPLICABLE:
                continue
            rows.append(
                [
                    card.platform_id,
                    check.check_id,
                    check.axis.value,
                    check.domain.value,
                    check.status.value,
                    check.severity.value,
                    str(check.score),
                    check.title,
                    check.next_action,
                ]
            )
    return rows
def rows_to_csv(rows: Sequence[Sequence[str]]) -> str:
    """Serialize rows into CSV text using LF line endings."""
    sink = io.StringIO()
    csv.writer(sink, lineterminator="\n").writerows(rows)
    return sink.getvalue()
def governance_cards_csv(portfolio: EcosystemGovernancePortfolio) -> str:
    """CSV rendering of the per-platform governance card summary."""
    rows = governance_cards_rows(portfolio)
    return rows_to_csv(rows)
def governance_checks_csv(portfolio: EcosystemGovernancePortfolio) -> str:
    """CSV rendering of the detailed per-check governance table."""
    rows = governance_checks_rows(portfolio)
    return rows_to_csv(rows)
def compact_governance_payload(portfolio: EcosystemGovernancePortfolio) -> dict[str, object]:
    """Small JSON-friendly projection of the portfolio for quick reading."""
    card_entries = []
    for card in portfolio.cards:
        card_entries.append(
            {
                "platform_id": card.platform_id,
                "status": card.status_label,
                "governance_score": card.governance_score,
                "human_score": card.human_score,
                "maturity": card.maturity.value,
                "blockers": [check.title for check in card.blockers],
                "next_actions": card.next_actions[:5],
            }
        )
    candidate_entries = []
    for candidate in portfolio.order_candidates:
        candidate_entries.append(
            {
                "candidate_id": candidate.candidate_id,
                "platform_id": candidate.platform_id,
                "order_type": candidate.order_type.value,
                "title": candidate.title,
                "priority": candidate.priority,
                "source_check_ids": candidate.source_check_ids,
            }
        )
    return {
        "project_id": portfolio.project_id,
        "generated_at": portfolio.generated_at,
        "average_governance_score": portfolio.average_governance_score,
        "blocked_platforms": portfolio.blocked_platforms,
        "controlled_platforms": portfolio.controlled_platforms,
        "cards": card_entries,
        "order_candidates": candidate_entries,
    }
def governance_artifact_records(portfolio: EcosystemGovernancePortfolio, project_root: Path) -> tuple[dict[str, str], ...]:
    """Describe the governance artifacts expected under ``project_root``.

    ``portfolio`` is accepted for signature symmetry but the table is
    static; only ``project_root`` influences the emitted paths.
    """
    table = (
        ("dados/governanca-operacional.json", "Portfolio completo de governanca operacional.", "governanca", "json"),
        ("dados/governanca-operacional-compacta.json", "Portfolio compacto para leitura rapida.", "governanca compacta", "json"),
        ("ecossistema/GOVERNANCA-OPERACIONAL-MAIS-HUMANA.md", "Relatorio Markdown de governanca operacional.", "governanca", "markdown"),
        ("ecossistema/CANDIDATAS-OS-GOVERNANCA.md", "Candidatas de OS nascidas de checks reais.", "ordens candidatas", "markdown"),
        ("matrizes/governanca-cards.csv", "Resumo tabular dos cards de governanca.", "matriz governanca", "csv"),
        ("matrizes/governanca-checks.csv", "Checks detalhados de governanca.", "matriz governanca", "csv"),
    )
    return tuple(
        {
            "path": str(project_root / relative),
            "description": description,
            "function": function,
            "file_type": file_type,
        }
        for relative, description, function, file_type in table
    )
def select_governance_exit_candidates(
    portfolio: EcosystemGovernancePortfolio,
    order_type: OrderType,
    limit: int = 5,
) -> tuple[GovernanceOrderCandidate, ...]:
    """Pick up to ``limit`` exit candidates, preferring ``order_type``.

    Candidates of the requested type are taken first, in portfolio order; if
    fewer than ``limit`` match, the remainder is filled with the other
    candidates, still in portfolio order. Membership during the fill pass is
    tracked with a ``candidate_id`` set instead of the original O(n*m)
    ``candidate not in selected`` dataclass-equality scan (candidate ids are
    the dedupe key used upstream).
    """
    preferred = [candidate for candidate in portfolio.order_candidates if candidate.order_type == order_type]
    if len(preferred) >= limit:
        return tuple(preferred[:limit])
    chosen = list(preferred)
    chosen_ids = {candidate.candidate_id for candidate in chosen}
    for candidate in portfolio.order_candidates:
        if len(chosen) >= limit:
            break
        if candidate.candidate_id in chosen_ids:
            continue
        chosen.append(candidate)
        chosen_ids.add(candidate.candidate_id)
    return tuple(chosen)
def platform_domain_gap_counts(portfolio: EcosystemGovernancePortfolio) -> dict[str, dict[str, int]]:
    """Count problematic checks per platform, keyed by governance domain."""
    gaps: dict[str, dict[str, int]] = {}
    for card in portfolio.cards:
        domain_counts = gaps.setdefault(card.platform_id, {})
        for check in card.checks:
            if check.status not in {GovernanceStatus.ATTENTION, GovernanceStatus.FAIL, GovernanceStatus.BLOCKED}:
                continue
            key = check.domain.value
            domain_counts[key] = domain_counts.get(key, 0) + 1
    return gaps
def governance_to_plain_data(portfolio: EcosystemGovernancePortfolio) -> dict[str, object]:
    """Convert the portfolio dataclass tree into plain (JSON-ready) data."""
    return as_plain_data(portfolio)

View File

@@ -0,0 +1,206 @@
"""Artifact writers for the Mais Humana governance layer."""
from __future__ import annotations
import json
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Iterable, Sequence
from .evidence_graph import EvidenceGraph, graph_dot, graph_markdown, graph_rows
from .exit_order_compiler import CompiledOrderSet, compiled_orders_markdown, order_coverage_rows, source_candidate_rows
from .governance_engine import (
EcosystemGovernancePortfolio,
compact_governance_payload,
governance_candidates_markdown,
governance_cards_csv,
governance_checks_csv,
governance_portfolio_markdown,
governance_to_plain_data,
rows_to_csv,
)
from .governance_scenarios import ScenarioPortfolio, scenario_rows, scenarios_markdown
from .human_readiness_registry import ReadinessRegistry, registry_markdown, registry_rows
from .models import GeneratedFile, as_plain_data
from .portfolio_queries import PortfolioQuestion, compact_question_payload, questions_markdown, questions_rows
from .round_assurance import AssuranceSuite, assurance_markdown, assurance_rows
from .runtime_budget import RoundLineBudget, budget_markdown, budget_rows
from .service_order_lifecycle import RoundExecutionPackage, lifecycle_audit_markdown, lifecycle_execution_markdown, lifecycle_pending_markdown, lifecycle_queue_markdown
from .workflow_registry import WorkflowPortfolio, workflow_markdown, workflow_rows
@dataclass(slots=True)
class GovernanceExport:
    """One artifact to write: destination path, index metadata, and a writer."""

    path: Path  # destination of the artifact (root + relative path)
    description: str  # human description recorded in generated-file records
    function: str  # functional category label (e.g. "governanca", "workflows")
    file_type: str  # serialization format: json, markdown, csv, dot
    writer: Callable[[Path], None]  # callback that actually writes the file
@dataclass(slots=True)
class GovernanceExportBundle:
    """Outcome of writing a batch of governance exports."""

    files: tuple[Path, ...]  # paths written successfully
    generated_records: tuple[GeneratedFile, ...]  # index records for written files
    errors: tuple[str, ...]  # "path: exception" strings for failed writers

    def to_dict(self) -> dict[str, object]:
        """Return a JSON-serializable view of the bundle."""
        return {
            "files": [str(path) for path in self.files],
            "generated_records": [item.to_dict() for item in self.generated_records],
            "errors": self.errors,
        }
def write_json(path: Path, payload: object) -> None:
    """Serialize ``payload`` (via as_plain_data) as UTF-8 JSON at ``path``."""
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(as_plain_data(payload), ensure_ascii=False, indent=2, sort_keys=True)
    path.write_text(serialized, encoding="utf-8")
def write_text(path: Path, text: str) -> None:
    """Write ``text`` to ``path`` as UTF-8, creating parent directories."""
    parent = path.parent
    parent.mkdir(parents=True, exist_ok=True)
    path.write_text(text, encoding="utf-8")
def record_for(path: Path, project_root: Path, description: str, function: str, file_type: str, relation_to_order: str) -> GeneratedFile:
    """Build the GeneratedFile index record for one written artifact."""
    try:
        relative = path.relative_to(project_root)
    except ValueError:
        # Path lives outside the project root; record it as-is.
        relative = path
    normalized = str(relative).replace("\\", "/")
    return GeneratedFile(
        path=normalized,
        description=description,
        function=function,
        file_type=file_type,
        changed_by="mais_humana.governance_exports",
        change_summary=description,
        relation_to_order=relation_to_order,
    )
def export(
    relative_path: str,
    description: str,
    function: str,
    file_type: str,
    writer: Callable[[Path], None],
    root: Path,
) -> GovernanceExport:
    """Shorthand for a GovernanceExport anchored at ``root / relative_path``."""
    target = root / relative_path
    return GovernanceExport(
        path=target,
        description=description,
        function=function,
        file_type=file_type,
        writer=writer,
    )
def governance_exports(
    project_root: Path,
    portfolio: EcosystemGovernancePortfolio,
    registry: ReadinessRegistry,
    workflows: WorkflowPortfolio,
    scenarios: ScenarioPortfolio,
    graph: EvidenceGraph,
    questions: Sequence[PortfolioQuestion],
    budget: RoundLineBudget | None = None,
    compiled_orders: CompiledOrderSet | None = None,
    lifecycle: RoundExecutionPackage | None = None,
    assurance: AssuranceSuite | None = None,
) -> tuple[GovernanceExport, ...]:
    """Declare every governance artifact to write under ``project_root``.

    The base set always covers portfolio, readiness registry, workflows,
    scenarios, evidence graph, and questions; the optional budget, compiled
    orders, lifecycle, and assurance sections are appended only when their
    inputs are provided. Nothing is written here — each entry carries a
    lazy writer callback executed later by write_governance_exports.
    """
    # Mandatory artifacts (JSON + Markdown + CSV per data set).
    exports: list[GovernanceExport] = [
        export("dados/governanca-operacional.json", "Portfolio completo de governanca operacional.", "governanca", "json", lambda path: write_json(path, governance_to_plain_data(portfolio)), project_root),
        export("dados/governanca-operacional-compacta.json", "Portfolio compacto de governanca operacional.", "governanca compacta", "json", lambda path: write_json(path, compact_governance_payload(portfolio)), project_root),
        export("ecossistema/GOVERNANCA-OPERACIONAL-MAIS-HUMANA.md", "Relatorio de governanca operacional.", "governanca", "markdown", lambda path: write_text(path, governance_portfolio_markdown(portfolio)), project_root),
        export("ecossistema/CANDIDATAS-OS-GOVERNANCA.md", "Candidatas de OS por governanca.", "ordens candidatas", "markdown", lambda path: write_text(path, governance_candidates_markdown(portfolio)), project_root),
        export("matrizes/governanca-cards.csv", "Cards de governanca em CSV.", "matriz governanca", "csv", lambda path: write_text(path, governance_cards_csv(portfolio)), project_root),
        export("matrizes/governanca-checks.csv", "Checks de governanca em CSV.", "matriz governanca", "csv", lambda path: write_text(path, governance_checks_csv(portfolio)), project_root),
        export("dados/registro-prontidao-humana.json", "Registro plataforma x perfil x governanca.", "registro prontidao", "json", lambda path: write_json(path, registry), project_root),
        export("ecossistema/REGISTRO-PRONTIDAO-HUMANA.md", "Registro humano de prontidao por perfil.", "registro prontidao", "markdown", lambda path: write_text(path, registry_markdown(registry)), project_root),
        export("matrizes/registro-prontidao-humana.csv", "Registro humano em CSV.", "registro prontidao", "csv", lambda path: write_text(path, rows_to_csv(registry_rows(registry))), project_root),
        export("dados/workflows-humanos.json", "Workflows humanos em JSON.", "workflows", "json", lambda path: write_json(path, workflows), project_root),
        export("ecossistema/WORKFLOWS-HUMANOS-OPERACIONAIS.md", "Workflows humanos operacionais.", "workflows", "markdown", lambda path: write_text(path, workflow_markdown(workflows)), project_root),
        export("matrizes/workflows-humanos.csv", "Workflows humanos em CSV.", "workflows", "csv", lambda path: write_text(path, rows_to_csv(workflow_rows(workflows))), project_root),
        export("dados/cenarios-governanca.json", "Cenarios de aceite de governanca.", "cenarios", "json", lambda path: write_json(path, scenarios), project_root),
        export("ecossistema/CENARIOS-ACEITE-GOVERNANCA.md", "Cenarios de aceite de governanca.", "cenarios", "markdown", lambda path: write_text(path, scenarios_markdown(scenarios)), project_root),
        export("matrizes/cenarios-governanca.csv", "Cenarios de governanca em CSV.", "cenarios", "csv", lambda path: write_text(path, rows_to_csv(scenario_rows(scenarios))), project_root),
        export("dados/grafo-evidencias.json", "Grafo de evidencias em JSON.", "grafo evidencias", "json", lambda path: write_json(path, graph), project_root),
        export("ecossistema/GRAFO-EVIDENCIAS-MAIS-HUMANA.md", "Grafo de evidencias em Markdown.", "grafo evidencias", "markdown", lambda path: write_text(path, graph_markdown(graph)), project_root),
        export("graficos/grafo-evidencias.dot", "Grafo de evidencias em DOT.", "grafo evidencias", "dot", lambda path: write_text(path, graph_dot(graph)), project_root),
        export("matrizes/grafo-evidencias.csv", "Grafo de evidencias em CSV.", "grafo evidencias", "csv", lambda path: write_text(path, rows_to_csv(graph_rows(graph))), project_root),
        export("dados/perguntas-governanca.json", "Perguntas operacionais sobre governanca.", "perguntas governanca", "json", lambda path: write_json(path, compact_question_payload(questions)), project_root),
        export("ecossistema/PERGUNTAS-OPERACIONAIS-GOVERNANCA.md", "Perguntas operacionais sobre governanca.", "perguntas governanca", "markdown", lambda path: write_text(path, questions_markdown(questions)), project_root),
        export("matrizes/perguntas-governanca.csv", "Perguntas de governanca em CSV.", "perguntas governanca", "csv", lambda path: write_text(path, rows_to_csv(questions_rows(questions))), project_root),
    ]
    # Optional: round line-budget artifacts.
    if budget is not None:
        exports.extend(
            [
                export("dados/budget-linhas-rodada.json", "Budget de linhas da rodada.", "budget linhas", "json", lambda path: write_json(path, budget), project_root),
                export("ecossistema/BUDGET-LINHAS-RODADA.md", "Budget de linhas da rodada.", "budget linhas", "markdown", lambda path: write_text(path, budget_markdown(budget)), project_root),
                export("matrizes/budget-linhas-rodada.csv", "Budget de linhas em CSV.", "budget linhas", "csv", lambda path: write_text(path, rows_to_csv(budget_rows(budget))), project_root),
            ]
        )
    # Optional: compiled governance orders.
    if compiled_orders is not None:
        exports.extend(
            [
                export("dados/ordens-governanca-compiladas.json", "Ordens compiladas por governanca.", "ordens compiladas", "json", lambda path: write_json(path, compiled_orders), project_root),
                export("ecossistema/ORDENS-GOVERNANCA-COMPILADAS.md", "Ordens compiladas por governanca.", "ordens compiladas", "markdown", lambda path: write_text(path, compiled_orders_markdown(compiled_orders)), project_root),
                export("matrizes/ordens-governanca-compiladas.csv", "Ordens compiladas em CSV.", "ordens compiladas", "csv", lambda path: write_text(path, rows_to_csv(order_coverage_rows(compiled_orders))), project_root),
                export("matrizes/candidatas-governanca-usadas.csv", "Candidatas usadas em ordens compiladas.", "ordens compiladas", "csv", lambda path: write_text(path, rows_to_csv(source_candidate_rows(compiled_orders))), project_root),
            ]
        )
    # Optional: order lifecycle artifacts.
    if lifecycle is not None:
        exports.extend(
            [
                export("dados/lifecycle-ordens-ativas.json", "Lifecycle de ordens ativas.", "lifecycle ordens", "json", lambda path: write_json(path, lifecycle), project_root),
                export("ecossistema/LIFECYCLE-ORDENS-ATIVAS.md", "Lifecycle de ordens ativas.", "lifecycle ordens", "markdown", lambda path: write_text(path, lifecycle_execution_markdown(lifecycle)), project_root),
                export("ecossistema/PENDENCIAS-LIFECYCLE-ORDENS.md", "Pendencias do lifecycle de ordens.", "lifecycle ordens", "markdown", lambda path: write_text(path, lifecycle_pending_markdown(lifecycle)), project_root),
                export("ecossistema/AUDITORIA-LIFECYCLE-ORDENS.md", "Auditoria do lifecycle de ordens.", "lifecycle ordens", "markdown", lambda path: write_text(path, lifecycle_audit_markdown(lifecycle)), project_root),
                export("ecossistema/FILA-ATIVA-LIFECYCLE.md", "Fila ativa do lifecycle.", "lifecycle ordens", "markdown", lambda path: write_text(path, lifecycle_queue_markdown(lifecycle)), project_root),
            ]
        )
    # Optional: round assurance artifacts.
    if assurance is not None:
        exports.extend(
            [
                export("dados/assurance-rodada.json", "Assurance da rodada em JSON.", "assurance", "json", lambda path: write_json(path, assurance), project_root),
                export("ecossistema/ASSURANCE-RODADA-MAIS-HUMANA.md", "Assurance da rodada.", "assurance", "markdown", lambda path: write_text(path, assurance_markdown(assurance)), project_root),
                export("matrizes/assurance-rodada.csv", "Assurance da rodada em CSV.", "assurance", "csv", lambda path: write_text(path, rows_to_csv(assurance_rows(assurance))), project_root),
            ]
        )
    return tuple(exports)
def write_governance_exports(
    project_root: Path,
    exports: Sequence[GovernanceExport],
    relation_to_order: str,
) -> GovernanceExportBundle:
    """Run every export writer, collecting written paths, records, and errors.

    An OSError raised by a writer is captured per file instead of aborting
    the whole batch.
    """
    files: list[Path] = []
    generated: list[GeneratedFile] = []
    failures: list[str] = []
    for entry in exports:
        try:
            entry.writer(entry.path)
        except OSError as exc:
            failures.append(f"{entry.path}: {exc}")
            continue
        files.append(entry.path)
        generated.append(
            record_for(entry.path, project_root, entry.description, entry.function, entry.file_type, relation_to_order)
        )
    return GovernanceExportBundle(files=tuple(files), generated_records=tuple(generated), errors=tuple(failures))
def central_lifecycle_exports(platform_folder: Path, lifecycle: RoundExecutionPackage) -> tuple[GovernanceExport, ...]:
    """Declare the central lifecycle artifacts written under ``platform_folder``.

    NOTE(review): the index file reuses lifecycle_execution_markdown, the
    same renderer as the execution report — confirm that is intentional.
    """
    return (
        export("reports/EXECUTADO__fechamento-ordens-ativas.md", "Fechamento das ordens ativas.", "lifecycle ordens", "markdown", lambda path: write_text(path, lifecycle_execution_markdown(lifecycle)), platform_folder),
        export("reports/PENDENCIAS-CODEX__fechamento-ordens-ativas.md", "Pendencias do fechamento das ordens.", "lifecycle ordens", "markdown", lambda path: write_text(path, lifecycle_pending_markdown(lifecycle)), platform_folder),
        export("audit/AUDITORIA-GPT__fechamento-ordens-ativas.md", "Auditoria do fechamento das ordens.", "lifecycle ordens", "markdown", lambda path: write_text(path, lifecycle_audit_markdown(lifecycle)), platform_folder),
        export("current/active-order-queue.md", "Fila ativa apos rodada.", "fila ativa", "markdown", lambda path: write_text(path, lifecycle_queue_markdown(lifecycle)), platform_folder),
        export("indexes/orders-lifecycle-index.md", "Indice lifecycle das ordens.", "indice ordens", "markdown", lambda path: write_text(path, lifecycle_execution_markdown(lifecycle)), platform_folder),
    )
def write_central_lifecycle_exports(platform_folder: Path, lifecycle: RoundExecutionPackage) -> tuple[Path, ...]:
    """Write the central lifecycle artifacts and return their paths.

    Unlike write_governance_exports, writer errors propagate to the caller.
    """
    paths: list[Path] = []
    for entry in central_lifecycle_exports(platform_folder, lifecycle):
        entry.writer(entry.path)
        paths.append(entry.path)
    return tuple(paths)

View File

@@ -0,0 +1,564 @@
"""Typed governance models for the Mais Humana operational layer.
The platform already scans repositories and builds human-facing reports. This
module adds a more operational vocabulary around that data: domains, axes,
check templates, check results, order lifecycle records, and round closeout
packages. Keeping these records explicit makes it possible to explain why an
order was closed, why a blocker remains real, and which next order is justified
by local evidence.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, Mapping, Sequence
from .models import OrderType, as_plain_data, merge_unique, slugify, utc_now
class GovernanceDomain(str, Enum):
    """Cross-platform operational domains tracked by Mais Humana.

    NOTE(review): DOCUMENTATION and DOCS coexist as distinct members —
    confirm the intended distinction between the two.
    """
    REPOSITORY = "repository"
    DOCUMENTATION = "documentation"
    CONTRACT = "contract"
    TESTS = "tests"
    RUNTIME = "runtime"
    MCP = "mcp"
    IDENTITY = "identity"
    BUSINESS = "business"
    DOCS = "docs"
    INTEGRATIONS = "integrations"
    COMPLIANCE = "compliance"
    FINANCE = "finance"
    CUSTOMER_OPS = "customer_ops"
    UI = "ui"
    CLOUD = "cloud"
    SECURITY = "security"
    OBSERVABILITY = "observability"
    SUPPORT = "support"
    GOVERNANCE = "governance"
    HUMAN_EXPERIENCE = "human_experience"
class GovernanceAxis(str, Enum):
    """A specific axis used to decide human-operational maturity."""
    # Each CheckTemplate is tagged with exactly one axis (see CheckTemplate.axis).
    LOCAL_TRACEABILITY = "local_traceability"
    HUMAN_DOCUMENTATION = "human_documentation"
    CONTRACT_VERSIONING = "contract_versioning"
    TESTABLE_BEHAVIOR = "testable_behavior"
    PANEL_BACKEND = "panel_backend"
    GPT_PANEL_EQUIVALENCE = "gpt_panel_equivalence"
    IDENTITY_SCOPE = "identity_scope"
    BUSINESS_GATE = "business_gate"
    DOCS_CANONICALITY = "docs_canonicality"
    BYOK_CREDENTIALS = "byok_credentials"
    SECRET_REDACTION = "secret_redaction"
    SUPPORT_DIAGNOSTICS = "support_diagnostics"
    INCIDENT_LIFECYCLE = "incident_lifecycle"
    AUDIT_EVIDENCE = "audit_evidence"
    READINESS_HEALTH = "readiness_health"
    CLOUD_OPERATION = "cloud_operation"
    DATA_TRUTH = "data_truth"
    RELEASE_GOVERNANCE = "release_governance"
    COMMERCIAL_MATURITY = "commercial_maturity"
    HUMAN_VALUE = "human_value"
class GovernanceStatus(str, Enum):
    """Normalized result for a governance check."""
    # Scored via STATUS_SCORE; ATTENTION/FAIL/BLOCKED trigger order candidates
    # (see GovernanceCheckResult.needs_order).
    PASS = "pass"
    ATTENTION = "attention"
    FAIL = "fail"
    BLOCKED = "blocked"
    EXCEPTION = "exception"  # deliberate, documented exception to the rule
    NOT_APPLICABLE = "not_applicable"
class GovernanceSeverity(str, Enum):
    """Severity assigned to a failed or partial check."""
    # Ranked by SEVERITY_WEIGHT (INFO lowest, CRITICAL highest).
    INFO = "info"
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
class GovernanceEvidenceKind(str, Enum):
    """Kind of evidence attached to a governance result."""
    CODE = "code"
    DOCUMENTATION = "documentation"
    CONTRACT = "contract"
    TEST = "test"
    CONFIG = "config"
    REPORT = "report"
    ORDER = "order"
    SQL = "sql"
    RUNTIME = "runtime"
    ABSENCE = "absence"  # evidence is that something expected is missing
    DERIVED = "derived"  # default kind for computed/indirect evidence
class GovernanceMaturity(str, Enum):
    """Governance maturity from a long-running project perspective."""
    # Ordered from weakest to strongest; mapped to/from a 0-100 score by
    # MATURITY_SCORE and score_to_maturity.
    ABSENT = "absent"
    FOUNDATIONAL = "foundational"
    LOCAL_READY = "local_ready"
    EXPLAINABLE = "explainable"
    PANEL_READY = "panel_ready"
    CONTROLLED = "controlled"
    INSTITUTIONAL = "institutional"
class OrderLifecycleStatus(str, Enum):
    """Lifecycle status used for central service-order files."""
    # Values are Portuguese because they appear verbatim in the order files:
    # planned, running, completed, partial, blocked, superseded, unknown.
    PLANNED = "planejada"
    RUNNING = "em_execucao"
    COMPLETED = "concluida"
    PARTIAL = "parcial"
    BLOCKED = "bloqueada"
    SUPERSEDED = "substituida"
    UNKNOWN = "desconhecida"
class RoundMinimumStatus(str, Enum):
    """Whether a round minimum was met."""
    MET = "met"
    PARTIAL = "partial"
    IMPOSSIBLE = "impossible"  # minimum could not be met in this round at all
# Numeric rank of each severity; higher means more severe (see severity_rank).
SEVERITY_WEIGHT: dict[GovernanceSeverity, int] = {
    GovernanceSeverity.INFO: 0,
    GovernanceSeverity.LOW: 1,
    GovernanceSeverity.MEDIUM: 3,
    GovernanceSeverity.HIGH: 6,
    GovernanceSeverity.CRITICAL: 10,
}
# Base 0-100 score contributed by each check status (see status_score).
# NOT_APPLICABLE scores like PASS so it never drags a rollup down.
STATUS_SCORE: dict[GovernanceStatus, int] = {
    GovernanceStatus.PASS: 100,
    GovernanceStatus.EXCEPTION: 82,
    GovernanceStatus.ATTENTION: 55,
    GovernanceStatus.FAIL: 25,
    GovernanceStatus.BLOCKED: 0,
    GovernanceStatus.NOT_APPLICABLE: 100,
}
# 0-100 equivalent of each maturity stage (inverse of score_to_maturity's bands).
MATURITY_SCORE: dict[GovernanceMaturity, int] = {
    GovernanceMaturity.ABSENT: 0,
    GovernanceMaturity.FOUNDATIONAL: 20,
    GovernanceMaturity.LOCAL_READY: 42,
    GovernanceMaturity.EXPLAINABLE: 62,
    GovernanceMaturity.PANEL_READY: 76,
    GovernanceMaturity.CONTROLLED: 88,
    GovernanceMaturity.INSTITUTIONAL: 100,
}
def severity_rank(value: GovernanceSeverity) -> int:
    """Return the numeric rank of *value*; unknown severities rank 0."""
    return SEVERITY_WEIGHT.get(value, 0)
def status_score(value: GovernanceStatus) -> int:
    """Return the 0-100 score for *value*; unknown statuses score 0."""
    return STATUS_SCORE.get(value, 0)
def maturity_score(value: GovernanceMaturity) -> int:
    """Return the 0-100 score for *value*; unknown maturities score 0."""
    return MATURITY_SCORE.get(value, 0)
def score_to_maturity(score: int) -> GovernanceMaturity:
    """Map a 0-100 score onto a maturity stage.

    The input is clamped into [0, 100] first; the bands deliberately sit a
    few points below the MATURITY_SCORE values so a stage is kept once earned.
    """
    clamped = min(100, max(0, int(score)))
    bands: tuple[tuple[int, GovernanceMaturity], ...] = (
        (92, GovernanceMaturity.INSTITUTIONAL),
        (82, GovernanceMaturity.CONTROLLED),
        (72, GovernanceMaturity.PANEL_READY),
        (58, GovernanceMaturity.EXPLAINABLE),
        (36, GovernanceMaturity.LOCAL_READY),
        (12, GovernanceMaturity.FOUNDATIONAL),
    )
    for floor, maturity in bands:
        if clamped >= floor:
            return maturity
    return GovernanceMaturity.ABSENT
def worst_severity(values: Iterable[GovernanceSeverity]) -> GovernanceSeverity:
    """Return the highest-ranked severity in *values*, or INFO when empty."""
    return max(values, key=severity_rank, default=GovernanceSeverity.INFO)
def normalize_path(path: str | Path) -> str:
    """Return *path* as a string using forward slashes only."""
    return "/".join(str(path).split("\\"))
@dataclass(slots=True)
class GovernanceEvidence:
    """Evidence reference used by governance checks."""
    path: str  # file path; normalized to forward slashes on access
    summary: str  # one-line human explanation of what this evidence shows
    kind: GovernanceEvidenceKind = GovernanceEvidenceKind.DERIVED
    line: int | None = None  # optional line number within *path*
    confidence: float = 0.5  # 0.0-1.0 confidence in the evidence
    excerpt_hash: str = ""  # optional hash of the referenced excerpt
    @property
    def reference(self) -> str:
        """Return ``path`` or ``path:line`` when a line number is known."""
        path = normalize_path(self.path)
        if self.line is None:
            return path
        return f"{path}:{self.line}"
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
@dataclass(slots=True)
class CheckTemplate:
    """Declarative governance check evaluated against a platform report."""
    check_id: str
    axis: GovernanceAxis
    domain: GovernanceDomain
    title: str
    purpose: str
    # Term tuples presumably matched against report text by the evaluator
    # (evaluation logic lives elsewhere — confirm exact matching semantics).
    positive_terms: tuple[str, ...]
    negative_terms: tuple[str, ...]
    required_terms: tuple[str, ...]
    evidence_hints: tuple[str, ...]
    applies_to: tuple[str, ...]  # platform ids; empty means "all platforms"
    not_applicable_reason: str
    # Human summaries used for each possible outcome of the check.
    pass_summary: str
    attention_summary: str
    fail_summary: str
    suggested_action: str
    validation_steps: tuple[str, ...]
    severity: GovernanceSeverity = GovernanceSeverity.MEDIUM
    required_for_human_ready: bool = False  # failing this check blocks human readiness
    # Order generated when the check fails or needs attention.
    creates_order_type: OrderType = OrderType.MANAGERIAL
    order_title: str = ""  # falls back to *title* when empty
    order_priority: str = "media"
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
    @property
    def normalized_order_title(self) -> str:
        """Return the explicit order title, or the check title as fallback."""
        return self.order_title or self.title
    def applies_to_platform(self, platform_id: str) -> bool:
        """True when *applies_to* is empty (all) or contains *platform_id*."""
        return not self.applies_to or platform_id in self.applies_to
@dataclass(slots=True)
class GovernanceCheckResult:
    """Evaluated result for one governance check."""
    check_id: str
    platform_id: str
    axis: GovernanceAxis
    domain: GovernanceDomain
    title: str
    status: GovernanceStatus
    severity: GovernanceSeverity
    maturity: GovernanceMaturity
    score: int  # 0-100
    reason: str  # why the check got this status
    next_action: str  # concrete remediation step
    evidence: tuple[GovernanceEvidence, ...]
    validation_steps: tuple[str, ...]
    # Metadata for the service order this result may spawn.
    order_title: str
    order_type: OrderType
    order_priority: str
    required_for_human_ready: bool
    generated_at: str = field(default_factory=utc_now)
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
    @property
    def is_blocking(self) -> bool:
        """True when BLOCKED, or FAIL/BLOCKED on a human-readiness-required check."""
        return self.status == GovernanceStatus.BLOCKED or (
            self.required_for_human_ready
            and self.status in {GovernanceStatus.FAIL, GovernanceStatus.BLOCKED}
        )
    @property
    def needs_order(self) -> bool:
        """True for any non-passing status that should generate an order."""
        return self.status in {GovernanceStatus.ATTENTION, GovernanceStatus.FAIL, GovernanceStatus.BLOCKED}
    @property
    def compact_status(self) -> str:
        """Compact ``status/severity/score`` string for logs and tables."""
        return f"{self.status.value}/{self.severity.value}/{self.score}"
@dataclass(slots=True)
class PlatformGovernanceCard:
    """Governance rollup for one platform."""
    platform_id: str
    title: str
    repo_path: str
    checks: tuple[GovernanceCheckResult, ...]
    human_score: int  # 0-100
    governance_score: int  # 0-100
    maturity: GovernanceMaturity
    blockers: tuple[GovernanceCheckResult, ...]  # blocking subset of *checks*
    warnings: tuple[GovernanceCheckResult, ...]
    strengths: tuple[str, ...]
    next_actions: tuple[str, ...]
    relation_summary: tuple[str, ...]  # human lines describing cross-platform relations
    generated_at: str = field(default_factory=utc_now)
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
    @property
    def ready_for_human_round(self) -> bool:
        """True at score >= 72 with no blockers (matches PANEL_READY band)."""
        return self.governance_score >= 72 and not self.blockers
    @property
    def status_label(self) -> str:
        """Portuguese label: blockers dominate, then score bands 82/62/42."""
        if any(item.severity == GovernanceSeverity.CRITICAL for item in self.blockers):
            return "critico"
        if self.blockers:
            return "bloqueado"
        if self.governance_score >= 82:
            return "controlado"
        if self.governance_score >= 62:
            return "explicavel"
        if self.governance_score >= 42:
            return "local"
        return "fundacao"
    def checks_by_domain(self) -> dict[str, list[GovernanceCheckResult]]:
        """Group checks by domain, each group worst-severity first."""
        grouped: dict[str, list[GovernanceCheckResult]] = {}
        for check in self.checks:
            grouped.setdefault(check.domain.value, []).append(check)
        for checks in grouped.values():
            checks.sort(key=lambda item: (-severity_rank(item.severity), item.axis.value, item.title))
        return grouped
@dataclass(slots=True)
class GovernanceOrderCandidate:
    """Candidate order created from failed or partial governance checks."""
    candidate_id: str
    platform_id: str
    order_type: OrderType
    title: str
    purpose: str
    reason: str
    expected_result: str
    priority: str
    affected_paths: tuple[str, ...]
    validations: tuple[str, ...]
    source_check_ids: tuple[str, ...]  # checks that justified this candidate
    status: OrderLifecycleStatus = OrderLifecycleStatus.PLANNED
    generated_at: str = field(default_factory=utc_now)
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
    @property
    def slug(self) -> str:
        """Slugified title; part of the dedupe key in ``dedupe_candidates``."""
        return slugify(self.title)
@dataclass(slots=True)
class EcosystemGovernancePortfolio:
    """Governance view for the whole ecosystem."""
    project_id: str
    cards: tuple[PlatformGovernanceCard, ...]
    order_candidates: tuple[GovernanceOrderCandidate, ...]
    executive_summary: tuple[str, ...]
    managerial_summary: tuple[str, ...]
    blockers_summary: tuple[str, ...]
    relation_matrix: tuple[tuple[str, str, str], ...]  # presumably (from, to, relation) — confirm with producer
    generated_at: str = field(default_factory=utc_now)
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
    @property
    def average_governance_score(self) -> int:
        """Mean governance score across cards, rounded; 0 with no cards."""
        if not self.cards:
            return 0
        return round(sum(card.governance_score for card in self.cards) / len(self.cards))
    @property
    def blocked_platforms(self) -> tuple[str, ...]:
        """Ids of platforms with at least one blocker."""
        return tuple(card.platform_id for card in self.cards if card.blockers)
    @property
    def controlled_platforms(self) -> tuple[str, ...]:
        """Ids of blocker-free platforms at score >= 82 ("controlado")."""
        return tuple(card.platform_id for card in self.cards if card.governance_score >= 82 and not card.blockers)
    def card_for(self, platform_id: str) -> PlatformGovernanceCard | None:
        """Return the card for *platform_id*, or None when unknown."""
        for card in self.cards:
            if card.platform_id == platform_id:
                return card
        return None
@dataclass(slots=True)
class ParsedServiceOrder:
    """Parsed service-order file from the central platform folder."""
    path: str
    order_id: str
    order_type: OrderType
    project_id: str
    status: OrderLifecycleStatus
    priority: str
    title: str
    platform_hint: str  # best-guess platform this order targets
    purpose: str
    object_scope: str
    reason: str
    expected_result: str
    affected_paths: tuple[str, ...]
    validations: tuple[str, ...]
    raw_headings: Mapping[str, str]  # original heading -> body text, unparsed
    parsed_at: str = field(default_factory=utc_now)
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
    @property
    def is_active(self) -> bool:
        """True for every status except COMPLETED/BLOCKED/SUPERSEDED."""
        return self.status in {
            OrderLifecycleStatus.PLANNED,
            OrderLifecycleStatus.RUNNING,
            OrderLifecycleStatus.PARTIAL,
            OrderLifecycleStatus.UNKNOWN,
        }
    @property
    def short_name(self) -> str:
        """Readable name from the id suffix after ``__``, else title/id."""
        if "__" in self.order_id:
            return self.order_id.split("__", 1)[1].replace("-", " ")
        return self.title or self.order_id
@dataclass(slots=True)
class OrderLifecycleDecision:
    """Decision produced for one parsed order in the current round."""
    order: ParsedServiceOrder
    final_status: OrderLifecycleStatus
    platform_id: str
    reason: str
    evidence: tuple[GovernanceEvidence, ...]
    pending_items: tuple[str, ...]  # what still stands between this order and closure
    resulting_candidates: tuple[str, ...]  # candidate ids spawned by this decision
    validation_steps: tuple[str, ...]
    generated_at: str = field(default_factory=utc_now)
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
    @property
    def has_pending(self) -> bool:
        """True when any pending item remains."""
        return bool(self.pending_items)
    @property
    def compact_line(self) -> str:
        """Pipe-separated one-liner: id|status|platform|pending-count."""
        return f"{self.order.order_id}|{self.final_status.value}|{self.platform_id}|{len(self.pending_items)}"
@dataclass(slots=True)
class RoundMinimum:
    """One minimum required by the service-order protocol."""
    minimum_id: str
    title: str
    required_value: int
    actual_value: int
    status: RoundMinimumStatus  # derived via minimum_status()
    reason: str
    next_action: str
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
@dataclass(slots=True)
class RoundExecutionPackage:
    """Full operational package for this service-order round."""
    round_id: str
    project_id: str
    parsed_orders: tuple[ParsedServiceOrder, ...]
    decisions: tuple[OrderLifecycleDecision, ...]
    minimums: tuple[RoundMinimum, ...]
    output_candidates: tuple[GovernanceOrderCandidate, ...]
    active_after_round: tuple[str, ...]  # order ids still active once the round closes
    completed_count: int
    partial_count: int
    blocked_count: int
    generated_at: str = field(default_factory=utc_now)
    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
    @property
    def pending_items(self) -> tuple[str, ...]:
        """Unique pending lines from all decisions plus unmet minimums."""
        items: list[str] = []
        for decision in self.decisions:
            for pending in decision.pending_items:
                items.append(f"{decision.order.order_id}: {pending}")
        for minimum in self.minimums:
            if minimum.status != RoundMinimumStatus.MET:
                items.append(f"{minimum.title}: {minimum.reason} Proxima acao: {minimum.next_action}")
        return merge_unique(items)
    @property
    def success_label(self) -> str:
        """Round label: blocked beats partial beats fully concluded."""
        if self.blocked_count:
            return "parcial-com-bloqueios"
        if self.partial_count:
            return "parcial"
        return "concluida"
def evidence_from_path(path: str | Path, summary: str, *, kind: GovernanceEvidenceKind = GovernanceEvidenceKind.DERIVED) -> GovernanceEvidence:
    """Build a GovernanceEvidence with a normalized path and 0.65 confidence."""
    return GovernanceEvidence(path=normalize_path(path), summary=summary, kind=kind, confidence=0.65)
def minimum_status(actual: int, required: int, impossible: bool = False) -> RoundMinimumStatus:
    """Classify a round minimum: MET when reached, else IMPOSSIBLE/PARTIAL."""
    if actual >= required:
        return RoundMinimumStatus.MET
    return RoundMinimumStatus.IMPOSSIBLE if impossible else RoundMinimumStatus.PARTIAL
def dedupe_candidates(candidates: Iterable[GovernanceOrderCandidate], limit: int | None = None) -> tuple[GovernanceOrderCandidate, ...]:
    """Deduplicate candidates on (platform, type, slug), keeping sort order.

    Candidates are first ordered by type, priority, platform and title so the
    optional *limit* keeps a stable, deterministic prefix.
    """
    ranked = sorted(candidates, key=lambda item: (item.order_type.value, item.priority, item.platform_id, item.title))
    unique: list[GovernanceOrderCandidate] = []
    seen_keys: set[tuple[str, str, str]] = set()
    for entry in ranked:
        fingerprint = (entry.platform_id, entry.order_type.value, entry.slug)
        if fingerprint in seen_keys:
            continue
        seen_keys.add(fingerprint)
        unique.append(entry)
        if limit is not None and len(unique) >= limit:
            break
    return tuple(unique)

View File

@@ -0,0 +1,326 @@
"""Acceptance scenarios for governance-driven service-order execution."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Sequence
from .governance_models import EcosystemGovernancePortfolio, GovernanceStatus
from .models import as_plain_data, merge_unique
@dataclass(slots=True)
class ScenarioAssertion:
    """One term-based expectation checked against a platform's card text."""
    assertion_id: str
    platform_id: str
    expected_terms: tuple[str, ...]  # all must appear (case-insensitive)
    forbidden_terms: tuple[str, ...]  # none may appear
    explanation: str
    remediation: str  # action suggested when the assertion fails
    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
@dataclass(slots=True)
class GovernanceScenario:
    """A named acceptance scenario composed of several assertions."""
    scenario_id: str
    title: str
    purpose: str
    owner_domain: str
    assertions: tuple[ScenarioAssertion, ...]
    success_artifacts: tuple[str, ...]  # artifacts expected when the scenario holds
    failure_impact: str  # human description of what breaks if it fails
    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
@dataclass(slots=True)
class ScenarioAssertionResult:
    """Outcome of evaluating one ScenarioAssertion."""
    assertion_id: str
    platform_id: str
    passed: bool
    missing_terms: tuple[str, ...]  # expected terms not found
    forbidden_hits: tuple[str, ...]  # forbidden terms that were found
    explanation: str
    remediation: str
    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
@dataclass(slots=True)
class ScenarioEvaluation:
    """Rollup of all assertion results for one scenario."""
    scenario_id: str
    title: str
    status: str  # "pronto" / "util" / "atencao" / "bloqueado"
    score: int  # 0-100 percentage of passed assertions
    assertion_results: tuple[ScenarioAssertionResult, ...]
    next_actions: tuple[str, ...]  # unique remediations of failed assertions
    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
@dataclass(slots=True)
class ScenarioPortfolio:
    """All scenarios plus their evaluations and a short summary."""
    scenarios: tuple[GovernanceScenario, ...]
    evaluations: tuple[ScenarioEvaluation, ...]
    summary: tuple[str, ...]
    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data via the shared ``as_plain_data`` helper."""
        return as_plain_data(self)
def assertion(
    assertion_id: str,
    platform_id: str,
    expected_terms: Iterable[str],
    forbidden_terms: Iterable[str],
    explanation: str,
    remediation: str,
) -> ScenarioAssertion:
    """Build a ScenarioAssertion, coercing both term iterables to tuples."""
    return ScenarioAssertion(
        assertion_id=assertion_id,
        platform_id=platform_id,
        expected_terms=tuple(expected_terms),
        forbidden_terms=tuple(forbidden_terms),
        explanation=explanation,
        remediation=remediation,
    )
# Default acceptance scenarios evaluated by build_scenario_portfolio.
# GovernanceScenario fields are positional here: (scenario_id, title, purpose,
# owner_domain, assertions, success_artifacts, failure_impact).
SCENARIOS: tuple[GovernanceScenario, ...] = (
    # Docs must become an explicit decision (minimal reading or exception).
    GovernanceScenario(
        "docs-blocker-reconciliation",
        "Docs deixa de ser blocker ambiguo",
        "A rodada deve transformar Docs catalogOnly em leitura minima responseReady ou excecao deliberada.",
        "docs",
        (
            assertion("docs-decision", "docs", ("docs", "catalog", "response", "excecao"), ("ambig",), "Docs precisa de decisao formal.", "registrar decisao Docs"),
            assertion("mcp-readiness", "mcps", ("readiness", "docs", "blocker"), ("global indevido",), "MCP deve refletir a decisao.", "atualizar readiness MCP"),
            assertion("ui-explainable", "ui", ("panel", "same", "source"), ("diverg",), "UI/GPT devem explicar a mesma fonte.", "validar sameSource"),
        ),
        ("decisao Docs", "readiness MCP", "contrato UI/GPT"),
        "Docs segue bloqueando maturidade global sem clareza institucional.",
    ),
    # BYOK end-to-end: actor, org, entitlement, credentialRef, smoke, audit.
    GovernanceScenario(
        "byok-tenant-controlled",
        "BYOK por tenant sem vazamento",
        "Validar BYOK completo com ator, organizacao, entitlement, credentialRef, smoke e audit.",
        "integrations",
        (
            assertion("identity-actor", "identity", ("actor", "organization", "tenant"), ("unknown",), "Ator e tenant precisam existir.", "criar usuario e organizacao"),
            assertion("business-entitlement", "business", ("entitlement", "product", "plan"), ("sem entitlement",), "Uso precisa ser autorizado comercialmente.", "vincular produto"),
            assertion("credential-ref", "integracoes", ("byok", "credentialref", "smoke"), ("token bruto", "secret="), "Credencial deve virar ref segura.", "gerar credentialRef e smoke"),
            assertion("redaction", "compliance", ("redaction", "audit", "trace"), ("password", "api_key"), "Nao vazamento deve ser provado.", "executar redaction check"),
        ),
        ("credentialRef", "smoke readonly", "auditId", "usage"),
        "Autosservico parece pronto, mas falha no primeiro cliente real.",
    ),
    # Sellable product gated by Business: plan, quota, billing, support.
    GovernanceScenario(
        "business-sellable-control",
        "Produto vendavel com gate Business",
        "Garantir que produto, plano, quota e bloqueio venham de Business.",
        "business",
        (
            assertion("plan", "business", ("plan", "entitlement", "quota"), ("divergente",), "Plano e quota precisam de fonte unica.", "publicar contrato Business"),
            assertion("billing", "finance", ("invoice", "usage", "reconciliation"), ("sem fatura",), "Finance precisa reconciliar cobranca.", "gerar extrato"),
            assertion("support", "customer_ops", ("support", "diagnostic", "next"), ("erro bruto",), "Suporte precisa explicar bloqueio.", "criar mensagem humana"),
        ),
        ("entitlement", "usage", "invoice", "diagnostic"),
        "Venda e suporte ficam baseados em regras dispersas.",
    ),
    # Panel and GPT must render the same data source with matching hashes.
    GovernanceScenario(
        "panel-gpt-same-source",
        "Painel e GPT compartilham a mesma fonte",
        "Garantir que screenData, explicacao GPT e evidencia tenham hashes compatíveis.",
        "mcp",
        (
            assertion("screen", "mcps", ("screen", "view", "panel"), ("fonte paralela",), "MCP deve criar instancia de tela.", "criar viewInstance"),
            assertion("hashes", "mcps", ("source", "hash", "records"), ("mismatch",), "Hashes devem existir.", "gerar source hashes"),
            assertion("ui", "ui", ("panelready", "samesource"), ("false",), "UI deve renderizar mesmo contrato.", "validar UI"),
            assertion("docs", "docs", ("contract", "schema", "version"), ("sem versao",), "Docs deve registrar contrato.", "publicar contrato"),
        ),
        ("viewInstance", "sourcePayloadHash", "sourceRecordsHash", "contractVersion"),
        "Painel e GPT podem divergir e quebrar confianca humana.",
    ),
    # RBAC must prove both the allow and the deny path, with audit.
    GovernanceScenario(
        "identity-rbac-negative-path",
        "RBAC com caminho negado provado",
        "Provar tanto allow quanto deny para papel, escopo e tenant.",
        "identity",
        (
            assertion("roles", "identity", ("role", "scope", "permission"), ("allow all",), "Papeis precisam estar mapeados.", "criar matriz RBAC"),
            assertion("deny", "identity", ("deny", "403", "forbidden"), ("sem negacao",), "Negacao precisa ser testada.", "criar teste deny"),
            assertion("audit", "compliance", ("audit", "trace"), ("sem audit",), "Decisao precisa ser auditavel.", "registrar auditId"),
        ),
        ("matriz RBAC", "deny evidence", "auditId"),
        "Permissao fica insegura e suporte nao consegue explicar negacao.",
    ),
    # Cloudflare work goes through wrangler; the plugin is only an experiment.
    GovernanceScenario(
        "cloudflare-wrangler-not-plugin",
        "Cloudflare opera por wrangler",
        "Tratar plugin como teste inicial e usar wrangler para trabalho real.",
        "cloud",
        (
            assertion("plugin", "integracoes", ("plugin", "cloudflare", "esperada"), ("blocker",), "Plugin deve ser premissa, nao blocker.", "registrar tentativa"),
            assertion("wrangler", "integracoes", ("wrangler", "deploy", "secret"), ("sem wrangler",), "Trabalho real precisa usar wrangler.", "validar wrangler"),
            assertion("runtime", "integracoes", ("route", "binding", "health"), ("missing",), "Runtime precisa ter rotas e bindings.", "validar rota/binding"),
        ),
        ("plugin attempt", "wrangler status", "route health"),
        "Diagnostico Cloudflare fica preso em plugin experimental.",
    ),
    # Support must close the full incident loop with ownership and audit.
    GovernanceScenario(
        "customer-ops-support-cycle",
        "Suporte fecha ciclo de incidente",
        "Atendimento precisa abrir, classificar, encaminhar, resolver e auditar incidente.",
        "support",
        (
            assertion("ticket", "customer_ops", ("ticket", "incident", "open"), ("sem ticket",), "Ticket precisa existir.", "abrir incidente"),
            assertion("handoff", "customer_ops", ("handoff", "owner", "sla"), ("sem owner",), "Responsavel precisa ser definido.", "encaminhar"),
            assertion("resolution", "customer_ops", ("resolved", "evidence"), ("sem fechamento",), "Resolucao precisa ter evidencia.", "fechar incidente"),
            assertion("audit", "compliance", ("audit", "trace"), ("sem audit",), "Ciclo precisa ser auditavel.", "registrar audit"),
        ),
        ("ticket", "owner", "resolution", "audit"),
        "Suporte perde contexto e repete falhas.",
    ),
    # Every release needs a version, smoke, rollback path and changelog.
    GovernanceScenario(
        "release-version-rollback",
        "Release tem versao, regressao e rollback",
        "Cada mudanca precisa de contrato, smoke, rollback e changelog.",
        "governance",
        (
            assertion("version", "platform_base", ("contractversion", "schemaversion"), ("sem versao",), "Versao precisa ser explicita.", "gerar contrato"),
            assertion("smoke", "mcps", ("smoke", "readiness"), ("sem smoke",), "Smoke precisa rodar.", "executar smoke"),
            assertion("rollback", "platform_base", ("rollback", "previous"), ("sem rollback",), "Rollback precisa existir.", "registrar rollback"),
            assertion("docs", "docs", ("changelog", "migration"), ("sem changelog",), "Mudanca precisa ser documentada.", "publicar changelog"),
        ),
        ("contractVersion", "smoke", "rollback", "changelog"),
        "Mudancas futuras quebram contrato sem reversao clara.",
    ),
)
def card_text(portfolio: EcosystemGovernancePortfolio, platform_id: str) -> str:
    """Flatten one platform card into a lowercase, newline-joined text blob.

    Returns "" when the portfolio has no card for *platform_id*. The blob is
    what assertion terms are matched against.
    """
    card = portfolio.card_for(platform_id)
    if card is None:
        return ""
    fragments: list[str] = [card.platform_id, card.status_label, card.maturity.value]
    for check in card.checks:
        fragments += [check.check_id, check.title, check.status.value, check.reason, check.next_action]
        for evidence in check.evidence:
            fragments += [evidence.path, evidence.summary]
    return "\n".join(fragments).lower()
def evaluate_assertion(assertion_item: ScenarioAssertion, portfolio: EcosystemGovernancePortfolio) -> ScenarioAssertionResult:
    """Match the assertion's expected/forbidden terms against the card text."""
    haystack = card_text(portfolio, assertion_item.platform_id)
    missing = tuple(term for term in assertion_item.expected_terms if term.lower() not in haystack)
    forbidden_hits = tuple(term for term in assertion_item.forbidden_terms if term.lower() in haystack)
    card = portfolio.card_for(assertion_item.platform_id)
    # A missing card, or a blocker with BLOCKED status, fails the assertion outright.
    has_block = card is None or (card.blockers and any(entry.status == GovernanceStatus.BLOCKED for entry in card.blockers))
    return ScenarioAssertionResult(
        assertion_id=assertion_item.assertion_id,
        platform_id=assertion_item.platform_id,
        passed=not (missing or forbidden_hits or has_block),
        missing_terms=missing,
        forbidden_hits=forbidden_hits,
        explanation=assertion_item.explanation,
        remediation=assertion_item.remediation,
    )
def evaluate_scenario(scenario: GovernanceScenario, portfolio: EcosystemGovernancePortfolio) -> ScenarioEvaluation:
    """Evaluate each assertion of *scenario* and roll up a status and score."""
    results = tuple(evaluate_assertion(entry, portfolio) for entry in scenario.assertions)
    total = len(results)
    passed_count = sum(1 for entry in results if entry.passed)
    score = round((passed_count / total) * 100) if total else 0
    # Status bands: 90+ pronto, 70+ util, 40+ atencao, else bloqueado.
    if score >= 90:
        status = "pronto"
    elif score >= 70:
        status = "util"
    elif score >= 40:
        status = "atencao"
    else:
        status = "bloqueado"
    return ScenarioEvaluation(
        scenario_id=scenario.scenario_id,
        title=scenario.title,
        status=status,
        score=score,
        assertion_results=results,
        next_actions=merge_unique(entry.remediation for entry in results if not entry.passed),
    )
def build_scenario_portfolio(portfolio: EcosystemGovernancePortfolio, scenarios: Sequence[GovernanceScenario] = SCENARIOS) -> ScenarioPortfolio:
    """Evaluate *scenarios* against the governance portfolio and summarize."""
    evaluations = tuple(evaluate_scenario(item, portfolio) for item in scenarios)
    scores = [item.score for item in evaluations]
    average = round(sum(scores) / len(scores)) if scores else 0
    blocked_total = sum(1 for item in evaluations if item.status == "bloqueado")
    ready_total = sum(1 for item in evaluations if item.status in {"pronto", "util"})
    summary = (
        f"Cenarios avaliados: {len(evaluations)}",
        f"Cenarios prontos/uteis: {ready_total}",
        f"Cenarios bloqueados: {blocked_total}",
        f"Score medio de cenario: {average}",
    )
    return ScenarioPortfolio(scenarios=tuple(scenarios), evaluations=evaluations, summary=summary)
def scenarios_markdown(portfolio: ScenarioPortfolio) -> str:
    """Render the scenario portfolio as a markdown report, worst scores first."""
    scenario_index = {item.scenario_id: item for item in portfolio.scenarios}
    out: list[str] = ["# Cenarios de aceite de governanca", ""]
    out += [f"- {entry}" for entry in portfolio.summary]
    out.append("")
    for evaluation in sorted(portfolio.evaluations, key=lambda item: (item.score, item.scenario_id)):
        scenario = scenario_index[evaluation.scenario_id]
        out += [
            f"## {scenario.title}",
            "",
            f"- scenario_id: `{scenario.scenario_id}`",
            f"- dominio: `{scenario.owner_domain}`",
            f"- status: `{evaluation.status}`",
            f"- score: `{evaluation.score}`",
            f"- impacto se falhar: {scenario.failure_impact}",
        ]
        if evaluation.next_actions:
            out.append("- proximas acoes:")
            out += [f"  - {action}" for action in evaluation.next_actions]
        out.append("- assertions:")
        for result in evaluation.assertion_results:
            marker = "ok" if result.passed else "falha"
            notes: list[str] = []
            if result.missing_terms:
                notes.append("faltam " + ", ".join(result.missing_terms))
            if result.forbidden_hits:
                notes.append("proibidos " + ", ".join(result.forbidden_hits))
            detail_text = "; ".join(notes) if notes else result.explanation
            out.append(f"  - `{marker}` `{result.platform_id}.{result.assertion_id}`: {detail_text}")
        out.append("")
    return "\n".join(out).strip() + "\n"
def scenario_rows(portfolio: ScenarioPortfolio) -> list[list[str]]:
    """Tabulate every assertion result as CSV-ready string rows (header first)."""
    header = ["scenario_id", "status", "score", "assertion_id", "platform", "passed", "missing", "forbidden", "remediation"]
    body = [
        [
            evaluation.scenario_id,
            evaluation.status,
            str(evaluation.score),
            result.assertion_id,
            result.platform_id,
            "yes" if result.passed else "no",
            " | ".join(result.missing_terms),
            " | ".join(result.forbidden_hits),
            result.remediation,
        ]
        for evaluation in portfolio.evaluations
        for result in evaluation.assertion_results
    ]
    return [header, *body]
def scenario_action_items(portfolio: ScenarioPortfolio, limit: int = 25) -> tuple[str, ...]:
    """Collect up to *limit* unique remediation actions, worst scenarios first.

    Fix: the previous implementation applied *limit* to the raw list before
    ``merge_unique`` deduplicated it, so duplicate actions could crowd out
    distinct ones and the result could end up shorter than *limit* even when
    more unique actions were available. Deduplicate first, then truncate.
    """
    ordered = sorted(portfolio.evaluations, key=lambda item: (item.score, item.scenario_id))
    actions = [
        f"{evaluation.scenario_id}: {action}"
        for evaluation in ordered
        for action in evaluation.next_actions
    ]
    # merge_unique returns a tuple, so slicing keeps the tuple return type.
    return merge_unique(actions)[:limit]

View File

@@ -0,0 +1,389 @@
"""SQLite persistence for governance-specific semantic state."""
from __future__ import annotations
import json
import sqlite3
from pathlib import Path
from typing import Iterable
from .governance_models import EcosystemGovernancePortfolio, PlatformGovernanceCard, GovernanceCheckResult
from .human_readiness_registry import ReadinessRegistry, ReadinessRegistryEntry
from .models import as_plain_data, utc_now
from .round_assurance import AssuranceSuite, AssuranceCase
from .runtime_budget import RoundLineBudget, RepositoryLineBudget
from .service_order_lifecycle import RoundExecutionPackage, OrderLifecycleDecision
from .workflow_registry import WorkflowPortfolio, WorkflowEvaluation
from .governance_scenarios import ScenarioPortfolio, ScenarioEvaluation
GOVERNANCE_SCHEMA = """
CREATE TABLE IF NOT EXISTS governance_cards (
id INTEGER PRIMARY KEY AUTOINCREMENT,
platform_id TEXT UNIQUE NOT NULL,
status_label TEXT NOT NULL,
governance_score INTEGER NOT NULL,
human_score INTEGER NOT NULL,
maturity TEXT NOT NULL,
blocker_count INTEGER NOT NULL,
warning_count INTEGER NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS governance_checks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
check_key TEXT UNIQUE NOT NULL,
platform_id TEXT NOT NULL,
check_id TEXT NOT NULL,
axis TEXT NOT NULL,
domain TEXT NOT NULL,
status TEXT NOT NULL,
severity TEXT NOT NULL,
score INTEGER NOT NULL,
next_action TEXT NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS readiness_registry (
id INTEGER PRIMARY KEY AUTOINCREMENT,
entry_id TEXT UNIQUE NOT NULL,
platform_id TEXT NOT NULL,
profile_id TEXT NOT NULL,
human_score INTEGER NOT NULL,
governance_score INTEGER NOT NULL,
status TEXT NOT NULL,
recommended_action TEXT NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS workflow_evaluations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
workflow_id TEXT UNIQUE NOT NULL,
status TEXT NOT NULL,
score INTEGER NOT NULL,
passed_steps INTEGER NOT NULL,
total_steps INTEGER NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS scenario_evaluations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scenario_id TEXT UNIQUE NOT NULL,
status TEXT NOT NULL,
score INTEGER NOT NULL,
assertion_count INTEGER NOT NULL,
failed_assertion_count INTEGER NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS assurance_cases (
id INTEGER PRIMARY KEY AUTOINCREMENT,
case_id TEXT UNIQUE NOT NULL,
passed INTEGER NOT NULL,
severity TEXT NOT NULL,
required INTEGER NOT NULL,
title TEXT NOT NULL,
next_action TEXT NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS lifecycle_decisions (
id INTEGER PRIMARY KEY AUTOINCREMENT,
order_id TEXT UNIQUE NOT NULL,
final_status TEXT NOT NULL,
platform_id TEXT NOT NULL,
pending_count INTEGER NOT NULL,
resulting_candidate_count INTEGER NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS line_budgets (
id INTEGER PRIMARY KEY AUTOINCREMENT,
repo_name TEXT UNIQUE NOT NULL,
exists_flag INTEGER NOT NULL,
files_seen INTEGER NOT NULL,
files_counted INTEGER NOT NULL,
code_lines INTEGER NOT NULL,
technical_lines INTEGER NOT NULL,
warnings_json TEXT NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
"""
def ensure_governance_schema(conn: sqlite3.Connection) -> None:
    """Create all governance tables on *conn* if they do not exist yet."""
    conn.executescript(GOVERNANCE_SCHEMA)
def payload(value: object) -> str:
    """Serialize *value* to a stable (sorted-keys, UTF-8) JSON string."""
    return json.dumps(as_plain_data(value), ensure_ascii=False, sort_keys=True)
def upsert_governance_card(conn: sqlite3.Connection, card: PlatformGovernanceCard, now: str) -> None:
    """Insert or refresh the ``governance_cards`` row keyed on platform_id."""
    sql = """
        INSERT INTO governance_cards (
            platform_id, status_label, governance_score, human_score, maturity,
            blocker_count, warning_count, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(platform_id) DO UPDATE SET
            status_label=excluded.status_label,
            governance_score=excluded.governance_score,
            human_score=excluded.human_score,
            maturity=excluded.maturity,
            blocker_count=excluded.blocker_count,
            warning_count=excluded.warning_count,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """
    row = (
        card.platform_id,
        card.status_label,
        card.governance_score,
        card.human_score,
        card.maturity.value,
        len(card.blockers),
        len(card.warnings),
        payload(card),
        now,
    )
    conn.execute(sql, row)
def upsert_governance_check(conn: sqlite3.Connection, check: GovernanceCheckResult, now: str) -> None:
    """Insert or refresh the ``governance_checks`` row keyed on platform:check."""
    check_key = f"{check.platform_id}:{check.check_id}"
    sql = """
        INSERT INTO governance_checks (
            check_key, platform_id, check_id, axis, domain, status, severity,
            score, next_action, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(check_key) DO UPDATE SET
            axis=excluded.axis,
            domain=excluded.domain,
            status=excluded.status,
            severity=excluded.severity,
            score=excluded.score,
            next_action=excluded.next_action,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """
    row = (
        check_key,
        check.platform_id,
        check.check_id,
        check.axis.value,
        check.domain.value,
        check.status.value,
        check.severity.value,
        check.score,
        check.next_action,
        payload(check),
        now,
    )
    conn.execute(sql, row)
def upsert_registry_entry(conn: sqlite3.Connection, entry: ReadinessRegistryEntry, now: str) -> None:
    """Insert or refresh one readiness registry row, keyed by ``entry_id``.

    ``platform_id``/``profile_id`` are not updated on conflict — the entry id
    already encodes that pair, so they are immutable for a given row.
    """
    conn.execute(
        """
        INSERT INTO readiness_registry (
            entry_id, platform_id, profile_id, human_score, governance_score,
            status, recommended_action, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(entry_id) DO UPDATE SET
            human_score=excluded.human_score,
            governance_score=excluded.governance_score,
            status=excluded.status,
            recommended_action=excluded.recommended_action,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        (
            entry.entry_id,
            entry.platform_id,
            entry.profile_id,
            entry.human_score,
            entry.governance_score,
            entry.status,
            entry.recommended_action,
            payload(entry),
            now,
        ),
    )
def upsert_workflow(conn: sqlite3.Connection, evaluation: WorkflowEvaluation, now: str) -> None:
    """Insert or refresh one workflow evaluation row, keyed by ``workflow_id``."""
    conn.execute(
        """
        INSERT INTO workflow_evaluations (
            workflow_id, status, score, passed_steps, total_steps, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(workflow_id) DO UPDATE SET
            status=excluded.status,
            score=excluded.score,
            passed_steps=excluded.passed_steps,
            total_steps=excluded.total_steps,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        (evaluation.workflow_id, evaluation.status, evaluation.score, evaluation.passed_steps, evaluation.total_steps, payload(evaluation), now),
    )
def upsert_scenario(conn: sqlite3.Connection, evaluation: ScenarioEvaluation, now: str) -> None:
    """Insert or refresh one scenario evaluation row, keyed by ``scenario_id``.

    The failed-assertion count is derived here (not stored on the evaluation).
    """
    failed = sum(1 for item in evaluation.assertion_results if not item.passed)
    conn.execute(
        """
        INSERT INTO scenario_evaluations (
            scenario_id, status, score, assertion_count, failed_assertion_count, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(scenario_id) DO UPDATE SET
            status=excluded.status,
            score=excluded.score,
            assertion_count=excluded.assertion_count,
            failed_assertion_count=excluded.failed_assertion_count,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        (evaluation.scenario_id, evaluation.status, evaluation.score, len(evaluation.assertion_results), failed, payload(evaluation), now),
    )
def upsert_assurance_case(conn: sqlite3.Connection, case: AssuranceCase, now: str) -> None:
    """Insert or refresh one assurance case row, keyed by ``case_id``.

    Booleans (``passed``, ``required``) are stored as 0/1 integers for SQLite.
    """
    conn.execute(
        """
        INSERT INTO assurance_cases (
            case_id, passed, severity, required, title, next_action, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(case_id) DO UPDATE SET
            passed=excluded.passed,
            severity=excluded.severity,
            required=excluded.required,
            title=excluded.title,
            next_action=excluded.next_action,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        (case.case_id, 1 if case.passed else 0, case.severity, 1 if case.required else 0, case.title, case.next_action, payload(case), now),
    )
def upsert_lifecycle_decision(conn: sqlite3.Connection, decision: OrderLifecycleDecision, now: str) -> None:
    """Insert or refresh one lifecycle decision row, keyed by the order id.

    Pending/candidate counts are denormalized; the full decision goes into
    ``payload_json``.
    """
    conn.execute(
        """
        INSERT INTO lifecycle_decisions (
            order_id, final_status, platform_id, pending_count, resulting_candidate_count,
            payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(order_id) DO UPDATE SET
            final_status=excluded.final_status,
            platform_id=excluded.platform_id,
            pending_count=excluded.pending_count,
            resulting_candidate_count=excluded.resulting_candidate_count,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        (
            decision.order.order_id,
            decision.final_status.value,  # enum value, not the enum object
            decision.platform_id,
            len(decision.pending_items),
            len(decision.resulting_candidates),
            payload(decision),
            now,
        ),
    )
def upsert_line_budget(conn: sqlite3.Connection, repo: RepositoryLineBudget, now: str) -> None:
    """Insert or refresh one line-budget row, keyed by ``repo_name``.

    ``exists`` is stored as a 0/1 flag and warnings as a JSON array string.
    """
    conn.execute(
        """
        INSERT INTO line_budgets (
            repo_name, exists_flag, files_seen, files_counted, code_lines,
            technical_lines, warnings_json, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(repo_name) DO UPDATE SET
            exists_flag=excluded.exists_flag,
            files_seen=excluded.files_seen,
            files_counted=excluded.files_counted,
            code_lines=excluded.code_lines,
            technical_lines=excluded.technical_lines,
            warnings_json=excluded.warnings_json,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        (
            repo.repo_name,
            1 if repo.exists else 0,
            repo.files_seen,
            repo.files_counted,
            repo.code_lines,
            repo.technical_lines,
            json.dumps(list(repo.warnings), ensure_ascii=False),
            payload(repo),
            now,
        ),
    )
def write_governance_semantic_state(
    sqlite_path: Path,
    portfolio: EcosystemGovernancePortfolio,
    registry: ReadinessRegistry,
    workflows: WorkflowPortfolio,
    scenarios: ScenarioPortfolio,
    assurance: AssuranceSuite | None = None,
    lifecycle: RoundExecutionPackage | None = None,
    budget: RoundLineBudget | None = None,
) -> None:
    """Persist the full governance state into the SQLite file at *sqlite_path*.

    Upserts cards, per-card checks, registry entries, workflow and scenario
    evaluations and, when supplied, assurance cases, lifecycle decisions and
    line budgets. All rows share a single ``updated_at`` timestamp so one
    round is identifiable in the database.
    """
    sqlite_path.parent.mkdir(parents=True, exist_ok=True)
    now = utc_now()
    conn = sqlite3.connect(sqlite_path)
    try:
        # sqlite3's connection context manager only scopes the transaction
        # (commit on success, rollback on error); it does NOT close the
        # connection. Close explicitly so the file handle is released.
        with conn:
            ensure_governance_schema(conn)
            for card in portfolio.cards:
                upsert_governance_card(conn, card, now)
                for check in card.checks:
                    upsert_governance_check(conn, check, now)
            for entry in registry.entries:
                upsert_registry_entry(conn, entry, now)
            for evaluation in workflows.evaluations:
                upsert_workflow(conn, evaluation, now)
            for evaluation in scenarios.evaluations:
                upsert_scenario(conn, evaluation, now)
            if assurance is not None:
                for case in assurance.cases:
                    upsert_assurance_case(conn, case, now)
            if lifecycle is not None:
                for decision in lifecycle.decisions:
                    upsert_lifecycle_decision(conn, decision, now)
            if budget is not None:
                for repo in budget.repositories:
                    upsert_line_budget(conn, repo, now)
    finally:
        conn.close()
def governance_table_counts(sqlite_path: Path) -> dict[str, int]:
    """Return the row count of each governance table in *sqlite_path*.

    When the database file does not exist, a dict of zeros is returned without
    touching the filesystem. The connection is closed explicitly — sqlite3's
    ``with`` block only manages the transaction, not the connection lifetime.
    """
    tables = (
        "governance_cards",
        "governance_checks",
        "readiness_registry",
        "workflow_evaluations",
        "scenario_evaluations",
        "assurance_cases",
        "lifecycle_decisions",
        "line_budgets",
    )
    if not sqlite_path.exists():
        return {table: 0 for table in tables}
    conn = sqlite3.connect(sqlite_path)
    try:
        with conn:
            ensure_governance_schema(conn)
            # Table names come only from the fixed tuple above, so the
            # f-string interpolation cannot inject SQL.
            return {table: int(conn.execute(f"SELECT COUNT(*) FROM {table}").fetchone()[0]) for table in tables}
    finally:
        conn.close()

View File

@@ -0,0 +1,119 @@
"""HTML export for quick local review of human reports."""
from __future__ import annotations
from pathlib import Path
from typing import Sequence
from .models import PlatformHumanReport, ReportBundle
from .quality import PlatformQualityReport
def esc(value: object) -> str:
    """HTML-escape *value*: ampersand, angle brackets and double quotes."""
    text = str(value)
    # Ampersand must be replaced first so entities are not double-escaped.
    for raw, entity in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;"), ('"', "&quot;")):
        text = text.replace(raw, entity)
    return text
def html_shell(title: str, body: str) -> str:
    """Wrap *body* in a standalone pt-BR HTML page with the shared inline CSS.

    *title* is escaped; *body* is trusted markup produced by this module.
    Literal CSS braces are doubled (``{{``/``}}``) because this is an f-string.
    """
    return f"""<!doctype html>
<html lang="pt-BR">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{esc(title)}</title>
<style>
body {{ font-family: Arial, sans-serif; margin: 0; color: #18202a; background: #f8fafc; }}
header {{ padding: 28px 36px; background: #0f172a; color: #f8fafc; }}
main {{ padding: 24px 36px 56px; max-width: 1180px; margin: 0 auto; }}
table {{ border-collapse: collapse; width: 100%; background: white; }}
th, td {{ border: 1px solid #dbe3ef; padding: 8px 10px; text-align: left; vertical-align: top; }}
th {{ background: #e8eef7; }}
.grid {{ display: grid; grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); gap: 12px; }}
.card {{ background: white; border: 1px solid #dbe3ef; border-radius: 8px; padding: 14px; }}
.score {{ font-weight: 700; font-size: 22px; }}
.ok {{ color: #1f7a4d; }}
.warn {{ color: #a16207; }}
.blocker {{ color: #b42318; }}
</style>
</head>
<body>
<header><h1>{esc(title)}</h1></header>
<main>{body}</main>
</body>
</html>
"""
def report_card(report: PlatformHumanReport, quality: PlatformQualityReport | None) -> str:
    """Render one platform's summary card as an HTML ``<section>``.

    When *quality* is given, a colored status line is added: green when
    human-ready, red when blockers exist, amber otherwise.
    """
    quality_text = ""
    if quality is not None:
        status_class = "ok" if quality.human_ready else "blocker" if quality.blocker_count else "warn"
        quality_text = (
            f'<p class="{status_class}">technical_ready={quality.technical_ready}; '
            f"human_ready={quality.human_ready}; blockers={quality.blocker_count}; warnings={quality.warning_count}</p>"
        )
    # Only the first four human gaps are listed on the card.
    gaps = "".join(f"<li>{esc(gap)}</li>" for gap in report.missing_for_humans[:4])
    return (
        '<section class="card">'
        f"<h2>{esc(report.platform.title)}</h2>"
        f'<div class="score">{report.average_score}</div>'
        f"<p>{esc(report.summary)}</p>"
        + quality_text
        + "<ul>"
        + gaps
        + "</ul>"
        "</section>"
    )
def index_html(reports: Sequence[PlatformHumanReport], qualities: Sequence[PlatformQualityReport], bundle: ReportBundle | None = None) -> str:
    """Render the operational index page: card grid plus a tabular summary.

    Cards keep the order of *reports*; the summary table is sorted by
    platform id. When *bundle* is given, a generation-stats intro is added.
    """
    quality_by_platform = {quality.platform_id: quality for quality in qualities}
    cards = "".join(report_card(report, quality_by_platform.get(report.platform.platform_id)) for report in reports)
    rows = []
    for report in sorted(reports, key=lambda item: item.platform.platform_id):
        quality = quality_by_platform.get(report.platform.platform_id)
        rows.append(
            "<tr>"
            f"<td>{esc(report.platform.platform_id)}</td>"
            f"<td>{report.average_score}</td>"
            f"<td>{report.scan.code_lines}</td>"
            f"<td>{len(report.scan.evidence)}</td>"
            f"<td>{esc(quality.human_ready if quality else 'n/a')}</td>"
            f"<td>{esc('; '.join(report.scan.warnings[:3]))}</td>"
            "</tr>"
        )
    intro = ""
    if bundle is not None:
        intro = (
            f"<p>Gerados {len(bundle.generated_files)} arquivos, "
            f"{bundle.matrix_cells} celulas de matriz e "
            f"{bundle.total_code_lines_analyzed} linhas de codigo analisadas.</p>"
        )
    body = (
        intro
        + '<section class="grid">'
        + cards
        + "</section>"
        + "<h2>Resumo tabular</h2>"
        + "<table><thead><tr><th>Plataforma</th><th>Score</th><th>Linhas</th><th>Evidencias</th><th>Human ready</th><th>Avisos</th></tr></thead><tbody>"
        + "".join(rows)
        + "</tbody></table>"
    )
    return html_shell("Mais Humana - indice operacional", body)
def write_index_html(
    path: Path,
    reports: Sequence[PlatformHumanReport],
    qualities: Sequence[PlatformQualityReport],
    bundle: ReportBundle | None = None,
) -> Path:
    """Render the operational index page and write it to *path* (UTF-8).

    Parent directories are created as needed; *path* is returned for chaining.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    content = index_html(reports, qualities, bundle)
    path.write_text(content, encoding="utf-8")
    return path

View File

@@ -0,0 +1,209 @@
"""Human readiness registry derived from platform and governance reports."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence
from .catalog import HUMAN_PROFILES, PLATFORM_BY_ID
from .governance_models import EcosystemGovernancePortfolio, GovernanceDomain, GovernanceStatus
from .models import MatrixCell, PlatformHumanReport, as_plain_data, merge_unique
@dataclass(slots=True)
class ReadinessRegistryEntry:
    """One platform x profile readiness record built from matrix and governance data."""

    entry_id: str  # "<platform_id>.<profile_id>"
    platform_id: str
    profile_id: str
    platform_title: str
    profile_name: str
    human_score: int  # matrix cell score; 0 when no cell was generated
    governance_score: int  # governance card score; 0 when no card exists
    status: str  # readiness label (e.g. "pronto", "util") or a card status
    strengths: tuple[str, ...]
    gaps: tuple[str, ...]
    evidence_refs: tuple[str, ...]
    recommended_action: str

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this entry (JSON-friendly)."""
        return as_plain_data(self)
@dataclass(slots=True)
class ReadinessRegistry:
    """Aggregated readiness view across all platforms and human profiles."""

    project_id: str
    entries: tuple[ReadinessRegistryEntry, ...]
    # (platform_id, human avg, governance avg, status) per platform.
    platform_summary: tuple[tuple[str, int, int, str], ...]
    # (profile_id, human avg, weak-platform count) per profile.
    profile_summary: tuple[tuple[str, int, int], ...]
    # (platform_id, domain, problem-check count) rows.
    domain_gaps: tuple[tuple[str, str, int], ...]

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of the registry (JSON-friendly)."""
        return as_plain_data(self)

    @property
    def weak_entries(self) -> tuple[ReadinessRegistryEntry, ...]:
        """Entries with human score below 60 or a blocked/critical status."""
        return tuple(entry for entry in self.entries if entry.human_score < 60 or entry.status in {"bloqueado", "critico"})
def cell_by_platform_profile(reports: Sequence[PlatformHumanReport]) -> dict[tuple[str, str], MatrixCell]:
    """Index every matrix cell across *reports* by ``(platform_id, profile_id)``.

    Later reports win on duplicate keys, matching dict insertion semantics.
    """
    return {
        (cell.platform_id, cell.profile_id): cell
        for report in reports
        for cell in report.cells
    }
def status_for_entry(human_score: int, governance_score: int, card_status: str) -> str:
    """Map scores plus the card status onto a coarse readiness label."""
    if card_status in ("critico", "bloqueado"):
        # A blocked or critical governance card overrides any score.
        return card_status
    for human_min, gov_min, label in ((85, 80, "pronto"), (70, 65, "util")):
        if human_score >= human_min and governance_score >= gov_min:
            return label
    if human_score >= 50 or governance_score >= 50:
        return "atencao"
    return "fragil"
def action_for_entry(cell: MatrixCell | None, card_status: str, domain_hint: str) -> str:
    """Suggest the single most useful next action for one registry entry."""
    if card_status in ("critico", "bloqueado"):
        return "resolver blockers de governanca antes de promover experiencia humana"
    if cell is None:
        return "gerar matriz humana para esta combinacao"
    if cell.score < 50:
        return "criar tela, relatorio ou comando humano para o perfil"
    first_gap = cell.gaps[0] if cell.gaps else None
    if first_gap is not None:
        return f"fechar lacuna principal: {first_gap}"
    return f"manter regressao e evidencia no dominio {domain_hint}"
def build_registry_entries(reports: Sequence[PlatformHumanReport], portfolio: EcosystemGovernancePortfolio) -> tuple[ReadinessRegistryEntry, ...]:
    """Cross every scanned platform with every human profile into registry entries.

    Degrades gracefully: a missing matrix cell yields human score 0 with a
    "matriz nao gerada" gap; a missing governance card yields score 0 and
    status "desconhecido".
    """
    cells = cell_by_platform_profile(reports)
    entries: list[ReadinessRegistryEntry] = []
    for report in reports:
        card = portfolio.card_for(report.platform.platform_id)
        governance_score = card.governance_score if card else 0
        card_status = card.status_label if card else "desconhecido"
        # First primary category hints at the domain used in the action text.
        domain_hint = report.platform.primary_categories[0].value if report.platform.primary_categories else "governance"
        for profile in HUMAN_PROFILES:
            cell = cells.get((report.platform.platform_id, profile.profile_id))
            human_score = cell.score if cell else 0
            status = status_for_entry(human_score, governance_score, card_status)
            entries.append(
                ReadinessRegistryEntry(
                    entry_id=f"{report.platform.platform_id}.{profile.profile_id}",
                    platform_id=report.platform.platform_id,
                    profile_id=profile.profile_id,
                    platform_title=report.platform.title,
                    profile_name=profile.name,
                    human_score=human_score,
                    governance_score=governance_score,
                    status=status,
                    strengths=cell.strengths if cell else (),
                    gaps=cell.gaps if cell else ("matriz nao gerada",),
                    evidence_refs=cell.evidence_refs if cell else (),
                    recommended_action=action_for_entry(cell, card_status, domain_hint),
                )
            )
    return tuple(entries)
def build_platform_summary(entries: Sequence[ReadinessRegistryEntry]) -> tuple[tuple[str, int, int, str], ...]:
    """Aggregate entries per platform into (id, human avg, governance avg, status)."""
    weak_statuses = {"fragil", "atencao", "bloqueado", "critico"}
    summary: list[tuple[str, int, int, str]] = []
    for platform_id in sorted({entry.platform_id for entry in entries}):
        group = [entry for entry in entries if entry.platform_id == platform_id]
        if group:
            human_avg = round(sum(e.human_score for e in group) / len(group))
            gov_avg = round(sum(e.governance_score for e in group) / len(group))
        else:
            human_avg = gov_avg = 0
        weak = sum(1 for e in group if e.status in weak_statuses)
        # All weak -> critico; some weak -> atencao; none -> pronto.
        if weak == 0:
            status = "pronto"
        elif weak < len(group):
            status = "atencao"
        else:
            status = "critico"
        summary.append((platform_id, human_avg, gov_avg, status))
    return tuple(summary)
def build_profile_summary(entries: Sequence[ReadinessRegistryEntry]) -> tuple[tuple[str, int, int], ...]:
    """Aggregate entries per profile into (id, mean human score, weak count)."""
    weak_statuses = {"fragil", "atencao", "bloqueado", "critico"}
    summary: list[tuple[str, int, int]] = []
    for profile_id in sorted({entry.profile_id for entry in entries}):
        group = [entry for entry in entries if entry.profile_id == profile_id]
        mean_score = round(sum(e.human_score for e in group) / len(group)) if group else 0
        weak = sum(1 for e in group if e.status in weak_statuses)
        summary.append((profile_id, mean_score, weak))
    return tuple(summary)
def build_domain_gaps(portfolio: EcosystemGovernancePortfolio) -> tuple[tuple[str, str, int], ...]:
    """Count attention/fail/blocked checks per (platform, governance domain)."""
    problem_statuses = {GovernanceStatus.ATTENTION, GovernanceStatus.FAIL, GovernanceStatus.BLOCKED}
    rows: list[tuple[str, str, int]] = []
    for card in portfolio.cards:
        per_domain: dict[str, int] = {}
        for check in card.checks:
            if check.status in problem_statuses:
                domain = check.domain.value
                per_domain[domain] = per_domain.get(domain, 0) + 1
        # Domains are emitted alphabetically within each platform.
        rows.extend((card.platform_id, domain, count) for domain, count in sorted(per_domain.items()))
    return tuple(rows)
def build_readiness_registry(reports: Sequence[PlatformHumanReport], portfolio: EcosystemGovernancePortfolio) -> ReadinessRegistry:
    """Assemble the full readiness registry: entries plus all derived summaries."""
    registry_entries = build_registry_entries(reports, portfolio)
    return ReadinessRegistry(
        project_id="tudo-para-ia-mais-humana",
        entries=registry_entries,
        platform_summary=build_platform_summary(registry_entries),
        profile_summary=build_profile_summary(registry_entries),
        domain_gaps=build_domain_gaps(portfolio),
    )
def registry_markdown(registry: ReadinessRegistry) -> str:
    """Render the registry as a Markdown report.

    Sections: header stats, per-platform summary, per-profile summary,
    domain gaps, and the 40 weakest entries sorted worst-first.
    """
    lines = [
        "# Registro de prontidao humana",
        "",
        f"- project_id: `{registry.project_id}`",
        f"- entradas: `{len(registry.entries)}`",
        f"- entradas frageis: `{len(registry.weak_entries)}`",
        "",
        "## Plataformas",
        "",
    ]
    for platform_id, human_avg, gov_avg, status in registry.platform_summary:
        # Fall back to the raw id when the platform is not in the catalog.
        title = PLATFORM_BY_ID[platform_id].title if platform_id in PLATFORM_BY_ID else platform_id
        lines.append(f"- `{platform_id}` {title}: humano `{human_avg}`, governanca `{gov_avg}`, status `{status}`")
    lines.extend(["", "## Perfis", ""])
    for profile_id, avg, weak in registry.profile_summary:
        lines.append(f"- `{profile_id}`: score medio `{avg}`, plataformas com atencao `{weak}`")
    lines.extend(["", "## Lacunas por dominio", ""])
    for platform_id, domain, count in registry.domain_gaps:
        lines.append(f"- `{platform_id}` / `{domain}`: `{count}` checks em atencao/falha/bloqueio")
    lines.extend(["", "## Entradas mais frageis", ""])
    for entry in sorted(registry.weak_entries, key=lambda item: (item.human_score, item.governance_score, item.platform_id, item.profile_id))[:40]:
        lines.append(
            f"- `{entry.platform_id}` -> `{entry.profile_id}`: status `{entry.status}`, "
            f"humano `{entry.human_score}`, governanca `{entry.governance_score}`; acao: {entry.recommended_action}"
        )
    return "\n".join(lines).strip() + "\n"
def registry_rows(registry: ReadinessRegistry) -> list[list[str]]:
    """Flatten the registry into CSV-style string rows, header row first."""
    header = ["platform", "profile", "human_score", "governance_score", "status", "action"]
    ordered = sorted(registry.entries, key=lambda entry: (entry.platform_id, entry.profile_id))
    body = [
        [
            entry.platform_id,
            entry.profile_id,
            str(entry.human_score),
            str(entry.governance_score),
            entry.status,
            entry.recommended_action,
        ]
        for entry in ordered
    ]
    return [header] + body
def registry_gap_actions(registry: ReadinessRegistry, limit: int = 20) -> tuple[str, ...]:
    """List next actions for the weakest entries, stopping once *limit* is reached.

    Deduplication happens after collection, so fewer than *limit* actions may
    be returned when entries share the same recommendation.
    """
    ordered = sorted(
        registry.weak_entries,
        key=lambda entry: (entry.human_score, entry.platform_id, entry.profile_id),
    )
    collected: list[str] = []
    for entry in ordered:
        collected.append(f"{entry.platform_id}/{entry.profile_id}: {entry.recommended_action}")
        # NOTE(review): checked after appending, so limit <= 0 still yields
        # one item when weak entries exist — confirm that is intended.
        if len(collected) >= limit:
            break
    return merge_unique(collected)

270
src/mais_humana/insights.py Normal file
View File

@@ -0,0 +1,270 @@
"""Derived insights for roadmap, risk, dependencies, and human priorities."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence
from .catalog import HUMAN_PROFILES, PLATFORM_BY_ID
from .models import MatrixCell, PlatformHumanReport, Recommendation, as_plain_data, score_label
@dataclass(slots=True)
class DependencyEdge:
    """A directed relation between two platforms in the ecosystem graph."""

    source_platform: str
    target_platform: str
    reason: str  # human-readable explanation of the relation
    strength: str  # "forte" when the target was analyzed, else "planejada"

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this edge (JSON-friendly)."""
        return as_plain_data(self)
@dataclass(slots=True)
class RiskItem:
    """One identified risk for a platform, with impact and mitigation text."""

    risk_id: str  # "<platform_id>-<slug>"
    platform_id: str
    title: str
    severity: str  # "critica" | "alta" | "media" | "baixa"
    evidence: tuple[str, ...]
    impact: str
    mitigation: str

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this risk (JSON-friendly)."""
        return as_plain_data(self)
@dataclass(slots=True)
class RoadmapItem:
    """One suggested roadmap step, linked to its driving recommendations."""

    roadmap_id: str  # "roadmap-<NN>-<platform_id>"
    platform_id: str
    title: str
    why_now: str
    expected_human_change: str
    suggested_sequence: int  # 1-based execution order
    linked_recommendations: tuple[str, ...]  # recommendation ids backing this step

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this roadmap item (JSON-friendly)."""
        return as_plain_data(self)
@dataclass(slots=True)
class ProfileCoverage:
    """How well one human profile is covered across the whole ecosystem."""

    profile_id: str
    best_platforms: tuple[str, ...]  # "<platform_id>:<score>" strings, best first
    weakest_platforms: tuple[str, ...]  # "<platform_id>:<score>" strings
    average_score: int
    reading: str  # narrative interpretation of the numbers

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this coverage (JSON-friendly)."""
        return as_plain_data(self)
@dataclass(slots=True)
class EcosystemInsights:
    """Bundle of all derived insights: dependencies, risks, roadmap, coverage."""

    dependencies: tuple[DependencyEdge, ...]
    risks: tuple[RiskItem, ...]
    roadmap: tuple[RoadmapItem, ...]
    profile_coverage: tuple[ProfileCoverage, ...]

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of the insights (JSON-friendly)."""
        return as_plain_data(self)
def build_dependency_edges(reports: Sequence[PlatformHumanReport]) -> tuple[DependencyEdge, ...]:
    """Derive dependency edges from each platform's declared related platforms."""
    analyzed = {report.platform.platform_id for report in reports}
    edges: list[DependencyEdge] = []
    for report in reports:
        source = report.platform
        for target in source.related_platforms:
            # Edges to platforms outside the analyzed set are only planned.
            strength = "forte" if target in analyzed else "planejada"
            target_title = PLATFORM_BY_ID[target].title if target in PLATFORM_BY_ID else target
            edges.append(
                DependencyEdge(
                    source_platform=source.platform_id,
                    target_platform=target,
                    reason=f"{source.title} depende ou conversa com {target_title} para entregar experiencia humana completa.",
                    strength=strength,
                )
            )
    return tuple(edges)
def severity_from_score(score: int) -> str:
    """Translate a human score into a risk severity bucket."""
    for upper_bound, severity in ((25, "critica"), (45, "alta"), (65, "media")):
        if score < upper_bound:
            return severity
    return "baixa"
def build_risks(reports: Sequence[PlatformHumanReport]) -> tuple[RiskItem, ...]:
    """Derive risk items from each platform report.

    Four risk sources per platform: missing repository (critical), missing
    Git metadata, catalogued blockers, and a low average human score. The
    result is sorted by severity (critical first), then platform id.
    """
    risks: list[RiskItem] = []
    for report in reports:
        if not report.scan.exists:
            risks.append(
                RiskItem(
                    risk_id=f"{report.platform.platform_id}-repo-ausente",
                    platform_id=report.platform.platform_id,
                    title="Repositorio real ausente",
                    severity="critica",
                    evidence=(report.scan.repo_path,),
                    impact="Nao ha base material para validar atendimento humano.",
                    mitigation="Criar ou clonar repositorio real e registrar sincronizacao.",
                )
            )
        if report.scan.exists and not report.scan.git_present:
            risks.append(
                RiskItem(
                    risk_id=f"{report.platform.platform_id}-git-ausente",
                    platform_id=report.platform.platform_id,
                    title="Git ausente no repositorio real",
                    severity="alta",
                    evidence=(report.scan.repo_path,),
                    impact="Nao ha commit, hash ou sincronizacao confiavel.",
                    mitigation="Inicializar Git, configurar origin e registrar status.",
                )
            )
        if report.platform.known_blockers:
            risks.append(
                RiskItem(
                    risk_id=f"{report.platform.platform_id}-blockers-catalogados",
                    platform_id=report.platform.platform_id,
                    title="Bloqueio catalogado de maturidade",
                    severity="alta",
                    evidence=report.platform.known_blockers,
                    impact="Pode haver contradicao entre estado tecnico e prontidao humana.",
                    mitigation="Resolver, formalizar excecao ou isolar o bloqueio em readiness.",
                )
            )
        if report.average_score < 55:
            # The four weakest profile cells serve as evidence for the low score.
            weak = sorted(report.cells, key=lambda cell: cell.score)[:4]
            risks.append(
                RiskItem(
                    risk_id=f"{report.platform.platform_id}-score-humano-baixo",
                    platform_id=report.platform.platform_id,
                    title="Score humano baixo",
                    severity=severity_from_score(report.average_score),
                    evidence=tuple(f"{cell.profile_id}:{cell.score}" for cell in weak),
                    impact="Perfis humanos importantes podem nao entender ou usar a plataforma.",
                    mitigation="Priorizar lacunas dos perfis mais fracos e gerar telas/relatorios.",
                )
            )
    # Unknown severities sort last (rank 4).
    risks.sort(key=lambda item: ({"critica": 0, "alta": 1, "media": 2, "baixa": 3}.get(item.severity, 4), item.platform_id))
    return tuple(risks)
def build_roadmap(reports: Sequence[PlatformHumanReport], recommendations: Sequence[Recommendation]) -> tuple[RoadmapItem, ...]:
    """Build an ordered roadmap (at most 12 items) from the weakest platforms.

    Platforms are visited in ascending average-score order; ones without any
    recommendation are skipped. Each item links the top three (by priority)
    recommendations for that platform.
    """
    grouped: dict[str, list[Recommendation]] = {}
    for rec in recommendations:
        grouped.setdefault(rec.platform_id, []).append(rec)
    low_reports = sorted(reports, key=lambda report: report.average_score)
    roadmap: list[RoadmapItem] = []
    sequence = 1
    for report in low_reports:
        recs = sorted(grouped.get(report.platform.platform_id, ()), key=lambda rec: -rec.priority)[:3]
        if not recs:
            continue
        weakest = sorted(report.cells, key=lambda cell: cell.score)[:3]
        roadmap.append(
            RoadmapItem(
                roadmap_id=f"roadmap-{sequence:02d}-{report.platform.platform_id}",
                platform_id=report.platform.platform_id,
                title=f"Elevar {report.platform.title} para atendimento humano verificavel",
                why_now=(
                    f"Score medio {report.average_score}; perfis mais frageis: "
                    + ", ".join(f"{cell.profile_id}:{cell.score}" for cell in weakest)
                ),
                expected_human_change="Pessoas deixam de depender de leitura tecnica e passam a receber tela, relatorio ou proxima acao clara.",
                suggested_sequence=sequence,
                linked_recommendations=tuple(rec.recommendation_id for rec in recs),
            )
        )
        sequence += 1
        if sequence > 12:
            break
    return tuple(roadmap)
def build_profile_coverage(reports: Sequence[PlatformHumanReport]) -> tuple[ProfileCoverage, ...]:
    """Summarize, per human profile, the best and weakest platform coverage.

    NOTE(review): ``best`` takes the top four and ``weakest`` the bottom four
    of the same ordering, so with fewer than eight cells the lists overlap —
    confirm that is acceptable for the report.
    """
    cells: list[MatrixCell] = [cell for report in reports for cell in report.cells]
    coverage: list[ProfileCoverage] = []
    for profile in HUMAN_PROFILES:
        profile_cells = [cell for cell in cells if cell.profile_id == profile.profile_id]
        if not profile_cells:
            # Keep a placeholder row so every profile appears in the output.
            coverage.append(
                ProfileCoverage(
                    profile_id=profile.profile_id,
                    best_platforms=(),
                    weakest_platforms=(),
                    average_score=0,
                    reading="Sem celulas de matriz para este perfil.",
                )
            )
            continue
        ordered = sorted(profile_cells, key=lambda cell: cell.score, reverse=True)
        average = round(sum(cell.score for cell in profile_cells) / len(profile_cells))
        best = tuple(f"{cell.platform_id}:{cell.score}" for cell in ordered[:4])
        weakest = tuple(f"{cell.platform_id}:{cell.score}" for cell in ordered[-4:])
        coverage.append(
            ProfileCoverage(
                profile_id=profile.profile_id,
                best_platforms=best,
                weakest_platforms=weakest,
                average_score=average,
                reading=(
                    f"{profile.name} tem cobertura media {average} ({score_label(average)}). "
                    "As melhores plataformas devem virar referencia; as mais fracas precisam de OS direcionada."
                ),
            )
        )
    return tuple(coverage)
def build_insights(
    reports: Sequence[PlatformHumanReport],
    recommendations: Sequence[Recommendation],
) -> EcosystemInsights:
    """Bundle dependencies, risks, roadmap and profile coverage into one object."""
    dependencies = build_dependency_edges(reports)
    risks = build_risks(reports)
    roadmap = build_roadmap(reports, recommendations)
    coverage = build_profile_coverage(reports)
    return EcosystemInsights(
        dependencies=dependencies,
        risks=risks,
        roadmap=roadmap,
        profile_coverage=coverage,
    )
def insights_markdown(insights: EcosystemInsights) -> str:
    """Render the insights bundle as a Markdown report (risks capped at 20)."""
    sections: list[str] = ["# Insights operacionais Mais Humana", ""]
    sections.extend(["## Riscos", ""])
    for risk in insights.risks[:20]:
        sections.append(
            f"- `{risk.severity}` {risk.platform_id}: {risk.title}. "
            f"Impacto: {risk.impact} Mitigacao: {risk.mitigation}"
        )
    sections.extend(["", "## Roadmap sugerido", ""])
    for step in insights.roadmap:
        sections.append(f"- {step.suggested_sequence}. {step.title}: {step.why_now}")
    sections.extend(["", "## Cobertura por perfil", ""])
    for coverage in insights.profile_coverage:
        sections.append(f"- {coverage.profile_id}: {coverage.reading}")
    sections.extend(["", "## Dependencias", ""])
    for edge in insights.dependencies:
        sections.append(f"- {edge.source_platform} -> {edge.target_platform} ({edge.strength}): {edge.reason}")
    return "\n".join(sections).strip() + "\n"
def dependency_dot(insights: EcosystemInsights) -> str:
    """Render the dependency edges as a Graphviz digraph (left-to-right layout)."""
    edge_lines = [
        f'  "{edge.source_platform}" -> "{edge.target_platform}" [label="{edge.strength}"];'
        for edge in insights.dependencies
    ]
    lines = ["digraph mais_humana {", "  rankdir=LR;", *edge_lines, "}"]
    return "\n".join(lines) + "\n"

446
src/mais_humana/matrix.py Normal file
View File

@@ -0,0 +1,446 @@
"""Scoring and matrix generation for human service coverage."""
from __future__ import annotations
from collections import Counter
from typing import Iterable, Sequence
from .catalog import HUMAN_PROFILES, CATEGORY_KEYWORDS, categories_for_text
from .models import (
EvidenceKind,
HumanProfile,
MatrixCell,
MaturityLevel,
NeedCategory,
PlatformDefinition,
PlatformHumanReport,
PlatformScan,
Recommendation,
OrderType,
clamp_score,
maturity_from_score,
merge_unique,
score_label,
)
# Relative weight of each evidence kind when scoring repository signals.
# Contract-level artifacts (OpenAPI, MCP tools, security) weigh the most;
# UNKNOWN evidence contributes almost nothing.
EVIDENCE_WEIGHTS: dict[EvidenceKind, int] = {
    EvidenceKind.README: 5,
    EvidenceKind.PACKAGE_SCRIPT: 7,
    EvidenceKind.ROUTE: 8,
    EvidenceKind.OPENAPI: 12,
    EvidenceKind.TEST: 10,
    EvidenceKind.CONFIG: 5,
    EvidenceKind.DOC: 5,
    EvidenceKind.WORKER: 8,
    EvidenceKind.STORAGE: 8,
    EvidenceKind.MCP_TOOL: 12,
    EvidenceKind.UI_SURFACE: 11,
    EvidenceKind.SECURITY: 12,
    EvidenceKind.BUSINESS_RULE: 10,
    EvidenceKind.OBSERVABILITY: 11,
    EvidenceKind.UNKNOWN: 2,
}
# Evidence kinds that act as bonus signals for each human-need category.
# Consumed by profile_alignment_score (scoring) and evidence_refs (selection).
PROFILE_SIGNAL_BONUS: dict[NeedCategory, tuple[EvidenceKind, ...]] = {
    NeedCategory.ADMINISTRATION: (EvidenceKind.MCP_TOOL, EvidenceKind.UI_SURFACE, EvidenceKind.ROUTE),
    NeedCategory.SUPPORT: (EvidenceKind.OBSERVABILITY, EvidenceKind.ROUTE, EvidenceKind.DOC),
    NeedCategory.FINANCE: (EvidenceKind.BUSINESS_RULE, EvidenceKind.ROUTE, EvidenceKind.OPENAPI),
    NeedCategory.LEGAL: (EvidenceKind.DOC, EvidenceKind.SECURITY, EvidenceKind.OBSERVABILITY),
    NeedCategory.SECURITY: (EvidenceKind.SECURITY, EvidenceKind.OPENAPI, EvidenceKind.TEST),
    NeedCategory.OPERATIONS: (EvidenceKind.OBSERVABILITY, EvidenceKind.PACKAGE_SCRIPT, EvidenceKind.WORKER),
    NeedCategory.STRATEGY: (EvidenceKind.DOC, EvidenceKind.OBSERVABILITY, EvidenceKind.MCP_TOOL),
    NeedCategory.DOCUMENTATION: (EvidenceKind.README, EvidenceKind.DOC, EvidenceKind.OPENAPI),
    NeedCategory.SELF_SERVICE: (EvidenceKind.UI_SURFACE, EvidenceKind.ROUTE, EvidenceKind.MCP_TOOL),
    NeedCategory.COMMERCIAL: (EvidenceKind.BUSINESS_RULE, EvidenceKind.OPENAPI, EvidenceKind.ROUTE),
    NeedCategory.EXPERIENCE: (EvidenceKind.UI_SURFACE, EvidenceKind.README, EvidenceKind.MCP_TOOL),
    NeedCategory.GOVERNANCE: (EvidenceKind.OBSERVABILITY, EvidenceKind.SECURITY, EvidenceKind.OPENAPI),
    NeedCategory.INTEGRATION: (EvidenceKind.MCP_TOOL, EvidenceKind.WORKER, EvidenceKind.SECURITY),
    NeedCategory.OBSERVABILITY: (EvidenceKind.OBSERVABILITY, EvidenceKind.TEST, EvidenceKind.ROUTE),
}
def evidence_counter(scan: PlatformScan) -> Counter[EvidenceKind]:
    """Tally how many evidence items of each kind *scan* collected."""
    return Counter(evidence.kind for evidence in scan.evidence)
def category_text_score(scan: PlatformScan, categories: Iterable[NeedCategory]) -> int:
    """Score (0-32) keyword matches for *categories* in the scan's visible text."""
    # Only the first 120 evidence summaries contribute, bounding the haystack.
    haystack = " ".join(
        [
            scan.platform.title,
            scan.platform.mission,
            scan.readme_excerpt,
            " ".join(evidence.summary for evidence in scan.evidence[:120]),
        ]
    ).lower()
    total = 0
    for category in categories:
        hits = sum(
            1
            for keyword in CATEGORY_KEYWORDS.get(category, ())
            if keyword.lower() in haystack
        )
        # Three points per keyword hit, at most 14 points per category.
        total += min(14, hits * 3)
    return min(32, total)
def base_repository_score(scan: PlatformScan) -> int:
    """Score (0-64) the raw material health of the scanned repository."""
    if not scan.exists:
        return 0
    # Baseline of 8 for existing, plus flat bonuses per structural signal.
    score = 8
    bonuses = (
        (scan.git_present, 7),
        (bool(scan.readme_excerpt), 8),
        (scan.code_lines > 0, 8),
        (scan.code_lines >= 1000, 4),
        (scan.code_lines >= 5000, 4),
        (scan.has_tests, 8),
        (scan.has_openapi, 8),
        (scan.has_worker, 4),
    )
    score += sum(points for present, points in bonuses if present)
    if scan.scripts:
        # One point per script, capped at 7.
        score += min(7, len(scan.scripts))
    return min(64, score)
def profile_alignment_score(scan: PlatformScan, profile: HumanProfile) -> int:
    """Score (0-44) how well the scanned platform serves one human profile."""
    counter = evidence_counter(scan)
    total = 0
    for category in profile.priority_needs:
        if category in scan.platform.primary_categories:
            total += 8
        for kind in PROFILE_SIGNAL_BONUS.get(category, ()):
            # Two points per matching evidence item, capped at 9 per kind.
            total += min(9, counter[kind] * 2)
    if profile.profile_id in scan.platform.expected_profiles:
        total += 14
    # README keywords that map onto the profile's needs add 4 points each.
    readme_categories = set(categories_for_text(scan.readme_excerpt))
    total += 4 * len(readme_categories & set(profile.priority_needs))
    return min(44, total)
def penalty_score(scan: PlatformScan, profile: HumanProfile) -> int:
    """Compute the score penalty implied by scan warnings and known blockers."""
    warning_text = " ".join(scan.warnings).lower()
    penalty = 0
    if "sem .git" in warning_text:
        penalty += 8
    if "nenhuma linha de codigo" in warning_text:
        penalty += 20
    # Missing tests/OpenAPI only penalize profiles that actually need them.
    if "testes nao encontrados" in warning_text and NeedCategory.OPERATIONS in profile.priority_needs:
        penalty += 6
    if "openapi" in warning_text and NeedCategory.INTEGRATION in profile.priority_needs:
        penalty += 6
    blockers = scan.platform.known_blockers
    if blockers:
        # Five points per catalogued blocker, capped at 14.
        penalty += min(14, 5 * len(blockers))
    return penalty
def build_strengths(scan: PlatformScan, profile: HumanProfile, score: int) -> tuple[str, ...]:
    """List up to eight deduplicated strengths backing this platform/profile score."""
    found: list[str] = []
    if scan.exists:
        found.append("repositorio real encontrado")
    if scan.git_present:
        found.append("historico Git local disponivel")
    if scan.readme_excerpt:
        found.append("README tecnico fornece contexto inicial")
    if scan.has_tests:
        found.append("testes foram detectados")
    if scan.has_openapi:
        found.append("contrato OpenAPI foi detectado")
    if scan.has_worker:
        found.append("sinais de Worker/Cloudflare foram detectados")
    if profile.profile_id in scan.platform.expected_profiles:
        found.append(f"plataforma declarada como relevante para {profile.name}")
    found.extend(
        f"categoria {category.value} e parte do papel principal da plataforma"
        for category in profile.priority_needs
        if category in scan.platform.primary_categories
    )
    if score >= 75:
        found.append("pontuacao indica atendimento humano forte ou pronto")
    # Deduplicate while keeping insertion order, then keep the first eight.
    return merge_unique(found)[:8]
def build_gaps(scan: PlatformScan, profile: HumanProfile, score: int) -> tuple[str, ...]:
    """Collect up to eight human-readable gaps detected for this profile."""
    findings: list[str] = []
    if not scan.exists:
        findings.append("repositorio real nao existe no espelho local")
    else:
        # These checks only make sense once the repository exists locally.
        if not scan.git_present:
            findings.append("repositorio precisa de Git inicializado e remoto configurado")
        if not scan.readme_excerpt:
            findings.append("falta README tecnico para leitura humana")
        if not scan.has_tests:
            findings.append("faltam testes detectaveis para provar comportamento")
        if not scan.has_openapi and NeedCategory.INTEGRATION in profile.priority_needs:
            findings.append("falta contrato OpenAPI ou equivalente para integracao auditavel")
    if scan.platform.known_blockers:
        findings.extend(scan.platform.known_blockers)
    missing = [
        category.value
        for category in profile.priority_needs
        if category not in scan.platform.primary_categories
    ]
    if score < 60 and missing:
        findings.append("categorias humanas secundarias precisam de explicacao: " + ", ".join(missing[:4]))
    if score < 40:
        findings.append("atendimento humano ainda aparece mais planejado do que operacional")
    return merge_unique(findings)[:8]
def evidence_refs(scan: PlatformScan, profile: HumanProfile) -> tuple[str, ...]:
    """Pick up to eight evidence references relevant to the profile's needs."""
    # Evidence kinds that matter for at least one of the profile's needs.
    wanted: set[EvidenceKind] = set()
    for category in profile.priority_needs:
        for kind in PROFILE_SIGNAL_BONUS.get(category, ()):
            wanted.add(kind)
    collected: list[str] = []
    for item in scan.evidence:
        if len(collected) >= 8:
            break
        if item.kind in wanted or item.is_strong():
            collected.append(item.reference)
    return merge_unique(collected)
def explain_cell(scan: PlatformScan, profile: HumanProfile, score: int, maturity: MaturityLevel) -> str:
    """One-sentence human reading of a platform/profile matrix cell."""
    label = score_label(score)
    if not scan.exists:
        return f"{profile.name} ainda nao tem base local analisavel em {scan.platform.title}."
    title = scan.platform.title
    if score >= 75:
        message = (
            f"{title} atende {profile.name} em nivel {label}, "
            f"com maturidade {maturity.value} e evidencias tecnicas suficientes para leitura humana."
        )
    elif score >= 50:
        message = (
            f"{title} ja oferece sinais uteis para {profile.name}, "
            "mas precisa transformar capacidades tecnicas em telas, relatorios ou comandos mais claros."
        )
    else:
        message = (
            f"{title} ainda parece {label} para {profile.name}; "
            "a proxima evolucao deve explicitar necessidades humanas, evidencias e criterio de pronto."
        )
    return message
def score_cell(scan: PlatformScan, profile: HumanProfile) -> MatrixCell:
    """Score one platform/profile pair and wrap the result in a MatrixCell."""
    # Positive components minus the warning/blocker penalty, then clamped.
    raw = (
        base_repository_score(scan)
        + profile_alignment_score(scan, profile)
        + category_text_score(scan, profile.priority_needs)
        - penalty_score(scan, profile)
    )
    final_score = clamp_score(raw)
    level = maturity_from_score(final_score)
    return MatrixCell(
        platform_id=scan.platform.platform_id,
        profile_id=profile.profile_id,
        score=final_score,
        maturity=level,
        explanation=explain_cell(scan, profile, final_score, level),
        strengths=build_strengths(scan, profile, final_score),
        gaps=build_gaps(scan, profile, final_score),
        evidence_refs=evidence_refs(scan, profile),
    )
def build_matrix(scans: Sequence[PlatformScan], profiles: Sequence[HumanProfile] = HUMAN_PROFILES) -> tuple[MatrixCell, ...]:
    """Score every (scan, profile) pair, scans outermost, profiles innermost."""
    return tuple(
        score_cell(scan, profile)
        for scan in scans
        for profile in profiles
    )
def cells_for_platform(cells: Sequence[MatrixCell], platform_id: str) -> tuple[MatrixCell, ...]:
    """Filter matrix cells down to the ones belonging to a single platform."""
    selected: list[MatrixCell] = []
    for cell in cells:
        if cell.platform_id == platform_id:
            selected.append(cell)
    return tuple(selected)
def top_gaps(cells: Sequence[MatrixCell], limit: int = 8) -> tuple[str, ...]:
    """Gather gaps from the lowest-scoring cells first, up to *limit* items."""
    ordered = sorted(cells, key=lambda cell: cell.score)
    collected: list[str] = []
    for cell in ordered:
        collected.extend(cell.gaps)
        if len(collected) >= limit:
            break
    return merge_unique(collected)[:limit]
def top_strengths(cells: Sequence[MatrixCell], limit: int = 8) -> tuple[str, ...]:
    """Gather strengths from the highest-scoring cells first, up to *limit* items."""
    ordered = sorted(cells, key=lambda cell: cell.score, reverse=True)
    collected: list[str] = []
    for cell in ordered:
        collected.extend(cell.strengths)
        if len(collected) >= limit:
            break
    return merge_unique(collected)[:limit]
def build_recommendations_for_scan(scan: PlatformScan, cells: Sequence[MatrixCell]) -> tuple[Recommendation, ...]:
    """Derive actionable recommendations for one platform from its scan and cells.

    Each missing foundation (repository, Git, README, tests) yields one
    recommendation; weak matrix cells and known blockers add managerial ones.
    Results are sorted by descending priority, then title.
    """
    recommendations: list[Recommendation] = []
    platform_id = scan.platform.platform_id
    # Cells below 60 indicate profiles the platform does not serve well yet.
    low_cells = [cell for cell in cells if cell.score < 60]
    if not scan.exists:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-criar-repositorio-real",
                platform_id=platform_id,
                title="Criar e inicializar repositorio real da plataforma",
                reason="A plataforma nao possui base local analisavel.",
                expected_impact="Permitir execucao tecnica, versionamento e sincronizacao.",
                categories=(NeedCategory.GOVERNANCE, NeedCategory.OPERATIONS),
                priority=100,
                suggested_order_type=OrderType.EXECUTIVE,
                affected_paths=(scan.repo_path,),
                validation_steps=("git status", "git remote -v", "linha de base criada"),
            )
        )
    if scan.exists and not scan.git_present:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-inicializar-git",
                platform_id=platform_id,
                title="Inicializar Git e configurar origin correto",
                reason="O repositorio existe sem .git, impedindo rastreabilidade e sincronizacao.",
                expected_impact="Fechar base operacional minima para commits, push e hash final.",
                categories=(NeedCategory.GOVERNANCE, NeedCategory.OPERATIONS),
                priority=95,
                suggested_order_type=OrderType.EXECUTIVE,
                affected_paths=(scan.repo_path,),
                validation_steps=("git status --short --branch", "git remote -v"),
            )
        )
    if scan.exists and not scan.readme_excerpt:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-readme-humano",
                platform_id=platform_id,
                title="Criar README tecnico e humano da plataforma",
                reason="Sem README, o estado tecnico nao vira compreensao humana inicial.",
                expected_impact="Dar missao, escopo, comandos e criterios de validacao.",
                categories=(NeedCategory.DOCUMENTATION, NeedCategory.EXPERIENCE),
                priority=90,
                suggested_order_type=OrderType.EXECUTIVE,
                affected_paths=(f"{scan.repo_path}/README.md",),
                validation_steps=("README revisado", "links e comandos existentes"),
            )
        )
    if scan.exists and not scan.has_tests:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-testes-canonicos",
                platform_id=platform_id,
                title="Criar testes canonicos de leitura humana",
                reason="A varredura nao encontrou testes suficientes para validar comportamento.",
                expected_impact="Aumentar confianca de suporte, operacao e auditoria.",
                categories=(NeedCategory.OPERATIONS, NeedCategory.OBSERVABILITY),
                priority=75,
                suggested_order_type=OrderType.EXECUTIVE,
                affected_paths=(f"{scan.repo_path}/tests",),
                validation_steps=("suite local executada", "relatorio de testes registrado"),
            )
        )
    if low_cells:
        # Name the four weakest profiles explicitly in the recommendation text.
        weakest = sorted(low_cells, key=lambda cell: cell.score)[:4]
        profile_ids = ", ".join(cell.profile_id for cell in weakest)
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-matriz-perfis-fracos",
                platform_id=platform_id,
                title="Fechar lacunas da matriz humana por perfil",
                reason=f"Perfis com baixo atendimento detectados: {profile_ids}.",
                expected_impact="Transformar capacidade tecnica em telas, relatorios e mensagens de acao.",
                categories=(NeedCategory.EXPERIENCE, NeedCategory.GOVERNANCE, NeedCategory.SUPPORT),
                priority=70,
                suggested_order_type=OrderType.MANAGERIAL,
                affected_paths=(scan.repo_path,),
                validation_steps=("matriz regenerada", "lacunas reclassificadas", "OS de continuidade criada"),
            )
        )
    if scan.platform.known_blockers:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-bloqueios-conhecidos",
                platform_id=platform_id,
                title="Resolver ou formalizar bloqueios conhecidos",
                reason="A plataforma possui bloqueios de maturidade ja mapeados.",
                expected_impact="Reduzir contradicao entre readiness tecnico e utilidade humana.",
                categories=(NeedCategory.GOVERNANCE, NeedCategory.OBSERVABILITY),
                priority=85,
                suggested_order_type=OrderType.MANAGERIAL,
                affected_paths=(scan.repo_path,),
                validation_steps=("bloqueios documentados", "status reavaliado", "evidencia anexada"),
            )
        )
    # Highest priority first; title breaks ties deterministically.
    recommendations.sort(key=lambda item: (-item.priority, item.title))
    return tuple(recommendations)
def build_platform_report(scan: PlatformScan, all_cells: Sequence[MatrixCell]) -> PlatformHumanReport:
    """Assemble the full human-readiness report for one platform.

    Filters the global cell list down to this platform, derives
    recommendations, and builds the narrative summary plus the
    current/future/missing state tuples consumed by the renderers.
    """
    cells = cells_for_platform(all_cells, scan.platform.platform_id)
    recommendations = build_recommendations_for_scan(scan, cells)
    strengths = top_strengths(cells)
    gaps = top_gaps(cells)
    if scan.exists:
        summary = (
            f"{scan.platform.title} foi analisada com {scan.code_lines} linhas de codigo e "
            f"{len(scan.evidence)} evidencias locais. Score medio humano: "
            f"{round(sum(cell.score for cell in cells) / len(cells)) if cells else 0}."
        )
    else:
        summary = f"{scan.platform.title} nao possui repositorio local analisavel."
    # Fallback strings keep the report readable when the matrix produced nothing.
    current_state = tuple(strengths) or ("estado inicial sem evidencias fortes",)
    future_state = tuple(
        [
            "telas e relatorios devem responder quem e atendido, como e atendido e qual proxima acao",
            "evidencias devem ser exportaveis para GPT, painel e central de ordens",
            "cada lacuna humana deve gerar OS executavel com validacao clara",
        ]
    )
    missing = tuple(gaps) or ("nenhuma lacuna principal detectada pela matriz atual",)
    return PlatformHumanReport(
        platform=scan.platform,
        scan=scan,
        cells=cells,
        recommendations=recommendations,
        summary=summary,
        current_state=current_state,
        future_state=future_state,
        missing_for_humans=missing,
    )
def build_platform_reports(scans: Sequence[PlatformScan], cells: Sequence[MatrixCell]) -> tuple[PlatformHumanReport, ...]:
    """Build one human report per scan, preserving scan order."""
    reports: list[PlatformHumanReport] = []
    for scan in scans:
        reports.append(build_platform_report(scan, cells))
    return tuple(reports)
def build_global_recommendations(reports: Sequence[PlatformHumanReport]) -> tuple[Recommendation, ...]:
    """Aggregate an ecosystem-wide recommendation list.

    Takes the top three recommendations of every platform report, then adds a
    synthetic "raise maturity" recommendation for the five platforms with the
    lowest average score. Sorted by priority, platform, title.
    """
    recommendations: list[Recommendation] = []
    for report in reports:
        recommendations.extend(report.recommendations[:3])
    average_by_platform = sorted(reports, key=lambda report: report.average_score)
    for report in average_by_platform[:5]:
        recommendations.append(
            Recommendation(
                recommendation_id=f"global-elevar-{report.platform.platform_id}",
                platform_id=report.platform.platform_id,
                title=f"Elevar maturidade humana de {report.platform.title}",
                reason=f"Score medio atual {report.average_score}; lacunas principais exigem continuidade.",
                expected_impact="Aumentar clareza para administradores, suporte, clientes e planejamento.",
                categories=report.platform.primary_categories,
                # Lower-scoring platforms get a higher priority boost (max +60).
                priority=65 + max(0, 60 - report.average_score),
                suggested_order_type=OrderType.MANAGERIAL,
                affected_paths=(report.scan.repo_path,),
                validation_steps=("relatorio regenerado", "score comparado", "pendencias atualizadas"),
            )
        )
    recommendations.sort(key=lambda item: (-item.priority, item.platform_id, item.title))
    return tuple(recommendations)
def matrix_table(cells: Sequence[MatrixCell], profiles: Sequence[HumanProfile] = HUMAN_PROFILES) -> list[list[str]]:
    """Render the matrix as rows of strings: a header plus one row per platform.

    Missing platform/profile combinations are rendered as "0".
    """
    score_index = {(cell.platform_id, cell.profile_id): cell.score for cell in cells}
    table: list[list[str]] = [["platform"] + [profile.profile_id for profile in profiles]]
    for platform_id in sorted({cell.platform_id for cell in cells}):
        row = [platform_id]
        for profile in profiles:
            score = score_index.get((platform_id, profile.profile_id), 0)
            row.append(str(score))
        table.append(row)
    return table

526
src/mais_humana/models.py Normal file
View File

@@ -0,0 +1,526 @@
"""Core models for human-centered ecosystem analysis.
The module intentionally keeps the data model dependency-free. The platform is
expected to run inside operational mirrors where installing new packages is not
always desirable, so every object can be serialized with the standard library.
"""
from __future__ import annotations
from dataclasses import dataclass, field, fields, is_dataclass
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, Mapping, MutableMapping, Sequence
def utc_now() -> str:
    """Return an ISO-8601 UTC timestamp truncated to whole seconds."""
    moment = datetime.now(timezone.utc)
    return moment.replace(microsecond=0).isoformat()
def slugify(value: str) -> str:
    """Create a stable ASCII-ish slug for filenames and object ids.

    Lowercase letters and digits are kept; runs of separator characters
    collapse to a single dash; everything else is dropped.
    """
    result: list[str] = []
    previous_was_dash = False
    for char in value.strip().lower():
        is_alnum = ("a" <= char <= "z") or ("0" <= char <= "9")
        if is_alnum:
            result.append(char)
            previous_was_dash = False
        elif char in " _-./\\" and not previous_was_dash:
            result.append("-")
            previous_was_dash = True
    slug = "".join(result).strip("-")
    return slug or "item"
def as_plain_data(value: Any) -> Any:
    """Convert dataclasses, enums, paths, and nested values to JSON-safe data.

    Enums become their values, paths become strings, dataclasses become dicts,
    mappings get string keys, and sequences/sets become lists; anything else is
    returned unchanged. Check order matters: str-based enums must be unwrapped
    before any container handling.
    """
    if isinstance(value, Enum):
        return value.value
    if isinstance(value, Path):
        return str(value)
    if is_dataclass(value):
        plain: dict[str, Any] = {}
        for spec in fields(value):
            plain[spec.name] = as_plain_data(getattr(value, spec.name))
        return plain
    if isinstance(value, Mapping):
        return {str(name): as_plain_data(entry) for name, entry in value.items()}
    if isinstance(value, (list, tuple, set)):
        return [as_plain_data(entry) for entry in value]
    return value
class EvidenceKind(str, Enum):
    """Kinds of evidence detected in a platform repository.

    Values are stable lowercase strings so they serialize cleanly via
    as_plain_data and can be compared in generated reports.
    """

    README = "readme"
    PACKAGE_SCRIPT = "package_script"
    ROUTE = "route"
    OPENAPI = "openapi"
    TEST = "test"
    CONFIG = "config"
    DOC = "doc"
    WORKER = "worker"
    STORAGE = "storage"
    MCP_TOOL = "mcp_tool"
    UI_SURFACE = "ui_surface"
    SECURITY = "security"
    BUSINESS_RULE = "business_rule"
    OBSERVABILITY = "observability"
    # Fallback for artifacts that matched no known detector.
    UNKNOWN = "unknown"
class NeedCategory(str, Enum):
    """Human need categories used by the matrix engine.

    Profiles declare priority categories and platforms declare primary
    categories; the scoring code matches the two sets.
    """

    ADMINISTRATION = "administration"
    SUPPORT = "support"
    FINANCE = "finance"
    LEGAL = "legal"
    SECURITY = "security"
    OPERATIONS = "operations"
    STRATEGY = "strategy"
    DOCUMENTATION = "documentation"
    SELF_SERVICE = "self_service"
    COMMERCIAL = "commercial"
    EXPERIENCE = "experience"
    GOVERNANCE = "governance"
    INTEGRATION = "integration"
    OBSERVABILITY = "observability"
class MaturityLevel(str, Enum):
    """Operational maturity level from a human point of view.

    Members are ordered from least to most mature; maturity_from_score maps
    0..100 scores onto this ladder.
    """

    NOT_FOUND = "not_found"
    PLANNED = "planned"
    CATALOGED = "cataloged"
    TECHNICAL = "technical"
    EXPLAINABLE = "explainable"
    ACTIONABLE = "actionable"
    READY_FOR_HUMAN = "ready_for_human"
    AUDITABLE = "auditable"
class OrderType(str, Enum):
    """Service-order type used by the order generator.

    Values are Portuguese labels used verbatim in generated order ids.
    """

    EXECUTIVE = "executiva"
    MANAGERIAL = "gerencial"
class OrderStatus(str, Enum):
    """Compact status for generated and executed orders.

    Values are Portuguese labels serialized into order documents.
    """

    PLANNED = "planejada"
    RUNNING = "em_execucao"
    COMPLETED = "concluida"
    PARTIAL = "parcial"
    BLOCKED = "bloqueada"
@dataclass(slots=True)
class Evidence:
"""A small, inspectable proof that a capability exists or is missing."""
kind: EvidenceKind
path: str
summary: str
line: int | None = None
confidence: float = 0.5
tags: tuple[str, ...] = ()
def to_dict(self) -> dict[str, Any]:
return as_plain_data(self)
@property
def reference(self) -> str:
if self.line is None:
return self.path
return f"{self.path}:{self.line}"
def is_strong(self) -> bool:
return self.confidence >= 0.75
@dataclass(slots=True)
class HumanNeed:
    """A concrete need a person may have while using the ecosystem."""

    # Stable identifier for the need.
    need_id: str
    # Short display name.
    title: str
    # Category consumed by the matrix engine.
    category: NeedCategory
    # Longer explanation of the need.
    description: str
    # Observable markers indicating the need is being met.
    success_markers: tuple[str, ...]
    # Narrative of the risk when the need is unserved (rendered in summaries).
    risk_if_missing: str
    # Surfaces (screens, reports, commands) expected to serve the need.
    expected_surfaces: tuple[str, ...] = ()

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)
@dataclass(slots=True)
class HumanProfile:
"""A human role that should be served by one or more platforms."""
profile_id: str
name: str
description: str
priority_needs: tuple[NeedCategory, ...]
typical_questions: tuple[str, ...]
expected_outputs: tuple[str, ...]
sensitive_concerns: tuple[str, ...] = ()
def to_dict(self) -> dict[str, Any]:
return as_plain_data(self)
def wants(self, category: NeedCategory) -> bool:
return category in self.priority_needs
@dataclass(slots=True)
class PlatformDefinition:
    """Canonical description of a managed platform."""

    # Stable identifier used across cells, recommendations, and reports.
    platform_id: str
    repo_name: str
    central_folder: str
    # Display title used in narratives.
    title: str
    mission: str
    # Categories the platform is primarily responsible for; drives scoring.
    primary_categories: tuple[NeedCategory, ...]
    # Profile ids declared as served by this platform (scoring bonus).
    expected_profiles: tuple[str, ...]
    related_platforms: tuple[str, ...] = ()
    expected_surfaces: tuple[str, ...] = ()
    # Known maturity blockers; each one adds a score penalty and a
    # managerial recommendation.
    known_blockers: tuple[str, ...] = ()

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)
@dataclass(slots=True)
class FileMetric:
    """Line and extension metric for a repository file."""

    # Repository-relative file path.
    path: str
    # File extension including the dot (used to classify code vs. other files).
    extension: str
    # Number of lines in the file.
    lines: int
    # File size in bytes.
    bytes_size: int

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)
@dataclass(slots=True)
class ScriptCommand:
    """A command exposed by package.json or other local metadata."""

    # Script name as declared in the metadata file.
    name: str
    # Full command line the script runs.
    command: str
    # Metadata file the script was read from.
    source_file: str
    # Classified purpose of the script; "unknown" when not classified.
    intent: str = "unknown"

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)
@dataclass(slots=True)
class PlatformScan:
    """Repository scan result used as input for matrix and report generation."""

    platform: PlatformDefinition
    repo_path: str
    # Whether the repository exists in the local mirror.
    exists: bool
    # Whether a .git directory was found.
    git_present: bool
    branch: str | None
    head: str | None
    remote_origin: str | None
    readme_excerpt: str
    file_metrics: tuple[FileMetric, ...]
    scripts: tuple[ScriptCommand, ...]
    evidence: tuple[Evidence, ...]
    warnings: tuple[str, ...]
    scanned_at: str = field(default_factory=utc_now)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)

    @property
    def total_lines(self) -> int:
        """Line count across every scanned file."""
        return sum(entry.lines for entry in self.file_metrics)

    @property
    def code_lines(self) -> int:
        """Line count restricted to known source-code extensions."""
        code_extensions = {".ts", ".tsx", ".js", ".mjs", ".cjs", ".py", ".java"}
        return sum(entry.lines for entry in self.file_metrics if entry.extension in code_extensions)

    @property
    def has_tests(self) -> bool:
        """True when at least one TEST evidence item was collected."""
        return any(entry.kind == EvidenceKind.TEST for entry in self.evidence)

    @property
    def has_openapi(self) -> bool:
        """True when at least one OPENAPI evidence item was collected."""
        return any(entry.kind == EvidenceKind.OPENAPI for entry in self.evidence)

    @property
    def has_worker(self) -> bool:
        """True when at least one WORKER evidence item was collected."""
        return any(entry.kind == EvidenceKind.WORKER for entry in self.evidence)

    @property
    def strong_evidence_count(self) -> int:
        """Number of evidence items at or above the strong-confidence threshold."""
        return sum(1 for entry in self.evidence if entry.is_strong())

    def evidence_by_kind(self, kind: EvidenceKind) -> tuple[Evidence, ...]:
        """All evidence items of the given kind, in collection order."""
        return tuple(entry for entry in self.evidence if entry.kind == kind)
@dataclass(slots=True)
class MatrixCell:
"""Human service score for one platform and one profile."""
platform_id: str
profile_id: str
score: int
maturity: MaturityLevel
explanation: str
strengths: tuple[str, ...]
gaps: tuple[str, ...]
evidence_refs: tuple[str, ...]
def to_dict(self) -> dict[str, Any]:
return as_plain_data(self)
@property
def normalized_score(self) -> float:
return max(0.0, min(1.0, self.score / 100.0))
def is_ready(self) -> bool:
return self.score >= 75
@dataclass(slots=True)
class Recommendation:
    """Actionable recommendation derived from scan and matrix signals."""

    # Stable identifier, usually "<platform>-<slug>".
    recommendation_id: str
    platform_id: str
    title: str
    # Why the recommendation was generated.
    reason: str
    # What improves once the recommendation is executed.
    expected_impact: str
    categories: tuple[NeedCategory, ...]
    # Higher values sort first when recommendation lists are ordered.
    priority: int
    suggested_order_type: OrderType
    affected_paths: tuple[str, ...] = ()
    validation_steps: tuple[str, ...] = ()

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)
@dataclass(slots=True)
class PlatformHumanReport:
    """Report model for one platform."""

    platform: PlatformDefinition
    scan: PlatformScan
    cells: tuple[MatrixCell, ...]
    recommendations: tuple[Recommendation, ...]
    summary: str
    current_state: tuple[str, ...]
    future_state: tuple[str, ...]
    missing_for_humans: tuple[str, ...]
    generated_at: str = field(default_factory=utc_now)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)

    @property
    def average_score(self) -> int:
        """Rounded mean score across all matrix cells; 0 when there are none."""
        if not self.cells:
            return 0
        total = sum(cell.score for cell in self.cells)
        return round(total / len(self.cells))

    @property
    def ready_profiles(self) -> tuple[str, ...]:
        """Profile ids whose cells reach the ready threshold."""
        ready = [cell.profile_id for cell in self.cells if cell.is_ready()]
        return tuple(ready)
@dataclass(slots=True)
class EcosystemHumanReport:
    """Full ecosystem report model."""

    scans: tuple[PlatformScan, ...]
    platform_reports: tuple[PlatformHumanReport, ...]
    recommendations: tuple[Recommendation, ...]
    generated_at: str = field(default_factory=utc_now)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)

    @property
    def total_code_lines(self) -> int:
        """Sum of code lines across every scanned platform."""
        total = 0
        for scan in self.scans:
            total += scan.code_lines
        return total

    @property
    def total_files(self) -> int:
        """Total number of files covered by all scans."""
        total = 0
        for scan in self.scans:
            total += len(scan.file_metrics)
        return total

    @property
    def average_score(self) -> int:
        """Rounded mean score across all cells of all platform reports."""
        all_cells = [cell for report in self.platform_reports for cell in report.cells]
        if not all_cells:
            return 0
        return round(sum(cell.score for cell in all_cells) / len(all_cells))

    def report_for(self, platform_id: str) -> PlatformHumanReport | None:
        """Find the platform report with the given id, or None."""
        matches = (report for report in self.platform_reports if report.platform.platform_id == platform_id)
        return next(matches, None)
@dataclass(slots=True)
class ServiceOrder:
    """Generated service order metadata and body."""

    # Readable id, typically produced by incrementing_id ("NNNN_TYPE__slug").
    order_id: str
    order_type: OrderType
    project_id: str
    title: str
    # Why the order exists.
    purpose: str
    # What the order touches.
    object_scope: str
    reason: str
    expected_result: str
    affected_paths: tuple[str, ...]
    # Steps to run to validate execution.
    validations: tuple[str, ...]
    # Definition-of-done criteria.
    ready_criteria: tuple[str, ...]
    # New orders start as planned.
    status: OrderStatus = OrderStatus.PLANNED
    priority: str = "media"

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)
@dataclass(slots=True)
class GeneratedFile:
    """File generated or updated by the platform runtime."""

    # Path of the generated/updated file.
    path: str
    description: str
    # What role the file plays in the output set.
    function: str
    file_type: str
    # Which component produced the change.
    changed_by: str
    change_summary: str
    # How the file relates to the service order that triggered it.
    relation_to_order: str

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)
@dataclass(slots=True)
class ReportBundle:
    """Paths and counters produced by a generation run."""

    # Root directory the run wrote into.
    output_root: str
    generated_files: tuple[GeneratedFile, ...]
    platform_count: int
    profile_count: int
    # Number of matrix cells produced (platforms x profiles).
    matrix_cells: int
    total_code_lines_analyzed: int
    warnings: tuple[str, ...]
    generated_at: str = field(default_factory=utc_now)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-safe plain data."""
        return as_plain_data(self)
def clamp_score(value: int | float) -> int:
"""Clamp a numeric score into the 0..100 range."""
return int(max(0, min(100, round(float(value)))))
def maturity_from_score(score: int) -> MaturityLevel:
    """Map a numeric score to a human maturity label.

    Zero is a special "not found" case; other scores walk an ascending
    ladder of exclusive upper bounds, with 92+ mapping to auditable.
    """
    score = clamp_score(score)
    if score == 0:
        return MaturityLevel.NOT_FOUND
    ladder = (
        (20, MaturityLevel.PLANNED),
        (35, MaturityLevel.CATALOGED),
        (50, MaturityLevel.TECHNICAL),
        (65, MaturityLevel.EXPLAINABLE),
        (80, MaturityLevel.ACTIONABLE),
        (92, MaturityLevel.READY_FOR_HUMAN),
    )
    for ceiling, level in ladder:
        if score < ceiling:
            return level
    return MaturityLevel.AUDITABLE
def merge_unique(values: Iterable[str]) -> tuple[str, ...]:
    """Return values in input order while dropping empty strings and duplicates."""
    # A dict preserves insertion order and deduplicates in one pass.
    kept: dict[str, None] = {}
    for raw in values:
        text = str(raw).strip()
        if text:
            kept.setdefault(text, None)
    return tuple(kept)
def group_by_platform(recommendations: Sequence[Recommendation]) -> dict[str, list[Recommendation]]:
    """Group recommendations by platform id, each group sorted by priority then title."""
    grouped: dict[str, list[Recommendation]] = {}
    for rec in recommendations:
        bucket = grouped.get(rec.platform_id)
        if bucket is None:
            bucket = []
            grouped[rec.platform_id] = bucket
        bucket.append(rec)
    for bucket in grouped.values():
        # Highest priority first; title breaks ties deterministically.
        bucket.sort(key=lambda entry: (-entry.priority, entry.title))
    return grouped
def incrementing_id(prefix: str, index: int, title: str) -> str:
    """Create a readable id: zero-padded index, prefix, and a slug of the title."""
    slug = slugify(title)
    return f"{index:04d}_{prefix}__{slug}"
def summarize_warnings(scans: Sequence[PlatformScan]) -> tuple[str, ...]:
    """Flatten scan warnings into unique, platform-prefixed strings."""
    labeled = [
        f"{scan.platform.platform_id}: {warning}"
        for scan in scans
        for warning in scan.warnings
    ]
    return merge_unique(labeled)
def score_label(score: int) -> str:
    """Return a compact Portuguese label for user-facing matrix tables."""
    score = clamp_score(score)
    # Descending floors; the first one the score reaches wins.
    thresholds = (
        (90, "excelente"),
        (75, "forte"),
        (60, "util"),
        (40, "tecnico"),
        (20, "inicial"),
        (1, "fragil"),
    )
    for floor, label in thresholds:
        if score >= floor:
            return label
    return "ausente"
def ensure_mapping(data: MutableMapping[str, Any], key: str, default: Any) -> Any:
    """Return ``data[key]``, inserting *default* first when the key is absent.

    Small helper used by storage migration code. ``dict.setdefault`` expresses
    the read-or-insert as a single call instead of the original
    test-then-assign pair, with identical semantics.
    """
    return data.setdefault(key, default)

View File

@@ -0,0 +1,196 @@
"""Human-readable narratives generated from scans and matrix cells."""
from __future__ import annotations
from typing import Sequence
from .catalog import HUMAN_PROFILES, HUMAN_NEEDS, PROFILE_BY_ID
from .models import MatrixCell, PlatformHumanReport, PlatformScan, Recommendation, score_label
def sentence_list(items: Sequence[str], fallback: str = "nao informado") -> str:
    """Join items into a Portuguese-style list ("a, b e c"), or return *fallback*."""
    cleaned = [text.strip() for text in items if text and text.strip()]
    if not cleaned:
        return fallback
    if len(cleaned) == 1:
        return cleaned[0]
    *front, last = cleaned
    return ", ".join(front) + " e " + last
def platform_intro(scan: PlatformScan) -> str:
    """One-paragraph introduction describing the scanned repository state."""
    title = scan.platform.title
    if not scan.exists:
        missing_note = "Para pessoas reais, isso significa ausencia de prova operacional na base desta plataforma."
        return f"{title} ainda nao possui repositorio local analisavel. " + missing_note
    if scan.git_present:
        git_state = "com Git local"
    else:
        git_state = "sem Git local"
    head = f"{title} existe em {scan.repo_path}, {git_state}, "
    tail = f"com {scan.code_lines} linhas de codigo analisaveis e {len(scan.evidence)} evidencias coletadas."
    return head + tail
def profile_section(cell: MatrixCell) -> tuple[str, ...]:
    """Narrative lines describing how one profile is served in one cell."""
    profile = PROFILE_BY_ID[cell.profile_id]
    section = [
        f"Perfil: {profile.name}",
        f"Score: {cell.score} ({score_label(cell.score)}), maturidade: {cell.maturity.value}.",
        f"Leitura: {cell.explanation}",
    ]
    section.append("Forcas: " + sentence_list(cell.strengths))
    section.append("Lacunas: " + sentence_list(cell.gaps))
    if cell.evidence_refs:
        # At most five references keep the section scannable.
        section.append("Evidencias: " + sentence_list(cell.evidence_refs[:5]))
    return tuple(section)
def current_state_paragraph(report: PlatformHumanReport) -> str:
    """Sentence summarizing the platform's current human-facing state."""
    body = sentence_list(report.current_state, "a plataforma precisa de evidencias iniciais")
    return f"Estado atual humano: {body}."
def future_state_paragraph(report: PlatformHumanReport) -> str:
    """Sentence summarizing the expected future state."""
    return f"Estado futuro esperado: {sentence_list(report.future_state)}."
def missing_state_paragraph(report: PlatformHumanReport) -> str:
    """Sentence listing what is still missing for better human service."""
    return f"O que ainda falta para atender melhor: {sentence_list(report.missing_for_humans)}."
def recommendation_paragraph(recommendation: Recommendation) -> str:
    """Single-paragraph rendering of one recommendation."""
    category_names = ", ".join(item.value for item in recommendation.categories)
    validation_text = sentence_list(recommendation.validation_steps, "validacao a definir")
    parts = [
        f"{recommendation.title}. Motivo: {recommendation.reason} ",
        f"Impacto esperado: {recommendation.expected_impact} ",
        f"Categorias: {category_names}. Validacao: {validation_text}.",
    ]
    return "".join(parts)
def platform_report_lines(report: PlatformHumanReport) -> list[str]:
    """Assemble the plain-text narrative lines for one platform report."""
    output = [
        report.platform.title,
        report.platform.mission,
        platform_intro(report.scan),
        report.summary,
        current_state_paragraph(report),
        future_state_paragraph(report),
        missing_state_paragraph(report),
        "Perfis humanos",
    ]
    # Cells sorted by profile id keep the narrative order stable across runs.
    for cell in sorted(report.cells, key=lambda item: item.profile_id):
        output.extend(profile_section(cell))
    output.append("Recomendacoes")
    output.extend(recommendation_paragraph(rec) for rec in report.recommendations[:10])
    if report.scan.warnings:
        output.append("Avisos operacionais: " + sentence_list(report.scan.warnings))
    return output
def ecosystem_summary_lines(reports: Sequence[PlatformHumanReport]) -> list[str]:
    """Build the plain-text summary lines for the whole ecosystem.

    Includes totals, a per-need reading, and the eight lowest- and
    highest-scoring platforms.
    """
    total_code = sum(report.scan.code_lines for report in reports)
    total_evidence = sum(len(report.scan.evidence) for report in reports)
    # Guard against division by zero when no reports exist.
    average = round(sum(report.average_score for report in reports) / len(reports)) if reports else 0
    lines = [
        "Relatorio Geral do Ecossistema Mais Humano",
        (
            f"Foram avaliadas {len(reports)} plataformas, com {total_code} linhas de codigo "
            f"e {total_evidence} evidencias locais."
        ),
        f"Score medio humano do ecossistema: {average}.",
        (
            "A pergunta central desta plataforma e simples: quem e atendido, como e atendido, "
            "o que ja funciona hoje e o que precisa virar ordem de servico para servir melhor pessoas reais."
        ),
    ]
    lines.append("Leitura por necessidade humana")
    for need in HUMAN_NEEDS:
        # Platforms whose primary categories cover this need.
        related = [
            report.platform.platform_id
            for report in reports
            if need.category in report.platform.primary_categories
        ]
        lines.append(
            f"{need.title}: plataformas relacionadas {sentence_list(related, 'nenhuma principal')}. "
            f"Risco se faltar: {need.risk_if_missing}"
        )
    lines.append("Plataformas com menor score medio")
    for report in sorted(reports, key=lambda item: item.average_score)[:8]:
        lines.append(
            f"{report.platform.title}: score {report.average_score}; "
            f"lacunas principais: {sentence_list(report.missing_for_humans[:3])}."
        )
    lines.append("Plataformas com maior prontidao humana")
    for report in sorted(reports, key=lambda item: item.average_score, reverse=True)[:8]:
        lines.append(
            f"{report.platform.title}: score {report.average_score}; "
            f"forcas: {sentence_list(report.current_state[:3])}."
        )
    return lines
def markdown_table(headers: Sequence[str], rows: Sequence[Sequence[str]]) -> str:
    """Render a markdown table; pipe characters inside cells become slashes."""
    rendered = [
        "| " + " | ".join(headers) + " |",
        "| " + " | ".join(["---"] * len(headers)) + " |",
    ]
    for row in rows:
        cells = [str(value).replace("|", "/") for value in row]
        rendered.append("| " + " | ".join(cells) + " |")
    return "\n".join(rendered)
def platform_markdown(report: PlatformHumanReport) -> str:
    """Render one platform report as a markdown document.

    Sections: synthesis, per-profile matrix table, recommendations, and
    warnings (only when the scan produced any).
    """
    lines = [f"# {report.platform.title}", "", report.platform.mission, ""]
    lines.append("## Sintese")
    lines.append("")
    lines.append(report.summary)
    lines.append("")
    lines.append(current_state_paragraph(report))
    lines.append("")
    lines.append(future_state_paragraph(report))
    lines.append("")
    lines.append(missing_state_paragraph(report))
    lines.append("")
    lines.append("## Matriz por perfil")
    rows: list[list[str]] = []
    # Sorting by profile id keeps table order stable across runs.
    for cell in sorted(report.cells, key=lambda item: item.profile_id):
        profile = PROFILE_BY_ID[cell.profile_id]
        rows.append([profile.name, str(cell.score), cell.maturity.value, cell.explanation])
    lines.append(markdown_table(["Perfil", "Score", "Maturidade", "Leitura"], rows))
    lines.append("")
    lines.append("## Recomendacoes")
    lines.append("")
    for recommendation in report.recommendations:
        lines.append(f"- {recommendation_paragraph(recommendation)}")
    if report.scan.warnings:
        lines.append("")
        lines.append("## Avisos")
        lines.extend(f"- {warning}" for warning in report.scan.warnings)
    return "\n".join(lines).strip() + "\n"
def ecosystem_markdown(reports: Sequence[PlatformHumanReport]) -> str:
    """Render the whole-ecosystem report as a markdown document.

    Reuses ecosystem_summary_lines (minus its title, replaced by the H1),
    then adds a platform-by-profile overview table and the profile list.
    """
    lines = ["# Relatorio Geral do Ecossistema Mais Humano", ""]
    lines.extend(ecosystem_summary_lines(reports)[1:])
    lines.append("")
    lines.append("## Matriz plataforma x perfil")
    rows: list[list[str]] = []
    for report in sorted(reports, key=lambda item: item.platform.platform_id):
        # Show the three best- and three worst-served profiles per platform.
        strongest = sorted(report.cells, key=lambda item: item.score, reverse=True)[:3]
        weakest = sorted(report.cells, key=lambda item: item.score)[:3]
        rows.append(
            [
                report.platform.platform_id,
                str(report.average_score),
                sentence_list([PROFILE_BY_ID[cell.profile_id].name for cell in strongest]),
                sentence_list([PROFILE_BY_ID[cell.profile_id].name for cell in weakest]),
            ]
        )
    lines.append(markdown_table(["Plataforma", "Score", "Mais atendidos", "Mais frageis"], rows))
    lines.append("")
    lines.append("## Perfis considerados")
    for profile in HUMAN_PROFILES:
        lines.append(f"- {profile.name}: {profile.description}")
    return "\n".join(lines).strip() + "\n"

View File

@@ -0,0 +1,903 @@
"""Build and render the operational dossier for a service-order round."""
from __future__ import annotations
from pathlib import Path
from typing import Iterable, Sequence
from .blocker_catalog import build_operational_signals, summarize_blockers, summarize_capabilities
from .models import OrderType, PlatformHumanReport, Recommendation, ServiceOrder, as_plain_data, merge_unique, utc_now
from .operational_models import (
ExecutionRoundDossier,
GateDomain,
GateOutcome,
HumanReadinessStage,
OrderClosureStatus,
OrderJustification,
OperationalSignal,
PlatformOperationalDossier,
ReadinessGate,
SignalKind,
SignalSeverity,
SourceConfidence,
SourceReference,
code_line_count,
find_service_order_platform,
next_actions_from_signals,
severity_rank,
source_refs_from_strings,
stage_rank,
stable_digest,
)
# Order ids for the round currently in execution. Ids follow the
# incrementing_id format ("NNNN_TYPE__slug"); keep this tuple in sync with
# ORDER_PLATFORM_MAP below.
ACTIVE_ORDER_IDS: tuple[str, ...] = (
    "0001_EXECUTIVA__resolver-ou-formalizar-bloqueios-conhecidos",
    "0002_EXECUTIVA__resolver-ou-formalizar-bloqueios-conhecidos",
    "0003_EXECUTIVA__resolver-ou-formalizar-bloqueios-conhecidos",
    "0004_EXECUTIVA__elevar-maturidade-humana-de-business-platform",
    "0005_EXECUTIVA__elevar-maturidade-humana-de-compliance-platform",
    "0012_GERENCIAL__resolver-ou-formalizar-bloqueios-conhecidos",
    "0013_GERENCIAL__resolver-ou-formalizar-bloqueios-conhecidos",
    "0014_GERENCIAL__resolver-ou-formalizar-bloqueios-conhecidos",
    "0015_GERENCIAL__elevar-maturidade-humana-de-business-platform",
    "0016_GERENCIAL__elevar-maturidade-humana-de-compliance-platform",
)
# Maps each active order id to the platform id it targets.
ORDER_PLATFORM_MAP: dict[str, str] = {
    "0001_EXECUTIVA__resolver-ou-formalizar-bloqueios-conhecidos": "docs",
    "0002_EXECUTIVA__resolver-ou-formalizar-bloqueios-conhecidos": "integracoes",
    "0003_EXECUTIVA__resolver-ou-formalizar-bloqueios-conhecidos": "intelligence",
    "0004_EXECUTIVA__elevar-maturidade-humana-de-business-platform": "business",
    "0005_EXECUTIVA__elevar-maturidade-humana-de-compliance-platform": "compliance",
    "0012_GERENCIAL__resolver-ou-formalizar-bloqueios-conhecidos": "docs",
    "0013_GERENCIAL__resolver-ou-formalizar-bloqueios-conhecidos": "integracoes",
    "0014_GERENCIAL__resolver-ou-formalizar-bloqueios-conhecidos": "intelligence",
    "0015_GERENCIAL__elevar-maturidade-humana-de-business-platform": "business",
    "0016_GERENCIAL__elevar-maturidade-humana-de-compliance-platform": "compliance",
}
def gate(
    platform_id: str,
    gate_id: str,
    domain: GateDomain,
    title: str,
    outcome: GateOutcome,
    severity: SignalSeverity,
    reason: str,
    next_action: str,
    evidence: Iterable[SourceReference] = (),
    linked_signals: Iterable[str] = (),
) -> ReadinessGate:
    """Build a ReadinessGate whose id is namespaced under the platform id.

    Evidence and linked signals are materialized into tuples so the gate
    record never holds a lazy iterable.
    """
    payload: dict[str, object] = {
        "gate_id": f"{platform_id}.{gate_id}",
        "platform_id": platform_id,
        "domain": domain,
        "title": title,
        "outcome": outcome,
        "severity": severity,
        "reason": reason,
        "next_action": next_action,
        "evidence": tuple(evidence),
        "linked_signals": tuple(linked_signals),
    }
    return ReadinessGate(**payload)
def signal_refs(signals: Sequence[OperationalSignal], domain: GateDomain | None = None, limit: int = 6) -> tuple[SourceReference, ...]:
    """Collect up to *limit* source references from *signals*.

    When *domain* is given, only signals of that domain contribute sources;
    collection stops as soon as the limit is reached.
    """
    collected: list[SourceReference] = []
    for candidate in signals:
        if domain is None or candidate.domain == domain:
            collected.extend(candidate.sources)
            if len(collected) >= limit:
                break
    return tuple(collected[:limit])
def linked_signal_ids(signals: Sequence[OperationalSignal], domain: GateDomain | None = None, kind: SignalKind | None = None) -> tuple[str, ...]:
    """Return the ids of signals matching the optional domain and kind filters."""
    return tuple(
        candidate.signal_id
        for candidate in signals
        if (domain is None or candidate.domain == domain)
        and (kind is None or candidate.kind == kind)
    )
def build_repository_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: the platform's real repository exists in the local mirror."""
    scan = report.scan
    platform_id = report.platform.platform_id
    linked = linked_signal_ids(signals, GateDomain.REPOSITORY)
    if scan.exists:
        evidence = (
            SourceReference(path=scan.repo_path, summary="Repositorio analisado.", confidence=SourceConfidence.DIRECT),
        )
        return gate(
            platform_id,
            "repository",
            GateDomain.REPOSITORY,
            "Repositorio real existe",
            GateOutcome.PASS,
            SignalSeverity.INFO,
            "Repositorio real encontrado e analisado.",
            "manter espelho local sincronizado",
            evidence,
            linked,
        )
    # Missing repository is the hardest blocker for any promotion.
    return gate(
        platform_id,
        "repository",
        GateDomain.REPOSITORY,
        "Repositorio real existe",
        GateOutcome.BLOCKED,
        SignalSeverity.CRITICAL,
        "Repositorio real nao encontrado no espelho local.",
        "criar ou clonar repositorio real antes de nova promocao",
        signal_refs(signals, GateDomain.REPOSITORY),
        linked,
    )
def build_git_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: local Git metadata is present so the round can record hashes."""
    scan = report.scan
    platform_id = report.platform.platform_id
    linked = linked_signal_ids(signals, GateDomain.REPOSITORY)
    if not scan.git_present:
        # A missing repo escalates the failure from FAIL/HIGH to BLOCKED/CRITICAL.
        degraded = scan.exists
        return gate(
            platform_id,
            "git",
            GateDomain.REPOSITORY,
            "Git local e rastreabilidade",
            GateOutcome.FAIL if degraded else GateOutcome.BLOCKED,
            SignalSeverity.HIGH if degraded else SignalSeverity.CRITICAL,
            "Git local nao esta disponivel para hash e sincronizacao.",
            "corrigir permissao ou inicializar Git com origin correto",
            signal_refs(signals, GateDomain.REPOSITORY),
            linked,
        )
    metadata = tuple(value for value in (scan.branch, scan.head, scan.remote_origin) if value)
    evidence = source_refs_from_strings(metadata or (scan.repo_path,), "Metadado Git detectado.")
    return gate(
        platform_id,
        "git",
        GateDomain.REPOSITORY,
        "Git local e rastreabilidade",
        GateOutcome.PASS,
        SignalSeverity.INFO,
        "Git local detectado.",
        "registrar status e hash no fechamento",
        evidence,
        linked,
    )
def build_documentation_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: a README or primary documentation excerpt was found by the scan."""
    if report.scan.readme_excerpt:
        outcome, severity = GateOutcome.PASS, SignalSeverity.INFO
        reason = "README ou documentacao principal foi encontrada."
        action = "manter documentacao reconciliada com gates humanos"
    else:
        outcome, severity = GateOutcome.ATTENTION, SignalSeverity.MEDIUM
        reason = "README/documentacao inicial nao foi encontrada."
        action = "criar README tecnico e humano com comandos, surfaces e criterios"
    return gate(
        report.platform.platform_id,
        "documentation",
        GateDomain.DOCUMENTATION,
        "Documentacao humana minima",
        outcome,
        severity,
        reason,
        action,
        signal_refs(signals, GateDomain.DOCUMENTATION),
        linked_signal_ids(signals, GateDomain.DOCUMENTATION),
    )
def build_tests_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: at least one test or smoke artifact was detected in the scan."""
    if report.scan.has_tests:
        outcome, severity = GateOutcome.PASS, SignalSeverity.INFO
        reason = "Testes ou specs foram detectados pela varredura."
        action = "executar suite aplicavel antes de fechar OS"
    else:
        outcome, severity = GateOutcome.ATTENTION, SignalSeverity.MEDIUM
        reason = "Nenhum teste foi encontrado pela varredura local."
        action = "criar smoke ou teste canonico do contrato humano"
    return gate(
        report.platform.platform_id,
        "tests",
        GateDomain.TESTS,
        "Teste ou smoke detectavel",
        outcome,
        severity,
        reason,
        action,
        signal_refs(signals, GateDomain.TESTS),
        linked_signal_ids(signals, GateDomain.TESTS),
    )
def build_openapi_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: an OpenAPI document or equivalent auditable contract exists."""
    platform_id = report.platform.platform_id
    if report.scan.has_openapi:
        outcome, severity = GateOutcome.PASS, SignalSeverity.INFO
        reason = "OpenAPI ou contrato equivalente foi detectado."
        action = "manter contrato sincronizado com rotas e tools"
    else:
        outcome = GateOutcome.ATTENTION
        # Public-surface platforms carry a higher severity for a missing contract.
        severity = SignalSeverity.HIGH if platform_id in {"docs", "public", "ui"} else SignalSeverity.MEDIUM
        reason = "OpenAPI nao foi detectada por varredura local."
        action = "publicar OpenAPI minima ou declarar contrato alternativo versionado"
    return gate(
        platform_id,
        "contract",
        GateDomain.CONTRACT,
        "Contrato de API ou surface auditavel",
        outcome,
        severity,
        reason,
        action,
        signal_refs(signals, GateDomain.CONTRACT),
        linked_signal_ids(signals, GateDomain.CONTRACT),
    )
def build_panel_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: a human panel contract exists and reads from the same source."""
    panel_signals = [signal for signal in signals if signal.domain == GateDomain.PANEL]
    same_source = False
    panel_ready = False
    # Single pass over panel signals to detect both markers in tags/summaries.
    for signal in panel_signals:
        tags_text = " ".join(signal.tags).lower()
        summary_text = signal.summary.lower()
        same_source = same_source or "samesource" in tags_text or "mesma fonte" in summary_text
        panel_ready = panel_ready or "panelready" in tags_text or "painel" in summary_text
    if same_source and panel_ready:
        outcome, severity = GateOutcome.PASS, SignalSeverity.INFO
        reason = "Sinais de panelReady e sameSource foram detectados."
        action = "executar regressao de hashes de fonte e registros"
    elif panel_signals:
        outcome, severity = GateOutcome.ATTENTION, SignalSeverity.MEDIUM
        reason = "Ha sinais de painel, mas a mesma fonte precisa ficar explicita."
        action = "formalizar sourceEndpoint, sourceToolId, sourcePayloadHash e sourceRecordsHash"
    else:
        outcome, severity = GateOutcome.ATTENTION, SignalSeverity.MEDIUM
        reason = "Nenhum contrato de painel humano foi detectado."
        action = "criar contrato de tela ou declarar que a plataforma nao entrega painel diretamente"
    return gate(
        report.platform.platform_id,
        "panel",
        GateDomain.PANEL,
        "Painel humano e mesma fonte",
        outcome,
        severity,
        reason,
        action,
        signal_refs(signals, GateDomain.PANEL),
        linked_signal_ids(signals, GateDomain.PANEL),
    )
def build_docs_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: state of the platform's declared relationship with Docs."""
    platform = report.platform
    common = (platform.platform_id, "docs", GateDomain.DOCS, "Relacao com Docs")
    if platform.platform_id != "docs" and "docs" not in platform.related_platforms:
        return gate(
            *common,
            GateOutcome.NOT_APPLICABLE,
            SignalSeverity.INFO,
            "Docs nao e dependencia primaria declarada para esta plataforma.",
            "manter referencia documental quando houver contrato publico",
            (),
            linked_signal_ids(signals, GateDomain.DOCS),
        )
    docs_blockers = [signal for signal in signals if signal.domain == GateDomain.DOCS and signal.is_blocking]
    if docs_blockers:
        return gate(
            *common,
            GateOutcome.BLOCKED,
            SignalSeverity.HIGH,
            "Docs possui decisao catalogOnly ou leitura canonica pendente.",
            "promover leitura Docs responseReady minima ou registrar excecao formal",
            signal_refs(docs_blockers, GateDomain.DOCS),
            linked_signal_ids(docs_blockers, GateDomain.DOCS),
        )
    return gate(
        *common,
        GateOutcome.PASS,
        SignalSeverity.INFO,
        "A relacao documental nao apresenta blocker formal nesta varredura.",
        "manter evidencias e contratos documentais reconciliados",
        signal_refs(signals, GateDomain.DOCS),
        linked_signal_ids(signals, GateDomain.DOCS),
    )
def build_integration_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: external integrations and credential readiness for the platform."""
    platform = report.platform
    common = (platform.platform_id, "integration", GateDomain.INTEGRATION, "Integracao externa e credenciais")
    if platform.platform_id != "integracoes" and "integracoes" not in platform.related_platforms:
        return gate(
            *common,
            GateOutcome.NOT_APPLICABLE,
            SignalSeverity.INFO,
            "Integracoes nao e dependencia primaria declarada.",
            "registrar dependencia quando produto exigir provider externo",
            (),
            linked_signal_ids(signals, GateDomain.INTEGRATION),
        )
    domain_signals = [signal for signal in signals if signal.domain == GateDomain.INTEGRATION]
    blockers = [signal for signal in domain_signals if signal.is_blocking]
    if blockers:
        return gate(
            *common,
            GateOutcome.BLOCKED,
            SignalSeverity.HIGH,
            "Ha bloqueio de BYOK, credencial live, provider ou smoke por tenant.",
            "provar credentialRef, smoke readonly e nao vazamento por tenant",
            signal_refs(blockers, GateDomain.INTEGRATION),
            linked_signal_ids(blockers, GateDomain.INTEGRATION),
        )
    capabilities = [signal for signal in domain_signals if signal.kind == SignalKind.CAPABILITY]
    if capabilities:
        return gate(
            *common,
            GateOutcome.PASS,
            SignalSeverity.INFO,
            "A plataforma possui sinais de integracao controlada.",
            "manter readiness e redaction dos providers",
            signal_refs(capabilities, GateDomain.INTEGRATION),
            linked_signal_ids(capabilities, GateDomain.INTEGRATION),
        )
    # Declared dependency but no strong signals either way.
    return gate(
        *common,
        GateOutcome.ATTENTION,
        SignalSeverity.MEDIUM,
        "Integracoes e dependencia declarada, mas nao apareceram sinais fortes.",
        "mapear provider, credencial, smoke e ownerPlatformId",
        signal_refs(signals, GateDomain.INTEGRATION),
        linked_signal_ids(signals, GateDomain.INTEGRATION),
    )
def build_business_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: commercial source-of-truth and entitlement readiness."""
    platform = report.platform
    common = (platform.platform_id, "business", GateDomain.BUSINESS, "Fonte comercial e entitlement")
    if platform.platform_id != "business" and "business" not in platform.related_platforms:
        return gate(
            *common,
            GateOutcome.NOT_APPLICABLE,
            SignalSeverity.INFO,
            "Business nao e dependencia primaria declarada.",
            "registrar contrato comercial quando houver produto vendavel",
            (),
            linked_signal_ids(signals, GateDomain.BUSINESS),
        )
    business_caps = [signal for signal in signals if signal.domain == GateDomain.BUSINESS]
    blockers = [signal for signal in business_caps if signal.is_blocking]
    if blockers:
        return gate(
            *common,
            GateOutcome.BLOCKED,
            SignalSeverity.HIGH,
            "Ha blocker comercial ou de isolamento de produto.",
            "isolar readiness por productId e validar regra de venda/piloto",
            signal_refs(blockers, GateDomain.BUSINESS),
            linked_signal_ids(blockers, GateDomain.BUSINESS),
        )
    if business_caps:
        return gate(
            *common,
            GateOutcome.PASS,
            SignalSeverity.INFO,
            "Ha sinais de entitlement, cobranca, produto ou blocker comercial isolado.",
            "manter Business como fonte unica para plano, franquia e bloqueio",
            signal_refs(business_caps, GateDomain.BUSINESS),
            linked_signal_ids(business_caps, GateDomain.BUSINESS),
        )
    return gate(
        *common,
        GateOutcome.ATTENTION,
        SignalSeverity.MEDIUM,
        "Business e relacao declarada, mas sinais comerciais nao apareceram nesta varredura.",
        "mapear plano, entitlement, consumo, bloqueio e decisao comercial",
        signal_refs(signals, GateDomain.BUSINESS),
        linked_signal_ids(signals, GateDomain.BUSINESS),
    )
def build_cloud_gate(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> ReadinessGate:
    """Gate: Cloudflare/external runtime readiness.

    A failed "cloudflare-plugin" attempt is an explicit exception: it never
    blocks the order by itself.
    """
    platform_id = report.platform.platform_id
    common = (platform_id, "cloud", GateDomain.CLOUD, "Cloudflare e runtime externo")
    cloud_signals = [signal for signal in signals if signal.domain == GateDomain.CLOUD]
    # Blocking cloud signals, excluding the tolerated plugin exception.
    hard_blockers = [signal for signal in cloud_signals if signal.is_blocking and "cloudflare-plugin" not in signal.tags]
    if hard_blockers:
        return gate(
            *common,
            GateOutcome.ATTENTION,
            SignalSeverity.MEDIUM,
            "Ha pendencia de binding, token live ou runtime externo; negativa do plugin nao e blocker.",
            "usar wrangler para diagnostico real de routes, secrets, deploy e smoke",
            signal_refs(hard_blockers, GateDomain.CLOUD),
            linked_signal_ids(hard_blockers, GateDomain.CLOUD),
        )
    runtime_evidence = [signal for signal in cloud_signals if "wrangler" in signal.tags or "cloudflare" in signal.tags]
    if runtime_evidence:
        return gate(
            *common,
            GateOutcome.PASS,
            SignalSeverity.INFO,
            "Ha evidencia de Worker, Cloudflare ou wrangler.",
            "validar runtime com wrangler quando a ordem exigir deploy/health",
            signal_refs(runtime_evidence, GateDomain.CLOUD),
            linked_signal_ids(runtime_evidence, GateDomain.CLOUD),
        )
    plugin_only = [signal for signal in cloud_signals if "cloudflare-plugin" in signal.tags]
    if plugin_only:
        return gate(
            *common,
            GateOutcome.NOT_APPLICABLE,
            SignalSeverity.INFO,
            "Apenas a tentativa do plugin foi registrada; isso nao bloqueia a OS.",
            "seguir trabalho operacional por wrangler quando houver acao Cloudflare real",
            signal_refs(plugin_only, GateDomain.CLOUD),
            linked_signal_ids(plugin_only, GateDomain.CLOUD),
        )
    return gate(
        *common,
        GateOutcome.NOT_APPLICABLE,
        SignalSeverity.INFO,
        "Nenhuma dependencia Cloudflare direta foi detectada para esta leitura.",
        "registrar dependencia Cloudflare apenas quando houver Worker, rota ou deploy",
        (),
        linked_signal_ids(signals, GateDomain.CLOUD),
    )
def build_gates(report: PlatformHumanReport, signals: Sequence[OperationalSignal]) -> tuple[ReadinessGate, ...]:
    """Run every gate builder, in the canonical order, against one platform."""
    builders = (
        build_repository_gate,
        build_git_gate,
        build_documentation_gate,
        build_tests_gate,
        build_openapi_gate,
        build_panel_gate,
        build_docs_gate,
        build_integration_gate,
        build_business_gate,
        build_cloud_gate,
    )
    return tuple(builder(report, signals) for builder in builders)
def compute_stage(report: PlatformHumanReport, gates: Sequence[ReadinessGate], signals: Sequence[OperationalSignal]) -> HumanReadinessStage:
    """Derive the human readiness stage from scan facts, gates, and signals.

    Hard conditions (missing repo, critical signal, blocked gate, blocking
    HIGH signal) short-circuit; otherwise the highest-ranked candidate stage
    wins, defaulting to LOCAL_ONLY.
    """
    scan = report.scan
    if not scan.exists:
        return HumanReadinessStage.NOT_FOUND
    has_critical = any(signal.severity == SignalSeverity.CRITICAL for signal in signals)
    has_blocked_gate = any(gate_item.outcome == GateOutcome.BLOCKED for gate_item in gates)
    if has_critical or has_blocked_gate:
        return HumanReadinessStage.BLOCKED
    if any(signal.is_blocking and signal.severity == SignalSeverity.HIGH for signal in signals):
        return HumanReadinessStage.CATALOG_ONLY
    candidates = [signal.stage for signal in signals]
    if scan.has_tests and scan.has_openapi and scan.git_present:
        candidates.append(HumanReadinessStage.TECHNICAL_READY)
    if any(gate_item.domain == GateDomain.PANEL and gate_item.passed for gate_item in gates):
        candidates.append(HumanReadinessStage.PANEL_READY)
    if report.average_score >= 85:
        candidates.append(HumanReadinessStage.HUMAN_EXPLAINABLE)
    if not candidates:
        return HumanReadinessStage.LOCAL_ONLY
    return max(candidates, key=stage_rank)
def compute_panel_ready(gates: Sequence[ReadinessGate], signals: Sequence[OperationalSignal]) -> bool:
    """True when the panel gate passed, or panel signals reach PANEL_READY with no blocker."""
    for gate_item in gates:
        if gate_item.domain == GateDomain.PANEL:
            if gate_item.passed:
                return True
            # Only the first panel gate matters; fall back to signal analysis.
            break
    panel_signals = [signal for signal in signals if signal.domain == GateDomain.PANEL]
    reached_stage = any(signal.stage == HumanReadinessStage.PANEL_READY for signal in panel_signals)
    has_blocker = any(signal.is_blocking for signal in panel_signals)
    return reached_stage and not has_blocker
def compute_same_source_ready(signals: Sequence[OperationalSignal]) -> bool:
    """True when a non-blocking panel-domain signal mentions sameSource.

    Tags are matched case-insensitively; title/summary matching is
    case-sensitive on the literal "sameSource" marker.
    """
    for signal in signals:
        if signal.domain != GateDomain.PANEL or signal.is_blocking:
            continue
        mentions_same_source = (
            "samesource" in " ".join(signal.tags).lower()
            or "sameSource" in signal.title
            or "sameSource" in signal.summary
        )
        if mentions_same_source:
            return True
    return False
def build_platform_dossier(report: PlatformHumanReport, recommendations: Sequence[Recommendation]) -> PlatformOperationalDossier:
    """Assemble the operational dossier for one platform from its human report."""
    platform_id = report.platform.platform_id
    scan = report.scan
    signals = build_operational_signals(report, recommendations)
    gates = build_gates(report, signals)
    return PlatformOperationalDossier(
        platform_id=platform_id,
        title=report.platform.title,
        repo_path=scan.repo_path,
        stage=compute_stage(report, gates, signals),
        human_score=report.average_score,
        technical_ready=scan.exists and scan.git_present and scan.has_tests,
        panel_ready=compute_panel_ready(gates, signals),
        same_source_ready=compute_same_source_ready(signals),
        blocker_count=sum(1 for signal in signals if signal.is_blocking),
        warning_count=sum(1 for gate_item in gates if gate_item.outcome == GateOutcome.ATTENTION),
        gates=gates,
        signals=signals,
        top_next_actions=next_actions_from_signals(signals, gates, limit=8),
        order_targets=merge_unique(
            recommendation.recommendation_id
            for recommendation in recommendations
            if recommendation.platform_id == platform_id
        ),
    )
def order_title_from_id(order_id: str) -> str:
    """Derive a human title from the slug after the first "__" separator.

    Ids without the separator are returned unchanged.
    """
    _prefix, separator, slug = order_id.partition("__")
    if not separator:
        return order_id
    return slug.replace("-", " ").strip().capitalize()
def order_type_from_id(order_id: str) -> OrderType:
    """Classify an order as executive or managerial by its id marker."""
    if "_EXECUTIVA__" in order_id:
        return OrderType.EXECUTIVE
    return OrderType.MANAGERIAL
def closure_status_for_order(platform_dossier: PlatformOperationalDossier | None, order_id: str) -> OrderClosureStatus:
    """Map a platform dossier onto a closure status for one active order.

    ``order_id`` is kept for interface compatibility; the status currently
    depends only on the dossier. (The previous ``order_id``-specific branch
    was dead code: it returned COMPLETED, the same as the fallthrough.)
    """
    if platform_dossier is None:
        # No dossier means the related platform was never found in the round.
        return OrderClosureStatus.BLOCKED
    if platform_dossier.critical_signals:
        return OrderClosureStatus.BLOCKED
    if platform_dossier.blocker_count:
        return OrderClosureStatus.PARTIAL
    return OrderClosureStatus.COMPLETED
def pending_for_order(platform_dossier: PlatformOperationalDossier | None) -> tuple[str, ...]:
    """List deduplicated pending items (blocking signals, failed gates) for an order."""
    if platform_dossier is None:
        return ("plataforma relacionada nao encontrada no dossie operacional",)
    entries = [
        f"{signal.title}: {signal.next_action}"
        for signal in platform_dossier.blocking_signals[:6]
    ]
    failing = {GateOutcome.FAIL, GateOutcome.BLOCKED}
    entries.extend(
        f"{gate_item.title}: {gate_item.next_action}"
        for gate_item in platform_dossier.gates
        if gate_item.outcome in failing
    )
    return merge_unique(entries)
def evidence_for_order(platform_dossier: PlatformOperationalDossier | None, limit: int = 8) -> tuple[SourceReference, ...]:
    """Collect up to *limit* source references: signal sources first, then gate evidence."""
    if platform_dossier is None:
        return ()
    bundles = [signal.sources for signal in platform_dossier.signals]
    bundles.extend(gate_item.evidence for gate_item in platform_dossier.gates)
    collected: list[SourceReference] = []
    for bundle in bundles:
        collected.extend(bundle)
        if len(collected) >= limit:
            break
    # Final slice guarantees the limit even when the last bundle overshot it.
    return tuple(collected[:limit])
def link_output_orders(platform_id: str, output_orders: Sequence[ServiceOrder]) -> tuple[str, ...]:
    """Return ids of output orders that target or mention the given platform."""
    return tuple(
        order.order_id
        for order in output_orders
        if find_service_order_platform(order) == platform_id
        or platform_id in order.object_scope.lower()
    )
def build_order_justifications(
    platform_dossiers: Sequence[PlatformOperationalDossier],
    active_order_ids: Sequence[str],
    output_orders: Sequence[ServiceOrder],
) -> tuple[OrderJustification, ...]:
    """Produce one OrderJustification per active order id.

    Each justification links the order to its platform dossier, derives a
    closure status, and carries pending items, evidence, and the output
    orders spawned for the same platform.
    """
    by_platform = {dossier.platform_id: dossier for dossier in platform_dossiers}
    justifications: list[OrderJustification] = []
    for order_id in active_order_ids:
        # Prefer the static map; fall back to deriving the platform from the id.
        # NOTE(review): the fallback passes a *string* while link_output_orders
        # passes a ServiceOrder to the same helper — confirm
        # find_service_order_platform accepts both.
        platform_id = ORDER_PLATFORM_MAP.get(order_id, find_service_order_platform(order_id))
        dossier = by_platform.get(platform_id)
        status = closure_status_for_order(dossier, order_id)
        # Cap linked signals at 12; drop not-applicable gates from the linkage.
        linked_signals = tuple(signal.signal_id for signal in dossier.signals[:12]) if dossier else ()
        linked_gates = tuple(gate_item.gate_id for gate_item in dossier.gates if gate_item.outcome != GateOutcome.NOT_APPLICABLE) if dossier else ()
        pending = pending_for_order(dossier)
        if dossier is None:
            reason = "A plataforma relacionada nao foi encontrada no relatorio da rodada."
            execution = "Ordem bloqueada por ausencia de dossie material."
        elif pending:
            reason = "A ordem foi executada como formalizacao e reducao de ambiguidade; ainda ha pendencias reais."
            execution = f"Dossie atualizado para {platform_id}; status {dossier.status_label}; pendencias: {len(pending)}."
        else:
            reason = "A ordem foi executada com evidencia suficiente para fechar a lacuna mapeada."
            execution = f"Dossie atualizado para {platform_id}; nenhum blocker ativo ficou associado a ordem."
        justifications.append(
            OrderJustification(
                order_id=order_id,
                order_type=order_type_from_id(order_id),
                platform_id=platform_id,
                title=order_title_from_id(order_id),
                closure_status=status,
                reason=reason,
                execution_summary=execution,
                evidence=evidence_for_order(dossier),
                linked_signals=linked_signals,
                linked_gates=linked_gates,
                resulting_orders=link_output_orders(platform_id, output_orders),
                pending_items=pending,
                # Fixed validation checklist applied to every treated order.
                validation_steps=(
                    "regenerar dossie operacional",
                    "validar JSON e Markdown gerados",
                    "atualizar SQL semantico com arquivos alterados",
                    "registrar ordens de saida somente para pendencias reais",
                ),
            )
        )
    return tuple(justifications)
def executive_summary(platform_dossiers: Sequence[PlatformOperationalDossier]) -> tuple[str, ...]:
    """Build the executive summary lines for the round across all platforms."""
    total = len(platform_dossiers)
    all_signals = tuple(signal for dossier in platform_dossiers for signal in dossier.signals)
    average = round(sum(item.human_score for item in platform_dossiers) / total) if total else 0
    lines = [
        f"Plataformas avaliadas: {total}",
        f"Score humano medio: {average}",
        f"Plataformas com blocker formalizado: {sum(1 for item in platform_dossiers if item.blocker_count > 0)}",
        f"Plataformas com panelReady detectado: {sum(1 for item in platform_dossiers if item.panel_ready)}",
        f"Plataformas com sameSource detectado: {sum(1 for item in platform_dossiers if item.same_source_ready)}",
        f"Plataformas com sinal critico: {sum(1 for item in platform_dossiers if item.critical_signals)}",
    ]
    top_blockers = summarize_blockers(all_signals, limit=6)
    if top_blockers:
        lines.append("Blockers prioritarios: " + " | ".join(top_blockers[:4]))
    top_capabilities = summarize_capabilities(all_signals, limit=6)
    if top_capabilities:
        lines.append("Capacidades confirmadas: " + " | ".join(top_capabilities[:4]))
    return tuple(lines)
def managerial_summary(platform_dossiers: Sequence[PlatformOperationalDossier]) -> tuple[str, ...]:
    """One managerial line per platform, most-blocked platforms first."""
    alert_outcomes = {GateOutcome.ATTENTION, GateOutcome.FAIL, GateOutcome.BLOCKED}
    ordered = sorted(platform_dossiers, key=lambda item: (-item.blocker_count, item.platform_id))
    lines: list[str] = []
    for dossier in ordered:
        flagged = [gate_item for gate_item in dossier.gates if gate_item.outcome in alert_outcomes]
        gates_text = ", ".join(gate_item.title for gate_item in flagged[:3]) or "sem gate em atencao"
        lines.append(
            f"{dossier.platform_id}: stage={dossier.stage.value}; status={dossier.status_label}; "
            f"blockers={dossier.blocker_count}; gates={gates_text}; acao={dossier.primary_action}"
        )
    return tuple(lines)
def pending_items_from_justifications(justifications: Sequence[OrderJustification], platform_dossiers: Sequence[PlatformOperationalDossier]) -> tuple[str, ...]:
    """Consolidate pending items from order justifications and platform dossiers."""
    consolidated: list[str] = []
    consolidated.extend(
        f"{justification.order_id}: {pending}"
        for justification in justifications
        for pending in justification.pending_items
    )
    for dossier in platform_dossiers:
        if not dossier.technical_ready:
            consolidated.append(f"{dossier.platform_id}: technical_ready=false; {dossier.primary_action}")
        # A blocked platform without detailed next actions is itself a pending item.
        if dossier.blocker_count and not dossier.top_next_actions:
            consolidated.append(f"{dossier.platform_id}: blockers sem proxima acao detalhada")
    return merge_unique(consolidated)
def build_execution_round_dossier(
    project_root: Path,
    platform_reports: Sequence[PlatformHumanReport],
    recommendations: Sequence[Recommendation],
    output_orders: Sequence[ServiceOrder],
    active_order_ids: Sequence[str] = ACTIVE_ORDER_IDS,
    total_code_lines_analyzed: int = 0,
) -> ExecutionRoundDossier:
    """Build the complete round dossier: platform dossiers, justifications, summaries."""
    dossiers = tuple(build_platform_dossier(report, recommendations) for report in platform_reports)
    justifications = build_order_justifications(dossiers, active_order_ids, output_orders)
    stamp = utc_now()
    output_ids = [order.order_id for order in output_orders]
    # Digest over timestamp + input/output order ids keeps the round id stable per run.
    round_id = f"mais-humana-round-{stable_digest((stamp, active_order_ids, output_ids), length=10)}"
    return ExecutionRoundDossier(
        round_id=round_id,
        project_id="tudo-para-ia-mais-humana",
        generated_at=stamp,
        platform_dossiers=dossiers,
        order_justifications=justifications,
        active_input_orders=tuple(active_order_ids),
        output_orders=tuple(output_ids),
        executive_summary=executive_summary(dossiers),
        managerial_summary=managerial_summary(dossiers),
        pending_items=pending_items_from_justifications(justifications, dossiers),
        total_code_lines_analyzed=total_code_lines_analyzed,
        code_lines_available_in_project=code_line_count(project_root),
    )
def dossier_to_markdown(dossier: ExecutionRoundDossier) -> str:
    """Render the round dossier as a human-readable Markdown report."""
    # Header with round identity and code-volume counters.
    lines = [
        "# Dossie operacional humano da rodada",
        "",
        f"- round_id: `{dossier.round_id}`",
        f"- project_id: `{dossier.project_id}`",
        f"- generated_at: `{dossier.generated_at}`",
        f"- linhas de codigo analisadas no ecossistema: `{dossier.total_code_lines_analyzed}`",
        f"- linhas de codigo disponiveis no projeto Mais Humana: `{dossier.code_lines_available_in_project}`",
        "",
        "## Sumario executivo",
        "",
    ]
    lines.extend(f"- {item}" for item in dossier.executive_summary)
    lines.extend(["", "## Sumario gerencial", ""])
    lines.extend(f"- {item}" for item in dossier.managerial_summary)
    lines.extend(["", "## Plataformas", ""])
    # One subsection per platform, sorted by id for stable output.
    for item in sorted(dossier.platform_dossiers, key=lambda value: value.platform_id):
        lines.append(f"### {item.platform_id}")
        lines.append("")
        lines.append(f"- stage: `{item.stage.value}`")
        lines.append(f"- status: `{item.status_label}`")
        lines.append(f"- score_humano: `{item.human_score}`")
        lines.append(f"- technical_ready: `{item.technical_ready}`")
        lines.append(f"- panel_ready: `{item.panel_ready}`")
        lines.append(f"- same_source_ready: `{item.same_source_ready}`")
        lines.append(f"- blockers: `{item.blocker_count}`")
        lines.append(f"- warnings: `{item.warning_count}`")
        lines.append(f"- proxima_acao: {item.primary_action}")
        lines.append("")
        lines.append("Gates:")
        for gate_item in item.gates:
            # Not-applicable gates would be noise in the report; skip them.
            if gate_item.outcome == GateOutcome.NOT_APPLICABLE:
                continue
            lines.append(f"- `{gate_item.domain.value}` {gate_item.title}: `{gate_item.outcome.value}` - {gate_item.reason}")
        lines.append("")
        lines.append("Sinais prioritarios:")
        # Highest severity first, title as tie-breaker; cap at ten signals.
        for signal in sorted(item.signals, key=lambda value: (-severity_rank(value.severity), value.title))[:10]:
            lines.append(f"- `{signal.severity.value}` `{signal.kind.value}` {signal.title}: {signal.next_action}")
        lines.append("")
    lines.extend(["", "## Ordens executadas/formalizadas", ""])
    # NOTE: the loop variable `item` is reused here for justifications.
    for item in dossier.order_justifications:
        lines.append(f"### {item.order_id}")
        lines.append("")
        lines.append(f"- tipo: `{item.order_type.value}`")
        lines.append(f"- plataforma: `{item.platform_id}`")
        lines.append(f"- status: `{item.closure_status.value}`")
        lines.append(f"- razao: {item.reason}")
        lines.append(f"- executado: {item.execution_summary}")
        if item.pending_items:
            lines.append("- pendencias:")
            lines.extend(f"  - {pending}" for pending in item.pending_items[:8])
        else:
            lines.append("- pendencias: nenhuma pendencia material associada")
        if item.resulting_orders:
            lines.append("- ordens de saida vinculadas: " + ", ".join(f"`{order}`" for order in item.resulting_orders))
        lines.append("")
    lines.extend(["", "## Pendencias consolidadas", ""])
    if dossier.pending_items:
        lines.extend(f"- {item}" for item in dossier.pending_items)
    else:
        lines.append("- Nenhuma pendencia material consolidada.")
    # Strip trailing blank lines and keep exactly one final newline.
    return "\n".join(lines).strip() + "\n"
def order_justifications_markdown(dossier: ExecutionRoundDossier) -> str:
    """Render the per-order justification section as a standalone Markdown file."""
    lines = [
        "# Justificativa das ordens de servico da rodada",
        "",
        "Este arquivo liga cada ordem tratada ao dossie operacional, sinais, gates, evidencias e ordens de saida.",
        "",
    ]
    for item in dossier.order_justifications:
        lines.append(f"## {item.order_id}")
        lines.append("")
        lines.append(f"- tipo: `{item.order_type.value}`")
        lines.append(f"- plataforma: `{item.platform_id}`")
        lines.append(f"- fechamento: `{item.closure_status.value}`")
        lines.append(f"- status compacto: `{item.compact_status}`")
        lines.append(f"- motivo: {item.reason}")
        lines.append(f"- resumo da execucao: {item.execution_summary}")
        lines.append(f"- sinais vinculados: `{len(item.linked_signals)}`")
        lines.append(f"- gates vinculados: `{len(item.linked_gates)}`")
        lines.append("")
        lines.append("Evidencias:")
        if item.evidence:
            # Cap the evidence list at ten references per order.
            for ref in item.evidence[:10]:
                lines.append(f"- `{ref.reference}` - {ref.summary}")
        else:
            lines.append("- nenhuma evidencia local direta encontrada")
        lines.append("")
        lines.append("Validacoes:")
        for validation in item.validation_steps:
            lines.append(f"- {validation}")
        lines.append("")
    # Strip trailing blank lines and keep exactly one final newline.
    return "\n".join(lines).strip() + "\n"
def dossier_compact_rows(dossier: ExecutionRoundDossier) -> list[list[str]]:
    """Tabular (header + rows) view of the platform dossiers, sorted by platform id."""
    header = ["platform", "stage", "status", "score", "blockers", "panel_ready", "same_source", "primary_action"]
    body = [
        [
            item.platform_id,
            item.stage.value,
            item.status_label,
            str(item.human_score),
            str(item.blocker_count),
            "yes" if item.panel_ready else "no",
            "yes" if item.same_source_ready else "no",
            item.primary_action,
        ]
        for item in sorted(dossier.platform_dossiers, key=lambda value: value.platform_id)
    ]
    return [header] + body
def write_csv_lines(rows: Sequence[Sequence[str]]) -> str:
    """Render rows as CSV text with RFC 4180-style minimal quoting.

    A field is wrapped in double quotes when it contains a comma, a double
    quote, or a line break; embedded quotes are doubled. Rows are joined
    with "\\n" and the result carries a trailing newline.
    """
    output: list[str] = []
    for row in rows:
        escaped: list[str] = []
        for value in row:
            text = str(value).replace('"', '""')
            # Fix: also quote fields containing a bare carriage return, which
            # the previous condition left unquoted (RFC 4180 requires quoting
            # any embedded line break).
            if "," in text or "\n" in text or "\r" in text or '"' in text:
                text = f'"{text}"'
            escaped.append(text)
        output.append(",".join(escaped))
    return "\n".join(output) + "\n"
def dossier_to_dict(dossier: ExecutionRoundDossier) -> dict[str, object]:
    """Serialize the round dossier into plain JSON-compatible data."""
    return as_plain_data(dossier)

View File

@@ -0,0 +1,526 @@
"""Operational dossier models for human-centered service-order rounds.
The first foundation of the Mais Humana platform already produced matrixes,
reports, charts, orders, and semantic storage. This module adds a second layer:
compact operational records that explain why a service order exists, which
blockers are real, what is merely a planned dependency, and how a human should
understand the status without reading each source repository.
The models are intentionally explicit. They are used by the report generator,
the service-order justifier, the central closeout writer, and tests. Keeping
the structure here avoids spreading status strings across rendering code.
"""
from __future__ import annotations
import hashlib
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, Mapping, Sequence
from .models import Evidence, NeedCategory, OrderType, PlatformHumanReport, Recommendation, ServiceOrder, as_plain_data, merge_unique, slugify, utc_now
class SignalSeverity(str, Enum):
    """How strongly a signal impacts human readiness.

    Ranking/weighting is defined by SEVERITY_WEIGHT, not declaration order.
    """

    INFO = "info"
    LOW = "low"
    MEDIUM = "medium"
    # HIGH and CRITICAL make a signal blocking (see OperationalSignal.is_blocking).
    HIGH = "high"
    CRITICAL = "critical"
class SignalKind(str, Enum):
    """Normalized classes of operational findings."""

    CAPABILITY = "capability"
    # BLOCKER is treated as blocking regardless of its severity
    # (see OperationalSignal.is_blocking).
    BLOCKER = "blocker"
    RISK = "risk"
    EXCEPTION = "exception"
    DECISION = "decision"
    GAP = "gap"
    EVIDENCE = "evidence"
class SourceConfidence(str, Enum):
    """Confidence of a source after local analysis.

    Evidence with confidence >= 0.75 maps to DIRECT, otherwise DERIVED
    (see SourceReference.from_evidence); the remaining values are assigned
    by callers.
    """

    DIRECT = "direct"
    DERIVED = "derived"
    DECLARED = "declared"
    INFERRED = "inferred"
    MISSING = "missing"
class HumanReadinessStage(str, Enum):
    """Lifecycle stage from a human-operational point of view.

    Comparison order is defined by STAGE_ORDER, not declaration order;
    note BLOCKED ranks just above NOT_FOUND there.
    """

    NOT_FOUND = "not_found"
    LOCAL_ONLY = "local_only"
    CATALOG_ONLY = "catalog_only"
    PLANNED = "planned"
    TECHNICAL_READY = "technical_ready"
    HUMAN_EXPLAINABLE = "human_explainable"
    PANEL_READY = "panel_ready"
    CONTROLLED_READY = "controlled_ready"
    PRODUCTION_READY = "production_ready"
    BLOCKED = "blocked"
class GateOutcome(str, Enum):
    """Outcome of a readiness gate.

    Only PASS counts as a pass (see ReadinessGate.passed); ATTENTION and
    NOT_APPLICABLE are neither passes nor hard failures.
    """

    PASS = "pass"
    ATTENTION = "attention"
    FAIL = "fail"
    BLOCKED = "blocked"
    NOT_APPLICABLE = "not_applicable"
class GateDomain(str, Enum):
    """Gate domains aligned to ecosystem governance."""

    REPOSITORY = "repository"
    # NOTE(review): DOCUMENTATION and DOCS both exist; confirm whether one of
    # them is a platform id rather than a domain, or consolidate.
    DOCUMENTATION = "documentation"
    RUNTIME = "runtime"
    TESTS = "tests"
    CONTRACT = "contract"
    PANEL = "panel"
    IDENTITY = "identity"
    BUSINESS = "business"
    COMPLIANCE = "compliance"
    INTEGRATION = "integration"
    DOCS = "docs"
    OBSERVABILITY = "observability"
    SECURITY = "security"
    GOVERNANCE = "governance"
    CLOUD = "cloud"
class OrderClosureStatus(str, Enum):
    """Status used when an active order is treated in a round.

    Values are Portuguese labels that surface directly in reports.
    """

    COMPLETED = "concluida"
    PARTIAL = "parcial"
    BLOCKED = "bloqueada"
    SUPERSEDED = "substituida"
class EvidenceRole(str, Enum):
    """Role of a piece of evidence in a human explanation."""

    PRIMARY = "primary"
    SUPPORTING = "supporting"
    CONTRADICTORY = "contradictory"
    # ABSENT records that expected evidence was looked for and not found.
    ABSENT = "absent"
# Comparison rank for lifecycle stages. BLOCKED deliberately sits near the
# bottom (just above NOT_FOUND) so blocked platforms sort as least ready.
STAGE_ORDER: dict[HumanReadinessStage, int] = {
    HumanReadinessStage.NOT_FOUND: 0,
    HumanReadinessStage.BLOCKED: 1,
    HumanReadinessStage.PLANNED: 2,
    HumanReadinessStage.CATALOG_ONLY: 3,
    HumanReadinessStage.LOCAL_ONLY: 4,
    HumanReadinessStage.TECHNICAL_READY: 5,
    HumanReadinessStage.HUMAN_EXPLAINABLE: 6,
    HumanReadinessStage.PANEL_READY: 7,
    HumanReadinessStage.CONTROLLED_READY: 8,
    HumanReadinessStage.PRODUCTION_READY: 9,
}
# Severity weights consumed by severity_rank/worst_severity for ordering.
SEVERITY_WEIGHT: dict[SignalSeverity, int] = {
    SignalSeverity.INFO: 0,
    SignalSeverity.LOW: 1,
    SignalSeverity.MEDIUM: 3,
    SignalSeverity.HIGH: 6,
    SignalSeverity.CRITICAL: 10,
}
def stable_digest(value: object, length: int = 16) -> str:
    """Return a deterministic digest for compact evidence IDs."""
    normalized = as_plain_data(value)
    payload = repr(normalized).encode("utf-8", errors="ignore")
    return hashlib.sha256(payload).hexdigest()[:length]
def severity_rank(value: SignalSeverity) -> int:
    """Numeric weight of *value*; unknown severities rank as 0."""
    try:
        return SEVERITY_WEIGHT[value]
    except KeyError:
        return 0
def stage_rank(value: HumanReadinessStage) -> int:
    """Numeric rank of *value*; unknown stages rank as 0."""
    try:
        return STAGE_ORDER[value]
    except KeyError:
        return 0
def worst_severity(values: Iterable[SignalSeverity]) -> SignalSeverity:
    """Highest-weight severity in *values*, or INFO when empty."""
    return max(values, key=severity_rank, default=SignalSeverity.INFO)
def highest_stage(values: Iterable[HumanReadinessStage]) -> HumanReadinessStage:
    """Most advanced stage in *values*, or NOT_FOUND when empty."""
    return max(values, key=stage_rank, default=HumanReadinessStage.NOT_FOUND)
def lowest_stage(values: Iterable[HumanReadinessStage]) -> HumanReadinessStage:
    """Least advanced stage in *values*, or NOT_FOUND when empty."""
    return min(values, key=stage_rank, default=HumanReadinessStage.NOT_FOUND)
def normalize_reference(path: str | Path, line: int | None = None) -> str:
    """Normalize *path* to forward slashes, appending ``:line`` when given."""
    ref = str(path).replace("\\", "/")
    return ref if line is None else f"{ref}:{line}"
@dataclass(slots=True)
class SourceReference:
    """Local source used to justify a signal."""

    # File path; backslashes are normalized by the `reference` property.
    path: str
    summary: str
    line: int | None = None
    confidence: SourceConfidence = SourceConfidence.INFERRED
    role: EvidenceRole = EvidenceRole.SUPPORTING

    @property
    def reference(self) -> str:
        # "path:line" when a line is known, otherwise just the path.
        return normalize_reference(self.path, self.line)

    def to_dict(self) -> dict[str, Any]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)

    @classmethod
    def from_evidence(cls, evidence: Evidence, *, role: EvidenceRole = EvidenceRole.SUPPORTING) -> "SourceReference":
        """Build a reference from an Evidence record.

        Evidence confidence >= 0.75 is treated as a DIRECT observation,
        anything lower as DERIVED.
        """
        confidence = SourceConfidence.DIRECT if evidence.confidence >= 0.75 else SourceConfidence.DERIVED
        return cls(
            path=evidence.path,
            line=evidence.line,
            summary=evidence.summary,
            confidence=confidence,
            role=role,
        )
@dataclass(slots=True)
class OperationalSignal:
    """A normalized signal detected from code, docs, warnings, or catalog data."""

    signal_id: str
    platform_id: str
    kind: SignalKind
    domain: GateDomain
    title: str
    summary: str
    severity: SignalSeverity
    stage: HumanReadinessStage
    categories: tuple[NeedCategory, ...] = ()
    sources: tuple[SourceReference, ...] = ()
    tags: tuple[str, ...] = ()
    next_action: str = "registrar continuidade operacional"
    created_at: str = field(default_factory=utc_now)

    def to_dict(self) -> dict[str, Any]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)

    @property
    def is_blocking(self) -> bool:
        # Explicit BLOCKER kind always blocks; otherwise HIGH/CRITICAL severity does.
        return self.kind == SignalKind.BLOCKER or self.severity in {SignalSeverity.HIGH, SignalSeverity.CRITICAL}

    @property
    def reference_digest(self) -> str:
        # Short deterministic id derived from identity, sources, and summary.
        return stable_digest((self.signal_id, self.sources, self.summary), length=12)

    def compact_line(self) -> str:
        """Single-line ``platform:domain:kind:severity:title`` form for logs."""
        return f"{self.platform_id}:{self.domain.value}:{self.kind.value}:{self.severity.value}:{self.title}"
@dataclass(slots=True)
class ReadinessGate:
    """One human-readiness gate for a platform."""

    gate_id: str
    platform_id: str
    domain: GateDomain
    title: str
    outcome: GateOutcome
    severity: SignalSeverity
    reason: str
    next_action: str
    evidence: tuple[SourceReference, ...] = ()
    # IDs of OperationalSignal records that motivated this gate.
    linked_signals: tuple[str, ...] = ()

    def to_dict(self) -> dict[str, Any]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)

    @property
    def passed(self) -> bool:
        # Only a clean PASS counts; ATTENTION/NOT_APPLICABLE are not passes.
        return self.outcome == GateOutcome.PASS

    @property
    def blocks_panel(self) -> bool:
        # Panel promotion is blocked only by FAIL/BLOCKED at HIGH or CRITICAL severity.
        return self.outcome in {GateOutcome.FAIL, GateOutcome.BLOCKED} and self.severity in {SignalSeverity.HIGH, SignalSeverity.CRITICAL}

    def compact_line(self) -> str:
        """Compact ``gate=outcome/severity`` string for log lines."""
        return f"{self.gate_id}={self.outcome.value}/{self.severity.value}"
@dataclass(slots=True)
class PlatformOperationalDossier:
    """Human-operational dossier for one platform."""

    platform_id: str
    title: str
    repo_path: str
    stage: HumanReadinessStage
    human_score: int
    technical_ready: bool
    panel_ready: bool
    same_source_ready: bool
    blocker_count: int
    warning_count: int
    gates: tuple[ReadinessGate, ...]
    signals: tuple[OperationalSignal, ...]
    top_next_actions: tuple[str, ...]
    order_targets: tuple[str, ...]
    generated_at: str = field(default_factory=utc_now)

    def to_dict(self) -> dict[str, Any]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)

    @property
    def critical_signals(self) -> tuple[OperationalSignal, ...]:
        """Signals at CRITICAL severity only."""
        return tuple(signal for signal in self.signals if signal.severity == SignalSeverity.CRITICAL)

    @property
    def blocking_signals(self) -> tuple[OperationalSignal, ...]:
        """Signals considered blocking (BLOCKER kind or HIGH/CRITICAL severity)."""
        return tuple(signal for signal in self.signals if signal.is_blocking)

    @property
    def status_label(self) -> str:
        """Portuguese status label; precedence: critico > bloqueado >
        pronto-para-painel > tecnico-pronto > atencao."""
        if self.critical_signals:
            return "critico"
        if self.blocker_count:
            return "bloqueado"
        if self.panel_ready and self.same_source_ready:
            return "pronto-para-painel"
        if self.technical_ready:
            return "tecnico-pronto"
        return "atencao"

    @property
    def primary_action(self) -> str:
        """The single most relevant next action for this platform."""
        if self.top_next_actions:
            return self.top_next_actions[0]
        if self.blocker_count:
            return "resolver bloqueios antes de promover prontidao"
        if not self.panel_ready:
            return "materializar contrato de painel e mesma fonte"
        return "manter evidencias e regressao"

    def signals_by_domain(self) -> dict[str, list[OperationalSignal]]:
        """Group signals by domain value, each group sorted by severity desc, then title."""
        grouped: dict[str, list[OperationalSignal]] = {}
        for signal in self.signals:
            grouped.setdefault(signal.domain.value, []).append(signal)
        for items in grouped.values():
            items.sort(key=lambda item: (-severity_rank(item.severity), item.title))
        return grouped
@dataclass(slots=True)
class OrderJustification:
    """Why a service order exists and what evidence supports it."""

    order_id: str
    order_type: OrderType
    platform_id: str
    title: str
    closure_status: OrderClosureStatus
    reason: str
    execution_summary: str
    evidence: tuple[SourceReference, ...]
    # IDs of signals/gates that motivated this order.
    linked_signals: tuple[str, ...]
    linked_gates: tuple[str, ...]
    # IDs of follow-up orders spawned when closing this one.
    resulting_orders: tuple[str, ...]
    pending_items: tuple[str, ...]
    validation_steps: tuple[str, ...]
    generated_at: str = field(default_factory=utc_now)

    def to_dict(self) -> dict[str, Any]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)

    @property
    def has_pending(self) -> bool:
        """True when real pending items remain after closure."""
        return bool(self.pending_items)

    @property
    def compact_status(self) -> str:
        """Compact ``closure:pendencia`` label for summaries."""
        pending = "com-pendencia" if self.has_pending else "sem-pendencia"
        return f"{self.closure_status.value}:{pending}"
@dataclass(slots=True)
class ExecutionRoundDossier:
    """Full dossier for the current service-order round."""

    round_id: str
    project_id: str
    generated_at: str
    platform_dossiers: tuple[PlatformOperationalDossier, ...]
    order_justifications: tuple[OrderJustification, ...]
    # Orders that were open when the round started / created as its output.
    active_input_orders: tuple[str, ...]
    output_orders: tuple[str, ...]
    executive_summary: tuple[str, ...]
    managerial_summary: tuple[str, ...]
    pending_items: tuple[str, ...]
    total_code_lines_analyzed: int
    code_lines_available_in_project: int

    def to_dict(self) -> dict[str, Any]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)

    @property
    def blockers(self) -> tuple[OperationalSignal, ...]:
        """All blocking signals across platforms, worst severity first."""
        signals: list[OperationalSignal] = []
        for dossier in self.platform_dossiers:
            signals.extend(dossier.blocking_signals)
        signals.sort(key=lambda item: (-severity_rank(item.severity), item.platform_id, item.title))
        return tuple(signals)

    @property
    def completed_orders(self) -> tuple[OrderJustification, ...]:
        """Justifications closed as COMPLETED."""
        return tuple(item for item in self.order_justifications if item.closure_status == OrderClosureStatus.COMPLETED)

    @property
    def partial_orders(self) -> tuple[OrderJustification, ...]:
        """Justifications closed as PARTIAL."""
        return tuple(item for item in self.order_justifications if item.closure_status == OrderClosureStatus.PARTIAL)

    @property
    def blocked_orders(self) -> tuple[OrderJustification, ...]:
        """Justifications closed as BLOCKED."""
        return tuple(item for item in self.order_justifications if item.closure_status == OrderClosureStatus.BLOCKED)

    def dossier_for(self, platform_id: str) -> PlatformOperationalDossier | None:
        """Linear lookup of a platform dossier; None when absent."""
        for dossier in self.platform_dossiers:
            if dossier.platform_id == platform_id:
                return dossier
        return None
def source_refs_from_strings(values: Iterable[str], summary: str, confidence: SourceConfidence = SourceConfidence.DECLARED) -> tuple[SourceReference, ...]:
    """Wrap plain path strings as SourceReference entries sharing one summary."""
    return tuple(
        SourceReference(path=str(value), summary=summary, confidence=confidence)
        for value in values
    )
def source_refs_from_evidence(values: Iterable[Evidence], limit: int = 6) -> tuple[SourceReference, ...]:
    """Convert at most *limit* Evidence records into source references.

    Stops consuming the iterable as soon as the cap is reached.
    """
    collected: list[SourceReference] = []
    for item in values:
        collected.append(SourceReference.from_evidence(item))
        # >= (not ==) keeps the historical quirk: limit <= 0 still yields one entry.
        if len(collected) >= limit:
            break
    return tuple(collected)
def summarize_signal_titles(signals: Sequence[OperationalSignal], limit: int = 8) -> tuple[str, ...]:
    """Top signal titles prefixed by platform, strongest severity first."""
    def sort_key(signal):
        return (-severity_rank(signal.severity), signal.platform_id, signal.title)

    top = sorted(signals, key=sort_key)[:limit]
    return merge_unique(f"{signal.platform_id}: {signal.title}" for signal in top)
def summarize_gate_failures(gates: Sequence[ReadinessGate], limit: int = 8) -> tuple[str, ...]:
    """Non-passing gates as "platform: title - action" lines, worst first."""
    failing = [gate for gate in gates if not gate.passed]
    failing.sort(key=lambda gate: (-severity_rank(gate.severity), gate.platform_id, gate.gate_id))
    return merge_unique(f"{gate.platform_id}: {gate.title} - {gate.next_action}" for gate in failing[:limit])
def next_actions_from_signals(signals: Sequence[OperationalSignal], gates: Sequence[ReadinessGate], limit: int = 7) -> tuple[str, ...]:
    """Deduplicated next actions: signal actions (worst first), then failing-gate actions."""
    collected = [
        signal.next_action
        for signal in sorted(signals, key=lambda item: (-severity_rank(item.severity), item.title))
    ]
    collected += [
        gate.next_action
        for gate in sorted(gates, key=lambda item: (-severity_rank(item.severity), item.title))
        if not gate.passed
    ]
    return merge_unique(collected)[:limit]
def order_id_for_recommendation(recommendation: Recommendation, order_type: OrderType, index: int) -> str:
    """Deterministic order id: zero-padded index, type prefix, slugged title."""
    if order_type == OrderType.EXECUTIVE:
        type_prefix = "EXECUTIVA"
    else:
        type_prefix = "GERENCIAL"
    return f"{index:04d}_{type_prefix}__{slugify(recommendation.title)}"
def compact_report_identity(report: PlatformHumanReport) -> Mapping[str, Any]:
    """Project a platform report down to the identity fields used in summaries."""
    return {
        "platform_id": report.platform.platform_id,
        "title": report.platform.title,
        "repo_path": report.scan.repo_path,
        "score": report.average_score,
        "warnings": report.scan.warnings,
        # Counts only; the full evidence objects stay in the scan.
        "evidence": len(report.scan.evidence),
        "code_lines": report.scan.code_lines,
    }
def find_service_order_platform(order: ServiceOrder | OrderJustification | str, default: str = "ecosystem") -> str:
    """Infer which known platform an order refers to from its free text.

    Fix: matching used plain substring search, so short ids produced false
    positives — e.g. ``"ui"`` matched inside Portuguese words like
    ``"incluir"`` or English ``"build"``. Candidates are now matched as whole
    tokens, treating ``_`` and ``-`` as separators so slugs such as
    ``__stj_painel`` or ``platform-base`` still match.

    Returns *default* when no platform id is found.
    """
    import re

    # str is checked first so plain-text callers never touch the order types.
    if isinstance(order, str):
        text = order.lower()
    elif isinstance(order, ServiceOrder):
        text = f"{order.order_id} {order.title} {order.object_scope}".lower()
    elif isinstance(order, OrderJustification):
        text = f"{order.order_id} {order.title} {order.reason}".lower()
    else:
        text = str(order).lower()
    known = (
        "business",
        "compliance",
        "customer_ops",
        "docs",
        "finance",
        "gettys",
        "identity",
        "integracoes",
        "intelligence",
        "mcps",
        "platform_base",
        "public",
        "stj",
        "ui",
    )
    for platform_id in known:
        for token in (platform_id, platform_id.replace("_", "-")):
            # Custom boundaries: no adjacent [a-z0-9], but "_" and "-" count
            # as separators (unlike regex \b, where "_" is a word character).
            pattern = rf"(?<![a-z0-9]){re.escape(token)}(?![a-z0-9])"
            if re.search(pattern, text):
                return platform_id
    return default
def code_line_count(root: Path) -> int:
    """Count local project code lines for production-minimum reporting.

    Walks *root* recursively, skipping vendored/build directories, and sums
    line counts of files whose suffix marks them as code. Unreadable entries
    are silently ignored; a missing root counts as zero.
    """
    code_suffixes = {".py", ".ts", ".tsx", ".js", ".mjs", ".cjs", ".java"}
    ignored_dirs = {"node_modules", ".git", "dist", "build", "coverage", "__pycache__", ".pytest_cache"}

    def count_in(directory: Path) -> int:
        try:
            children = list(directory.iterdir())
        except OSError:
            return 0
        subtotal = 0
        for child in children:
            if child.is_dir():
                if child.name not in ignored_dirs:
                    subtotal += count_in(child)
            elif child.suffix.lower() in code_suffixes:
                try:
                    with child.open("r", encoding="utf-8", errors="ignore") as stream:
                        subtotal += sum(1 for _ in stream)
                except OSError:
                    pass
        return subtotal

    if not root.exists():
        return 0
    return count_in(root)

294
src/mais_humana/orders.py Normal file
View File

@@ -0,0 +1,294 @@
"""Service order generation for the human-centered platform."""
from __future__ import annotations
from pathlib import Path
import re
from typing import Sequence
from .models import OrderStatus, OrderType, PlatformHumanReport, Recommendation, ServiceOrder, incrementing_id, slugify
def order_from_recommendation(
    recommendation: Recommendation,
    index: int,
    order_type: OrderType,
    project_id: str = "tudo-para-ia-mais-humana",
) -> ServiceOrder:
    """Materialize one recommendation as a planned ServiceOrder.

    Priority becomes "alta" for recommendations at priority >= 85, otherwise
    "media"; the order starts in PLANNED status.
    """
    title = recommendation.title
    id_prefix = "EXECUTIVA" if order_type == OrderType.EXECUTIVE else "GERENCIAL"
    scope_paths = recommendation.affected_paths or ("a identificar durante execucao",)
    purpose_text = (
        "Transformar uma lacuna detectada pela matriz humana em continuidade operacional "
        "validavel, registrada e vinculada a evidencias."
    )
    scope_text = (
        f"Plataforma relacionada: {recommendation.platform_id}. "
        "Areas afetadas: " + ", ".join(scope_paths)
    )
    return ServiceOrder(
        order_id=incrementing_id(id_prefix, index, title),
        order_type=order_type,
        project_id=project_id,
        title=title,
        purpose=purpose_text,
        object_scope=scope_text,
        reason=recommendation.reason,
        expected_result=recommendation.expected_impact,
        affected_paths=recommendation.affected_paths,
        validations=recommendation.validation_steps,
        ready_criteria=(
            "lacuna humana reavaliada",
            "evidencia registrada",
            "relatorio ou matriz atualizado",
            "SQL semantico atualizado",
        ),
        status=OrderStatus.PLANNED,
        priority="alta" if recommendation.priority >= 85 else "media",
    )
def select_recommendations(
    recommendations: Sequence[Recommendation],
    order_type: OrderType,
    limit: int = 5,
) -> tuple[Recommendation, ...]:
    """Pick up to *limit* unique recommendations, preferring the given type.

    Other-typed recommendations are only considered when the preferred pool
    alone cannot fill the limit. Uniqueness is by recommendation_id; ranking
    is priority (desc), then platform id and title.
    """
    candidates = [item for item in recommendations if item.suggested_order_type == order_type]
    if len(candidates) < limit:
        candidates.extend(item for item in recommendations if item.suggested_order_type != order_type)
    ranked = sorted(candidates, key=lambda rec: (-rec.priority, rec.platform_id, rec.title))
    chosen: list[Recommendation] = []
    seen_ids: set[str] = set()
    for rec in ranked:
        if rec.recommendation_id in seen_ids:
            continue
        seen_ids.add(rec.recommendation_id)
        chosen.append(rec)
        if len(chosen) >= limit:
            break
    return tuple(chosen)
def build_exit_orders(recommendations: Sequence[Recommendation], project_id: str = "tudo-para-ia-mais-humana") -> tuple[ServiceOrder, ...]:
    """Build the round's exit orders: up to five executive, then five managerial."""
    orders: list[ServiceOrder] = []
    for kind in (OrderType.EXECUTIVE, OrderType.MANAGERIAL):
        picked = select_recommendations(recommendations, kind, limit=5)
        orders.extend(
            order_from_recommendation(rec, position, kind, project_id)
            for position, rec in enumerate(picked, start=1)
        )
    return tuple(orders)
def order_markdown(order: ServiceOrder, platform_folder: str, real_repo: str) -> str:
    """Render one service order as the official markdown template.

    *platform_folder* is the central management folder for this project and
    *real_repo* is the repository slug under G:/_codex-git. Missing affected
    paths and validations get explicit placeholder lines.
    """
    lines = [
        f"# ORDEM DE SERVICO: {order.order_id}",
        "",
        "Template oficial:",
        "",
        "`G:/_codex-git/nucleo-gestao-operacional/templates/ordem-de-servico.md`",
        "",
        "## Finalidade da ordem de servico",
        "",
        order.purpose,
        "",
        "## Objeto da ordem de servico",
        "",
        order.object_scope,
        "",
        "## Motivo da criacao da ordem de servico",
        "",
        order.reason,
        "",
        "## Resultado esperado da execucao",
        "",
        order.expected_result,
        "",
        "## Tipo da ordem",
        "",
        f"`{order.order_type.value.upper()}`",
        "",
        "## Identificacao",
        "",
        f"- order_id: `{order.order_id}`",
        f"- tipo: `{order.order_type.value}`",
        f"- project_id: `{order.project_id}`",
        f"- repo_name: `{real_repo}`",
        f"- status: `{order.status.value}`",
        f"- prioridade: `{order.priority}`",
        "",
        "## Caminhos",
        "",
        "Pasta da plataforma:",
        "",
        f"`{platform_folder}`",
        "",
        "Projeto real:",
        "",
        f"`G:/_codex-git/{real_repo}`",
        "",
        "SQLite semantico:",
        "",
        f"`{platform_folder}/controle-semantico.sqlite`",
        "",
        "## Arquivos e areas afetadas",
        "",
    ]
    # Placeholder line when no affected paths were recorded yet.
    for path in order.affected_paths or ("a identificar durante a execucao",):
        lines.append(f"- `{path}`")
    lines.extend(["", "## Validacoes", ""])
    for validation in order.validations or ("validacao a definir durante execucao",):
        lines.append(f"- {validation}")
    lines.extend(["", "## Criterio de pronto", ""])
    for criterion in order.ready_criteria:
        lines.append(f"- {criterion}")
    # Mandatory closeout section is identical for every order.
    lines.extend(
        [
            "",
            "## Fechamento obrigatorio",
            "",
            "- registrar EXECUTADO;",
            "- registrar PENDENCIAS reais;",
            "- atualizar SQL semantico;",
            "- registrar funcao dos arquivos criados ou alterados;",
            "- fazer commit e push quando aplicavel;",
            "- informar hashes finais.",
            "",
        ]
    )
    return "\n".join(lines)
def write_orders(
    orders: Sequence[ServiceOrder],
    platform_folder: Path,
    real_repo: str = "tudo-para-ia-mais-humana",
) -> tuple[Path, ...]:
    """Write order markdown files under orders/executivas|gerenciais.

    Reuses an existing file for the same (prefix, slug) when present —
    overwriting it and rebinding ``order.order_id`` to the existing stem —
    otherwise allocates the next free 4-digit sequence number. Note this
    MUTATES each order's ``order_id``. PermissionError on an existing file
    is swallowed (the file is kept but not reported as written); on new
    files it retries with the next number, up to 200 attempts.
    """
    written: list[Path] = []
    # Next free sequence number per subfolder, discovered lazily.
    next_number: dict[str, int] = {}
    # Existing files already claimed by an order in this batch.
    used_existing_paths: set[Path] = set()
    max_attempts_per_order = 200
    for order in orders:
        subfolder = "executivas" if order.order_type == OrderType.EXECUTIVE else "gerenciais"
        target_dir = platform_folder / "orders" / subfolder
        target_dir.mkdir(parents=True, exist_ok=True)
        if subfolder not in next_number:
            # Scan once per subfolder for the highest existing NNNN_ prefix.
            numbers: list[int] = []
            for existing in target_dir.glob("*.md"):
                match = re.match(r"^(\d{4})_", existing.name)
                if match:
                    numbers.append(int(match.group(1)))
            next_number[subfolder] = (max(numbers) + 1) if numbers else 1
        prefix = "EXECUTIVA" if order.order_type == OrderType.EXECUTIVE else "GERENCIAL"
        order_slug = slugify(order.title)
        # Prefer updating the newest existing file for this same order slug.
        existing_same_order = [
            path for path in sorted(target_dir.glob(f"*_{prefix}__{order_slug}.md"), reverse=True) if path not in used_existing_paths
        ]
        if existing_same_order:
            path = existing_same_order[0]
            used_existing_paths.add(path)
            order.order_id = path.stem
            try:
                path.write_text(order_markdown(order, str(platform_folder), real_repo), encoding="utf-8")
            except PermissionError:
                # Best effort: keep the existing file, just skip reporting it.
                pass
            else:
                written.append(path)
            continue
        last_error: PermissionError | None = None
        for _attempt in range(max_attempts_per_order):
            order.order_id = incrementing_id(prefix, next_number[subfolder], order.title)
            next_number[subfolder] += 1
            filename = order.order_id
            if not filename.endswith(".md"):
                filename += ".md"
            path = target_dir / filename
            if path.exists():
                # Number collision with an unrelated order; try the next one.
                continue
            try:
                path.write_text(order_markdown(order, str(platform_folder), real_repo), encoding="utf-8")
            except PermissionError as exc:
                last_error = exc
                continue
            written.append(path)
            break
        else:
            # for-else: all attempts exhausted without a successful write.
            detail = f"Nao foi possivel gravar ordem em {target_dir} apos {max_attempts_per_order} tentativas."
            if last_error is not None:
                detail = f"{detail} Ultimo erro: {last_error}"
            raise PermissionError(detail)
    return tuple(written)
def executed_order_markdown(reports: Sequence[PlatformHumanReport], orders: Sequence[ServiceOrder]) -> str:
    """Render the EXECUTADO closeout markdown for the foundation round.

    Aggregates total analyzed code lines and the rounded mean human score
    (0 when there are no reports).
    """
    total_code = sum(report.scan.code_lines for report in reports)
    avg = round(sum(report.average_score for report in reports) / len(reports)) if reports else 0
    lines = [
        "# EXECUTADO - Fundacao da plataforma tudo-para-ia-mais-humana",
        "",
        "## Sintese",
        "",
        "A rodada criou uma base operacional inicial para traduzir o estado tecnico do ecossistema em leitura humana.",
        "",
        f"- plataformas avaliadas: {len(reports)}",
        f"- linhas de codigo analisadas: {total_code}",
        f"- score medio humano: {avg}",
        f"- ordens de saida criadas: {len(orders)}",
        "",
        "## Arquivos e capacidades criadas",
        "",
        "- pacote Python `mais_humana` com scanner, matriz, DOCX, SVG, SQLite e gerador de OS;",
        "- relatorios DOCX e Markdown por plataforma;",
        "- relatorio geral do ecossistema;",
        "- graficos SVG de maturidade e matriz;",
        "- dados JSON auditaveis;",
        "- SQL semantico atualizado;",
        "- ordens executivas e gerenciais de saida.",
        "",
        "## Plataformas avaliadas",
        "",
    ]
    # One summary line per platform, ordered by platform id.
    for report in sorted(reports, key=lambda item: item.platform.platform_id):
        lines.append(f"- {report.platform.platform_id}: score {report.average_score}, codigo {report.scan.code_lines} linhas")
    return "\n".join(lines) + "\n"
def pending_markdown(reports: Sequence[PlatformHumanReport], push_status: str | None = None) -> str:
    """Render the PENDENCIAS markdown: push status, per-platform warnings, next step.

    The "no pending items" line appears only when there is neither a push
    status nor any scan warning.
    """
    header = ["# PENDENCIAS-CODEX - tudo-para-ia-mais-humana", "", "## Pendencias reais", ""]
    pending: list[str] = []
    if push_status:
        pending.append(f"- Sincronizacao Git remota: {push_status}")
    for report in sorted(reports, key=lambda item: item.platform.platform_id):
        pending.extend(f"- {report.platform.platform_id}: {warning}" for warning in report.scan.warnings)
    if not pending:
        pending.append("- Nenhuma pendencia material detectada nesta rodada.")
    footer = [
        "",
        "## Proximo passo",
        "",
        "Executar as ordens de saida criadas e comparar novos scores contra os dados desta rodada.",
    ]
    return "\n".join(header + pending + footer) + "\n"
def audit_markdown(reports: Sequence[PlatformHumanReport], orders: Sequence[ServiceOrder]) -> str:
    """Render the AUDITORIA markdown: confirmations, partials, lowest scores, orders."""
    header = [
        "# AUDITORIA-GPT - Fundacao tudo-para-ia-mais-humana",
        "",
        "## Confirmado",
        "",
        "- Repositorio real estruturado como plataforma propria.",
        "- Base tecnica criada para gerar relatorios humanos e matrizes.",
        "- Ordens de saida geradas a partir de recomendacoes reais.",
        "- SQL semantico atualizado por camada compacta.",
        "",
        "## Parcial",
        "",
        "- Push remoto depende de credencial Git disponivel no ambiente.",
        "- Relatorios DOCX iniciais foram gerados por escritor minimalista sem dependencia externa.",
        "",
        "## Amostra de scores",
        "",
    ]
    # Sample the eight lowest-scoring platforms, ascending.
    lowest = sorted(reports, key=lambda item: item.average_score)[:8]
    score_lines = [f"- {report.platform.platform_id}: {report.average_score}" for report in lowest]
    order_lines = ["", "## Ordens criadas", ""]
    order_lines += [f"- {order.order_id}: {order.title}" for order in orders]
    return "\n".join(header + score_lines + order_lines) + "\n"

155
src/mais_humana/paths.py Normal file
View File

@@ -0,0 +1,155 @@
"""Path conventions for the Mais Humana platform."""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from .models import as_plain_data
@dataclass(slots=True)
class PlatformPaths:
    """Canonical filesystem layout for the Mais Humana platform.

    Three roots are stored; every other location is derived from them.
    """

    # Root that contains every repository (e.g. G:/_codex-git).
    ecosystem_root: Path
    # The real project repository.
    project_root: Path
    # Central management folder for this project's orders/reports.
    central_folder: Path

    @property
    def sqlite_path(self) -> Path:
        """Semantic-control SQLite database inside the central folder."""
        return self.central_folder / "controle-semantico.sqlite"

    @property
    def reports_dir(self) -> Path:
        return self.central_folder / "reports"

    @property
    def orders_executive_dir(self) -> Path:
        return self.central_folder / "orders" / "executivas"

    @property
    def orders_managerial_dir(self) -> Path:
        return self.central_folder / "orders" / "gerenciais"

    @property
    def project_data_dir(self) -> Path:
        return self.project_root / "dados"

    @property
    def project_docx_dir(self) -> Path:
        return self.project_root / "relatorios-docx"

    def ensure(self) -> None:
        """Create every managed directory (idempotent)."""
        for path in (
            self.project_root,
            self.central_folder,
            self.reports_dir,
            self.orders_executive_dir,
            self.orders_managerial_dir,
            self.project_data_dir,
            self.project_docx_dir,
        ):
            path.mkdir(parents=True, exist_ok=True)

    def to_dict(self) -> dict[str, object]:
        """Plain-data projection of the main locations (data/docx dirs excluded)."""
        return as_plain_data(
            {
                "ecosystem_root": self.ecosystem_root,
                "project_root": self.project_root,
                "central_folder": self.central_folder,
                "sqlite_path": self.sqlite_path,
                "reports_dir": self.reports_dir,
                "orders_executive_dir": self.orders_executive_dir,
                "orders_managerial_dir": self.orders_managerial_dir,
            }
        )
def default_paths(root: str | Path = "G:/_codex-git") -> PlatformPaths:
    """Build the canonical path layout rooted at *root*."""
    base = Path(root)
    central = (
        base
        / "nucleo-gestao-operacional"
        / "central-de-ordem-de-servico"
        / "projects"
        / "15_repo_tudo-para-ia-mais-humana"
    )
    return PlatformPaths(
        ecosystem_root=base,
        project_root=base / "tudo-para-ia-mais-humana",
        central_folder=central,
    )
def assert_real_repo_name(path: Path) -> None:
    """Validate that *path* names a real repository slug.

    Raises ValueError when the name starts with a management-folder ordinal
    prefix ("NN_") or contains spaces.

    Fix: the original only rejected the two literal prefixes "15_" and
    "01_"; any two-digit ordinal prefix (e.g. "07_") is equally invalid per
    the error message's intent, so the check is generalized.
    """
    name = path.name
    if len(name) >= 3 and name[:2].isdigit() and name[2] == "_":
        raise ValueError("numero da pasta gerencial nao pode fazer parte do nome do repositorio real")
    if " " in name:
        raise ValueError("nome do repositorio real deve ser slug sem espacos")
def central_project_folder_name(project_root: Path, ordinal: int = 15) -> str:
    """Central management folder name: zero-padded ordinal plus repo slug.

    Validates the repository name first (raises ValueError on bad slugs).
    """
    assert_real_repo_name(project_root)
    return "{:02d}_repo_{}".format(ordinal, project_root.name)
def expected_remote_url(repo_name: str) -> str:
    """Canonical Gitea remote URL for a repo name; full URLs pass through."""
    if repo_name.startswith(("http://", "https://")):
        return repo_name
    clean = repo_name.strip().removeprefix("admin/")
    return f"https://git.ami.app.br/admin/{clean}.git"
def platform_relative(path: Path, base: Path) -> str:
    """Forward-slash path relative to *base*; the path itself when outside it."""
    try:
        resolved = path.relative_to(base)
    except ValueError:
        resolved = path
    return str(resolved).replace("\\", "/")
def describe_paths(paths: PlatformPaths) -> tuple[str, ...]:
    """One ``key=value`` line per canonical location, for logs and markdown."""
    labels = (
        "ecosystem_root",
        "project_root",
        "central_folder",
        "sqlite_path",
        "reports_dir",
        "orders_executive_dir",
        "orders_managerial_dir",
    )
    return tuple(f"{label}={getattr(paths, label)}" for label in labels)
def path_health(paths: PlatformPaths) -> dict[str, bool]:
    """Existence flag for every canonical location, keyed "<name>_exists"."""
    probes = {
        "ecosystem_root_exists": paths.ecosystem_root,
        "project_root_exists": paths.project_root,
        "central_folder_exists": paths.central_folder,
        # Only the SQLite file's parent needs to exist up front.
        "sqlite_parent_exists": paths.sqlite_path.parent,
        "reports_dir_exists": paths.reports_dir,
        "orders_executive_dir_exists": paths.orders_executive_dir,
        "orders_managerial_dir_exists": paths.orders_managerial_dir,
    }
    return {key: target.exists() for key, target in probes.items()}
def path_health_markdown(paths: PlatformPaths) -> str:
    """Markdown summary combining health flags and canonical paths."""
    parts = ["# Path Health Mais Humana", ""]
    parts += [f"- {key}: `{value}`" for key, value in path_health(paths).items()]
    parts += ["", "## Caminhos", ""]
    parts += [f"- `{line}`" for line in describe_paths(paths)]
    return "\n".join(parts) + "\n"
def missing_required_paths(paths: PlatformPaths) -> tuple[str, ...]:
    """Health keys whose location does not exist yet."""
    return tuple(key for key, exists in path_health(paths).items() if not exists)
def path_ready(paths: PlatformPaths) -> bool:
    """True when every essential location already exists."""
    return missing_required_paths(paths) == ()
def path_action_hint(paths: PlatformPaths) -> str:
    """Human hint: ready message, or the list of locations to create."""
    missing = missing_required_paths(paths)
    if missing:
        return "criar ou validar: " + ", ".join(missing)
    return "caminhos essenciais prontos"

View File

@@ -0,0 +1,97 @@
"""Generate operational playbooks oriented to human roles."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence
from .catalog import HUMAN_PROFILES, PROFILE_BY_ID
from .models import PlatformHumanReport, Recommendation, as_plain_data
@dataclass(slots=True)
class PlaybookStep:
    """One actionable step inside a human playbook."""

    step_id: str
    title: str
    action: str
    # What must be collected to prove the step was executed.
    evidence_needed: str
    done_when: str

    def to_dict(self) -> dict[str, object]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)
@dataclass(slots=True)
class HumanPlaybook:
    """Ordered set of steps guiding one human profile."""

    playbook_id: str
    profile_id: str
    title: str
    purpose: str
    steps: tuple[PlaybookStep, ...]

    def to_dict(self) -> dict[str, object]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)
def step_from_recommendation(profile_id: str, recommendation: Recommendation, index: int) -> PlaybookStep:
    """Translate one recommendation into a numbered playbook step."""
    validations = recommendation.validation_steps or ("validacao operacional",)
    return PlaybookStep(
        step_id=f"{profile_id}-{recommendation.platform_id}-{index}",
        title=recommendation.title,
        action=recommendation.reason,
        evidence_needed=", ".join(validations),
        done_when=recommendation.expected_impact,
    )
def build_playbook_for_profile(profile_id: str, reports: Sequence[PlatformHumanReport]) -> HumanPlaybook:
    """Assemble a playbook for one human profile from report recommendations.

    A report contributes its top three recommendations when the profile is
    expected on that platform, or when the profile's matrix cell scores
    below 70. When nothing matches, the first reports contribute one
    recommendation each so no playbook is ever empty. At most eight steps
    are kept, highest priority first.
    """
    profile = PROFILE_BY_ID[profile_id]
    candidate_recommendations: list[Recommendation] = []
    for report in reports:
        profile_cells = [cell for cell in report.cells if cell.profile_id == profile_id]
        # NOTE(review): only the FIRST matching cell is checked; presumably
        # there is one cell per (platform, profile) pair -- confirm against
        # the matrix builder.
        weak = profile_cells and profile_cells[0].score < 70
        if profile_id in report.platform.expected_profiles or weak:
            candidate_recommendations.extend(report.recommendations[:3])
    if not candidate_recommendations:
        # Fallback so every profile gets at least some steps.
        for report in reports[:3]:
            candidate_recommendations.extend(report.recommendations[:1])
    candidate_recommendations.sort(key=lambda item: (-item.priority, item.platform_id))
    steps = tuple(
        step_from_recommendation(profile_id, recommendation, index)
        for index, recommendation in enumerate(candidate_recommendations[:8], start=1)
    )
    return HumanPlaybook(
        playbook_id=f"playbook-{profile_id}",
        profile_id=profile_id,
        title=f"Playbook humano - {profile.name}",
        purpose=(
            f"Orientar {profile.name} a interpretar estado, evidencias e proximas acoes "
            "sem depender de leitura direta do codigo."
        ),
        steps=steps,
    )
def build_playbooks(reports: Sequence[PlatformHumanReport]) -> tuple[HumanPlaybook, ...]:
    """One playbook per known human profile, in catalog order."""
    playbooks = [build_playbook_for_profile(profile.profile_id, reports) for profile in HUMAN_PROFILES]
    return tuple(playbooks)
def playbooks_markdown(playbooks: Sequence[HumanPlaybook]) -> str:
    """Render all playbooks as a single markdown document."""
    parts = ["# Playbooks humanos", ""]
    for playbook in playbooks:
        parts += [f"## {playbook.title}", "", playbook.purpose, ""]
        for step in playbook.steps:
            parts += [
                f"### {step.title}",
                "",
                f"- acao: {step.action}",
                f"- evidencia necessaria: {step.evidence_needed}",
                f"- pronto quando: {step.done_when}",
                "",
            ]
    return "\n".join(parts).strip() + "\n"
def playbook_summary(playbooks: Sequence[HumanPlaybook]) -> dict[str, int]:
    """Map each profile id to its playbook's step count."""
    summary: dict[str, int] = {}
    for playbook in playbooks:
        summary[playbook.profile_id] = len(playbook.steps)
    return summary

View File

@@ -0,0 +1,189 @@
"""Human-readable query helpers over the governance portfolio."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Sequence
from .governance_models import EcosystemGovernancePortfolio, GovernanceDomain, GovernanceStatus, PlatformGovernanceCard
from .models import as_plain_data, merge_unique
@dataclass(slots=True)
class PortfolioQuestion:
    """A pre-answered operational question over the governance portfolio."""

    question_id: str
    question: str
    answer: str
    evidence: tuple[str, ...]
    next_action: str

    def to_dict(self) -> dict[str, object]:
        """Plain-data projection for serialization."""
        return as_plain_data(self)
def card_or_none(portfolio: EcosystemGovernancePortfolio, platform_id: str) -> PlatformGovernanceCard | None:
    """Thin alias over ``portfolio.card_for``; None when the platform is unknown."""
    return portfolio.card_for(platform_id)
def summarize_card(card: PlatformGovernanceCard) -> str:
    """One-sentence human summary of a platform governance card."""
    # `or` keeps the fallback when the joined titles are empty.
    blockers = ", ".join(check.title for check in card.blockers[:3]) or "sem blocker principal"
    if card.next_actions:
        action = card.next_actions[0]
    else:
        action = "manter regressao"
    head = f"{card.platform_id} esta em status {card.status_label}, score {card.governance_score}, "
    tail = f"maturidade {card.maturity.value}. Blockers: {blockers}. Proxima acao: {action}."
    return head + tail
def strongest_platforms(portfolio: EcosystemGovernancePortfolio, limit: int = 5) -> tuple[PlatformGovernanceCard, ...]:
    """Up to *limit* cards with the highest governance score (ties broken by id)."""
    ranked = sorted(portfolio.cards, key=lambda card: (-card.governance_score, card.platform_id))
    return tuple(ranked[:limit])
def weakest_platforms(portfolio: EcosystemGovernancePortfolio, limit: int = 5) -> tuple[PlatformGovernanceCard, ...]:
    """Up to *limit* cards with the lowest governance score (ties broken by id)."""
    ranked = sorted(portfolio.cards, key=lambda card: (card.governance_score, card.platform_id))
    return tuple(ranked[:limit])
def platforms_by_domain_gap(portfolio: EcosystemGovernancePortfolio, domain: GovernanceDomain) -> tuple[PlatformGovernanceCard, ...]:
    """Cards with a non-passing check (attention/fail/blocked) in *domain*, weakest first."""
    flagged = {GovernanceStatus.ATTENTION, GovernanceStatus.FAIL, GovernanceStatus.BLOCKED}
    matching = [
        card
        for card in portfolio.cards
        if any(check.domain == domain and check.status in flagged for check in card.checks)
    ]
    return tuple(sorted(matching, key=lambda card: (card.governance_score, card.platform_id)))
def blockers_for_domain(portfolio: EcosystemGovernancePortfolio, domain: GovernanceDomain) -> tuple[str, ...]:
    """Deduplicated '<platform>: <blocker> - <action>' lines for one domain."""
    lines = [
        f"{card.platform_id}: {check.title} - {check.next_action}"
        for card in portfolio.cards
        for check in card.blockers
        if check.domain == domain
    ]
    return merge_unique(lines)
def build_operational_questions(portfolio: EcosystemGovernancePortfolio) -> tuple[PortfolioQuestion, ...]:
    """Build the standard set of answered operational questions for the portfolio.

    Produces, in order: an overall-state question, weakest/strongest platform
    questions, one question per governance domain, and a final question about
    exit-order justification. Answers are derived only from *portfolio* data.
    """
    questions: list[PortfolioQuestion] = []
    # 1) Overall governance state of the ecosystem.
    questions.append(
        PortfolioQuestion(
            question_id="estado-geral-governanca",
            question="Qual e o estado geral de governanca humana do ecossistema?",
            answer=(
                f"O score medio de governanca e {portfolio.average_governance_score}. "
                f"Plataformas bloqueadas: {len(portfolio.blocked_platforms)}. "
                f"Plataformas controladas: {len(portfolio.controlled_platforms)}."
            ),
            # NOTE(review): assumes executive_summary is a tuple of strings,
            # matching PortfolioQuestion.evidence — confirm in governance_models.
            evidence=portfolio.executive_summary,
            next_action="atuar primeiro nos blockers de dominio com maior impacto humano",
        )
    )
    # 2) Platforms that most need continuity (lowest scores).
    weak = weakest_platforms(portfolio)
    questions.append(
        PortfolioQuestion(
            question_id="plataformas-mais-fracas",
            question="Quais plataformas mais precisam de continuidade?",
            answer="As plataformas com menor score sao: " + ", ".join(f"{card.platform_id} ({card.governance_score})" for card in weak),
            evidence=tuple(summarize_card(card) for card in weak),
            next_action="executar as OS vinculadas aos checks dessas plataformas",
        )
    )
    # 3) Platforms mature enough to serve as references.
    strong = strongest_platforms(portfolio)
    questions.append(
        PortfolioQuestion(
            question_id="plataformas-mais-fortes",
            question="Quais plataformas estao mais maduras para leitura humana?",
            answer="As plataformas mais fortes sao: " + ", ".join(f"{card.platform_id} ({card.governance_score})" for card in strong),
            evidence=tuple(summarize_card(card) for card in strong),
            next_action="usar essas plataformas como referencia de padrao e regressao",
        )
    )
    # 4) One question per governance domain, summarizing blockers and gaps.
    for domain in (
        GovernanceDomain.DOCS,
        GovernanceDomain.INTEGRATIONS,
        GovernanceDomain.IDENTITY,
        GovernanceDomain.BUSINESS,
        GovernanceDomain.MCP,
        GovernanceDomain.CLOUD,
        GovernanceDomain.OBSERVABILITY,
    ):
        blockers = blockers_for_domain(portfolio, domain)
        impacted = platforms_by_domain_gap(portfolio, domain)
        answer = (
            f"Dominio {domain.value} tem {len(blockers)} blockers e "
            f"{len(impacted)} plataformas com gap/atencao."
        )
        if blockers:
            # Surface only the top three blockers inline.
            answer += " Principais: " + " | ".join(blockers[:3])
        questions.append(
            PortfolioQuestion(
                question_id=f"dominio-{domain.value}",
                question=f"O que bloqueia ou exige atencao no dominio {domain.value}?",
                answer=answer,
                # Prefer concrete blocker lines; fall back to card summaries.
                evidence=blockers[:8] or tuple(summarize_card(card) for card in impacted[:5]),
                next_action=(
                    f"priorizar checks do dominio {domain.value} e validar owner "
                    "institucional antes da proxima promocao"
                ),
            )
        )
    # 5) Are exit orders traceable back to real governance checks?
    questions.append(
        PortfolioQuestion(
            question_id="ordens-saida-justificadas",
            question="As ordens de saida estao justificadas por checks reais?",
            answer=(
                f"Ha {len(portfolio.order_candidates)} candidatas de OS derivadas de checks de governanca. "
                "Cada candidata guarda source_check_ids e validacoes."
            ),
            evidence=tuple(f"{candidate.candidate_id}: {', '.join(candidate.source_check_ids)}" for candidate in portfolio.order_candidates[:12]),
            next_action="manter ativas apenas ordens ligadas a pendencias reais ou continuidade impossivel nesta rodada",
        )
    )
    return tuple(questions)
def questions_markdown(questions: Sequence[PortfolioQuestion]) -> str:
    """Render portfolio questions and answers as a markdown document."""
    chunks: list[str] = ["# Perguntas operacionais sobre governanca", ""]
    for entry in questions:
        chunks.extend((f"## {entry.question}", "", entry.answer, ""))
        chunks.append(f"Proxima acao: {entry.next_action}")
        if entry.evidence:
            chunks.extend(("", "Evidencias:"))
            # Cap the evidence list at ten lines per question.
            chunks.extend(f"- {line}" for line in entry.evidence[:10])
        chunks.append("")
    return "\n".join(chunks).strip() + "\n"
def questions_rows(questions: Sequence[PortfolioQuestion]) -> list[list[str]]:
    """CSV-ready rows for the portfolio questions, header row first."""
    header = ["question_id", "question", "answer", "next_action", "evidence_count"]
    body = [
        [entry.question_id, entry.question, entry.answer, entry.next_action, str(len(entry.evidence))]
        for entry in questions
    ]
    return [header, *body]
def query_by_keyword(questions: Sequence[PortfolioQuestion], keyword: str) -> tuple[PortfolioQuestion, ...]:
    """Case-insensitive search across question, answer and evidence texts."""
    needle = keyword.lower()

    def matches(entry: PortfolioQuestion) -> bool:
        if needle in entry.question.lower() or needle in entry.answer.lower():
            return True
        return any(needle in line.lower() for line in entry.evidence)

    return tuple(filter(matches, questions))
def unresolved_question_ids(questions: Sequence[PortfolioQuestion]) -> tuple[str, ...]:
    """Ids of questions whose answer or next action still mentions open work."""
    markers = ("blocker", "gap", "priorizar")
    unresolved: list[str] = []
    for entry in questions:
        haystack = f"{entry.answer} {entry.next_action}".lower()
        if any(marker in haystack for marker in markers):
            unresolved.append(entry.question_id)
    return tuple(unresolved)
def compact_question_payload(questions: Sequence[PortfolioQuestion]) -> dict[str, object]:
    """Compact JSON-ready payload: counts, unresolved ids, full question dicts."""
    payload: dict[str, object] = {
        "count": len(questions),
        "unresolved": unresolved_question_ids(questions),
        "questions": [entry.to_dict() for entry in questions],
    }
    return payload

237
src/mais_humana/quality.py Normal file
View File

@@ -0,0 +1,237 @@
"""Quality gates for human-centered readiness.
The gates separate technical readiness from human usefulness. A repository can
build successfully and still be poor for a support analyst, a CEO, or a client.
This module makes that distinction explicit and machine-checkable.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Sequence
from .models import EvidenceKind, MatrixCell, PlatformHumanReport, PlatformScan, as_plain_data
@dataclass(slots=True)
class GateResult:
    """Outcome of a single quality gate applied to one platform."""

    # Stable gate identifier (e.g. "git_ready").
    gate_id: str
    # Human-readable gate title.
    title: str
    # Whether the gate passed.
    passed: bool
    # "blocker", "warning" or "info"; meaningful mainly when the gate fails.
    severity: str
    # Human explanation of the outcome.
    reason: str
    # References backing the outcome.
    evidence: tuple[str, ...]
    # Recommended follow-up action.
    next_action: str

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this result."""
        return as_plain_data(self)
@dataclass(slots=True)
class PlatformQualityReport:
    """Aggregated gate results and readiness flags for one platform."""

    # Platform identifier from the catalog.
    platform_id: str
    # All gate results, in evaluation order.
    gates: tuple[GateResult, ...]
    # True when the platform is also useful for human readers.
    human_ready: bool
    # True when repo/git/tests gates passed.
    technical_ready: bool
    # Number of failed gates with severity "blocker".
    blocker_count: int
    # Number of failed gates with severity "warning".
    warning_count: int

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this report."""
        return as_plain_data(self)
def refs_for_kind(scan: PlatformScan, kind: EvidenceKind, limit: int = 5) -> tuple[str, ...]:
    """First *limit* evidence references of the given kind found in the scan."""
    matching = tuple(item.reference for item in scan.evidence if item.kind == kind)
    return matching[:limit]
def gate_repository_exists(scan: PlatformScan) -> GateResult:
    """Gate: the platform's local repository must actually exist."""
    exists = scan.exists
    return GateResult(
        gate_id="repository_exists",
        title="Repositorio real existe",
        passed=exists,
        severity="info" if exists else "blocker",
        reason="Repositorio local encontrado." if exists else "Repositorio local nao encontrado.",
        evidence=(scan.repo_path,),
        next_action="Manter rastreabilidade." if exists else "Criar ou clonar repositorio real sem numero da pasta gerencial.",
    )
def gate_git_ready(scan: PlatformScan) -> GateResult:
    """Gate: the repository must have a usable local Git setup."""
    passed = scan.exists and scan.git_present
    git_refs = tuple(ref for ref in (scan.branch, scan.head, scan.remote_origin) if ref)
    # Only an existing repo without .git is a blocker here; a missing repo is
    # already reported by the repository_exists gate.
    severity = "blocker" if scan.exists and not scan.git_present else "info"
    return GateResult(
        gate_id="git_ready",
        title="Git operacional",
        passed=passed,
        severity=severity,
        reason="Git local detectado." if passed else "Repositorio sem .git ou inacessivel.",
        evidence=git_refs if git_refs else (scan.repo_path,),
        next_action="Validar status antes de commit." if passed else "Inicializar Git, configurar origin e registrar hash final.",
    )
def gate_documentation(scan: PlatformScan) -> GateResult:
    """Gate: a README excerpt must have been found during the scan."""
    has_readme = bool(scan.readme_excerpt)
    readme_refs = refs_for_kind(scan, EvidenceKind.README) or ("README.md",)
    return GateResult(
        gate_id="documentation",
        title="Documentacao inicial",
        passed=has_readme,
        severity="info" if has_readme else "warning",
        reason="README tecnico/humano encontrado." if has_readme else "README inicial nao encontrado.",
        evidence=readme_refs,
        next_action="Manter README alinhado ao estado real." if has_readme else "Criar README com missao, comandos, validacoes e papel humano.",
    )
def gate_tests(scan: PlatformScan) -> GateResult:
    """Gate: the scan must have detected some form of tests."""
    has_tests = scan.has_tests
    return GateResult(
        gate_id="tests",
        title="Testes detectaveis",
        passed=has_tests,
        severity="info" if has_tests else "warning",
        reason="Testes foram encontrados." if has_tests else "Nenhum teste detectado pela varredura local.",
        evidence=refs_for_kind(scan, EvidenceKind.TEST) or ("tests/",),
        next_action="Executar suite antes de fechar OS." if has_tests else "Criar smoke ou teste canonico de uso humano.",
    )
def gate_operational_evidence(scan: PlatformScan) -> GateResult:
    """Gate: observability or route evidence must exist for the platform."""
    refs = refs_for_kind(scan, EvidenceKind.OBSERVABILITY) + refs_for_kind(scan, EvidenceKind.ROUTE)
    has_refs = bool(refs)
    return GateResult(
        gate_id="operational_evidence",
        title="Evidencia operacional",
        passed=has_refs,
        severity="info" if has_refs else "warning",
        reason="Health/readiness/rota/evidencia detectada." if has_refs else "Sem evidencia operacional clara.",
        evidence=refs if has_refs else ("health/readiness",),
        next_action="Revalidar endpoints e evidencias." if has_refs else "Publicar health, readiness, evidencia ou comandos humanos.",
    )
def gate_human_matrix(cells: Sequence[MatrixCell]) -> GateResult:
    """Gate: the platform x profile matrix must cover human profiles well."""
    if not cells:
        return GateResult(
            gate_id="human_matrix",
            title="Matriz humana",
            passed=False,
            severity="blocker",
            reason="Matriz sem celulas.",
            evidence=(),
            next_action="Gerar matriz plataforma x perfil.",
        )
    average = round(sum(cell.score for cell in cells) / len(cells))
    fragile = [cell.profile_id for cell in cells if cell.score < 45]
    weakest = sorted(cells, key=lambda cell: cell.score)[:6]
    # Pass when the mean is decent and at most a quarter of the profiles
    # (minimum one) are fragile.
    passed = average >= 60 and len(fragile) <= max(1, len(cells) // 4)
    return GateResult(
        gate_id="human_matrix",
        title="Cobertura por perfil humano",
        passed=passed,
        severity="warning" if fragile else "info",
        reason=f"Score medio {average}; perfis frageis: {', '.join(fragile[:6]) or 'nenhum'}.",
        evidence=tuple(f"{cell.profile_id}:{cell.score}" for cell in weakest),
        next_action="Priorizar perfis com score baixo e transformar lacunas em telas, relatorios ou OS.",
    )
def gate_no_known_blocker(scan: PlatformScan) -> GateResult:
    """Gate: the platform catalog must list no known blockers."""
    blockers = scan.platform.known_blockers
    has_blockers = bool(blockers)
    return GateResult(
        gate_id="known_blockers",
        title="Bloqueios conhecidos",
        passed=not has_blockers,
        severity="blocker" if has_blockers else "info",
        reason="; ".join(blockers) if has_blockers else "Sem bloqueios conhecidos no catalogo.",
        evidence=tuple(blockers),
        next_action="Resolver, formalizar excecao ou criar OS especifica para bloqueio." if has_blockers else "Manter catalogo atualizado.",
    )
def evaluate_platform_quality(report: PlatformHumanReport) -> PlatformQualityReport:
    """Run every quality gate for one platform and aggregate the outcome."""
    scan = report.scan
    gates = (
        gate_repository_exists(scan),
        gate_git_ready(scan),
        gate_documentation(scan),
        gate_tests(scan),
        gate_operational_evidence(scan),
        gate_human_matrix(report.cells),
        gate_no_known_blocker(scan),
    )
    failed = [gate for gate in gates if not gate.passed]
    blocker_count = sum(1 for gate in failed if gate.severity == "blocker")
    warning_count = sum(1 for gate in failed if gate.severity == "warning")
    # Technical readiness looks only at repo, git and tests; human readiness
    # additionally requires zero blockers and a decent average human score.
    technical_ready = gates[0].passed and gates[1].passed and gates[3].passed
    human_ready = technical_ready and blocker_count == 0 and report.average_score >= 70
    return PlatformQualityReport(
        platform_id=report.platform.platform_id,
        gates=gates,
        human_ready=human_ready,
        technical_ready=technical_ready,
        blocker_count=blocker_count,
        warning_count=warning_count,
    )
def evaluate_ecosystem_quality(reports: Sequence[PlatformHumanReport]) -> tuple[PlatformQualityReport, ...]:
    """Evaluate the quality gates for every platform report."""
    results = [evaluate_platform_quality(report) for report in reports]
    return tuple(results)
def worst_gates(quality_reports: Sequence[PlatformQualityReport], limit: int = 12) -> tuple[GateResult, ...]:
    """Failed gates across all platforms, blockers first, then by gate id."""
    failed = [gate for quality in quality_reports for gate in quality.gates if not gate.passed]
    severity_rank = {"blocker": 0}
    failed.sort(key=lambda gate: (severity_rank.get(gate.severity, 1), gate.gate_id))
    return tuple(failed[:limit])
def quality_summary_lines(quality_reports: Sequence[PlatformQualityReport]) -> list[str]:
    """Headline counters plus the worst failing gates, as plain text lines."""
    totals = {
        "Plataformas avaliadas": len(quality_reports),
        "Prontas tecnicamente": sum(1 for item in quality_reports if item.technical_ready),
        "Prontas para leitura humana": sum(1 for item in quality_reports if item.human_ready),
        "Blockers": sum(item.blocker_count for item in quality_reports),
        "Warnings": sum(item.warning_count for item in quality_reports),
    }
    lines = [f"{label}: {value}" for label, value in totals.items()]
    lines.extend(
        f"{gate.severity.upper()} {gate.gate_id}: {gate.reason} Proxima acao: {gate.next_action}"
        for gate in worst_gates(quality_reports, limit=10)
    )
    return lines
def quality_to_markdown(quality_reports: Sequence[PlatformQualityReport]) -> str:
    """Render the ecosystem quality-gate results as a markdown report."""
    lines: list[str] = ["# Quality Gate Mais Humano", ""]
    lines.extend(f"- {line}" for line in quality_summary_lines(quality_reports))
    lines.append("")
    for quality in sorted(quality_reports, key=lambda item: item.platform_id):
        lines.extend(
            (
                f"## {quality.platform_id}",
                "",
                f"- technical_ready: `{quality.technical_ready}`",
                f"- human_ready: `{quality.human_ready}`",
                f"- blockers: `{quality.blocker_count}`",
                f"- warnings: `{quality.warning_count}`",
            )
        )
        for gate in quality.gates:
            label = "ok" if gate.passed else gate.severity
            lines.append(f"- {gate.gate_id}: {label} - {gate.reason}")
        lines.append("")
    return "\n".join(lines).strip() + "\n"
def quality_to_rows(quality_reports: Sequence[PlatformQualityReport]) -> list[list[str]]:
    """Flatten quality reports into CSV-ready rows, sorted by platform id."""
    ordered = sorted(quality_reports, key=lambda item: item.platform_id)
    return [
        [
            item.platform_id,
            "yes" if item.technical_ready else "no",
            "yes" if item.human_ready else "no",
            str(item.blocker_count),
            str(item.warning_count),
        ]
        for item in ordered
    ]

View File

@@ -0,0 +1,123 @@
"""Generate human questions and answers from platform reports."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence
from .catalog import HUMAN_PROFILES, PROFILE_BY_ID
from .models import MatrixCell, PlatformHumanReport, as_plain_data, score_label
@dataclass(slots=True)
class HumanQuestion:
    """A profile's typical question answered for one platform."""

    # Unique id: "<platform>-<profile>-<index>".
    question_id: str
    # Platform the answer is about.
    platform_id: str
    # Human profile asking the question.
    profile_id: str
    # Question text from the profile catalog.
    question: str
    # Generated narrative answer.
    answer: str
    # "alta" / "media" / "baixa" / "muito_baixa", from the cell score.
    confidence: str
    # Evidence references from the matrix cell (max 5).
    evidence: tuple[str, ...]
    # Recommended follow-up for this profile/platform.
    next_action: str

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this question."""
        return as_plain_data(self)
def confidence_for_score(score: int) -> str:
    """Translate a 0-100 matrix score into a confidence label."""
    bands = ((80, "alta"), (55, "media"), (30, "baixa"))
    for floor, label in bands:
        if score >= floor:
            return label
    return "muito_baixa"
def answer_for_cell(report: PlatformHumanReport, cell: MatrixCell, question: str) -> str:
    """Answer *question* for a profile based on the cell's maturity score."""
    profile = PROFILE_BY_ID[cell.profile_id]
    subject = f"Para {profile.name}, {report.platform.title}"
    # Three bands: well served (>=75), partial base (>=45), unanswered.
    if cell.score >= 75:
        return (
            f"{subject} ja mostra atendimento {score_label(cell.score)}. "
            f"A leitura atual e: {cell.explanation}"
        )
    if cell.score >= 45:
        return (
            f"{subject} oferece base parcial. "
            "A proxima etapa e transformar as evidencias tecnicas em telas, relatorios ou comandos humanos."
        )
    return (
        f"{subject} ainda nao responde bem a pergunta '{question}'. "
        "A plataforma precisa de evidencia, fluxo ou relatorio orientado a esse perfil."
    )
def next_action_for_cell(cell: MatrixCell) -> str:
    """Pick the most urgent follow-up for a matrix cell."""
    if cell.gaps:
        # The first recorded gap is treated as the canonical next step.
        return cell.gaps[0]
    if cell.score < 75:
        return "Criar melhoria humana especifica para o perfil."
    return "Manter evidencia e revalidar periodicamente."
def questions_for_report(report: PlatformHumanReport, max_per_profile: int = 2) -> tuple[HumanQuestion, ...]:
    """Build answered questions for every profile covered by the report."""
    cell_index = {cell.profile_id: cell for cell in report.cells}
    platform_id = report.platform.platform_id
    questions: list[HumanQuestion] = []
    for profile in HUMAN_PROFILES:
        cell = cell_index.get(profile.profile_id)
        if cell is None:
            # Profile not covered by this platform's matrix.
            continue
        for index, question in enumerate(profile.typical_questions[:max_per_profile], start=1):
            questions.append(
                HumanQuestion(
                    question_id=f"{platform_id}-{profile.profile_id}-{index}",
                    platform_id=platform_id,
                    profile_id=profile.profile_id,
                    question=question,
                    answer=answer_for_cell(report, cell, question),
                    confidence=confidence_for_score(cell.score),
                    evidence=cell.evidence_refs[:5],
                    next_action=next_action_for_cell(cell),
                )
            )
    return tuple(questions)
def questions_for_ecosystem(reports: Sequence[PlatformHumanReport]) -> tuple[HumanQuestion, ...]:
    """Concatenate the per-platform questions for the whole ecosystem."""
    return tuple(
        question
        for report in reports
        for question in questions_for_report(report)
    )
def questions_markdown(questions: Sequence[HumanQuestion]) -> str:
    """Render answered human questions as a markdown document."""
    lines: list[str] = ["# Perguntas humanas respondidas", ""]
    for entry in questions:
        lines.extend(
            (
                f"## {entry.question}",
                "",
                f"- plataforma: `{entry.platform_id}`",
                f"- perfil: `{entry.profile_id}`",
                f"- confianca: `{entry.confidence}`",
                "",
                entry.answer,
                "",
                f"Proxima acao: {entry.next_action}",
            )
        )
        if entry.evidence:
            lines.extend(("", "Evidencias:"))
            lines.extend(f"- `{ref}`" for ref in entry.evidence)
        lines.append("")
    return "\n".join(lines).strip() + "\n"
def unanswered_questions(questions: Sequence[HumanQuestion]) -> tuple[HumanQuestion, ...]:
    """Questions whose confidence is still low or very low."""
    low_confidence = {"baixa", "muito_baixa"}
    return tuple(entry for entry in questions if entry.confidence in low_confidence)
def question_index(questions: Sequence[HumanQuestion]) -> dict[str, list[str]]:
    """Group question ids by profile id, preserving input order."""
    index: dict[str, list[str]] = {}
    for entry in questions:
        bucket = index.setdefault(entry.profile_id, [])
        bucket.append(entry.question_id)
    return index

View File

@@ -0,0 +1,127 @@
"""Secret and sensitive-text checks for generated human artifacts."""
from __future__ import annotations
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Sequence
from .models import as_plain_data
# Regex patterns indicating a secret may have leaked into a generated
# artifact. Each entry is (pattern_id, compiled_pattern).
SECRET_PATTERNS: tuple[tuple[str, re.Pattern[str]], ...] = (
    ("generic_token_assignment", re.compile(r"(?i)\b(token|secret|password|api[_-]?key)\s*[:=]\s*['\"]?[A-Za-z0-9_\-]{16,}")),
    ("bearer_token", re.compile(r"(?i)\bbearer\s+[A-Za-z0-9_\-\.]{20,}")),
    (
        "cloudflare_token_assignment",
        re.compile(r"(?i)\b(cloudflare[_-]?(api[_-]?)?token|cf[_-]?token)\b\s*[:=]\s*['\"]?[A-Za-z0-9_\-]{24,}"),
    ),
    ("private_key", re.compile(r"-----BEGIN [A-Z ]*PRIVATE KEY-----")),
    # BUGFIX: the original class was [^\\s] inside a raw string, which
    # excludes a literal backslash and the letter "s" (not whitespace),
    # truncating matches at the first "s"; \S matches any non-whitespace.
    ("connection_string", re.compile(r"(?i)\b(postgres|mysql|mongodb|redis)://\S+")),
)
# Lines containing any of these terms are treated as deliberate references
# to secret *indirections* (not secret values) and are skipped by the scan.
ALLOWLIST_TERMS = {
    "credentialRef",
    "secretRef",
    "tokenRef",
    "redaction",
    "sem segredo",
    "nao vazar",
}
@dataclass(slots=True)
class RedactionFinding:
    """One potential secret spotted in a generated artifact."""

    # File where the match occurred.
    path: str
    # Id of the SECRET_PATTERNS entry that matched.
    pattern_id: str
    # 1-based line number of the match.
    line: int
    # Truncated sample of the matched text.
    sample: str
    # "critical" for private keys / connection strings, "warning" otherwise.
    severity: str
    # How to remediate the leak.
    recommendation: str

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this finding."""
        return as_plain_data(self)
@dataclass(slots=True)
class RedactionReport:
    """Result of scanning a tree of generated artifacts for secrets."""

    # Number of text files inspected.
    scanned_files: int
    # All findings, in scan order.
    findings: tuple[RedactionFinding, ...]
    # True when no findings were produced.
    passed: bool

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this report."""
        return as_plain_data(self)
def is_allowlisted(line: str) -> bool:
    """True when the line deliberately references a secret indirection term."""
    haystack = line.lower()
    for term in ALLOWLIST_TERMS:
        if term.lower() in haystack:
            return True
    return False
def scan_text_for_secrets(path: str, text: str) -> tuple[RedactionFinding, ...]:
    """Scan *text* line by line and report every secret-pattern hit."""
    findings: list[RedactionFinding] = []
    critical_patterns = {"private_key", "connection_string"}
    for line_number, line in enumerate(text.splitlines(), start=1):
        if is_allowlisted(line):
            continue
        for pattern_id, pattern in SECRET_PATTERNS:
            match = pattern.search(line)
            if match is None:
                continue
            sample = match.group(0)
            if len(sample) > 90:
                # Keep samples short so the report itself never leaks much.
                sample = sample[:87] + "..."
            findings.append(
                RedactionFinding(
                    path=path,
                    pattern_id=pattern_id,
                    line=line_number,
                    sample=sample,
                    severity="critical" if pattern_id in critical_patterns else "warning",
                    recommendation="Substituir valor sensivel por referencia opaca e registrar apenas credentialRef/secretRef.",
                )
            )
    return tuple(findings)
def iter_text_files(root: Path, suffixes: Sequence[str] = (".md", ".json", ".csv", ".html", ".txt")) -> Iterable[Path]:
    """Yield text files under *root* matching *suffixes*, skipping build/VCS dirs."""
    if not root.exists():
        return
    skipped_dirs = {".git", ".test-tmp", "__pycache__", "node_modules", "dist", "build"}
    for candidate in root.rglob("*"):
        if not candidate.is_file():
            continue
        if candidate.suffix.lower() not in suffixes:
            continue
        if any(part in skipped_dirs for part in candidate.parts):
            continue
        yield candidate
def scan_generated_artifacts(root: Path) -> RedactionReport:
    """Scan every generated text artifact under *root* for secret leaks."""
    findings: list[RedactionFinding] = []
    scanned = 0
    for path in iter_text_files(root):
        scanned += 1
        try:
            text = path.read_text(encoding="utf-8", errors="ignore")
        except OSError:
            # Unreadable file: still counted as scanned, but yields no findings.
            continue
        findings.extend(scan_text_for_secrets(str(path), text))
    return RedactionReport(scanned_files=scanned, findings=tuple(findings), passed=not findings)
def redaction_markdown(report: RedactionReport) -> str:
    """Render the redaction scan result as a markdown summary."""
    lines: list[str] = [
        "# Redaction Check Mais Humana",
        "",
        f"- arquivos varridos: `{report.scanned_files}`",
        f"- passou: `{report.passed}`",
        f"- achados: `{len(report.findings)}`",
        "",
    ]
    if report.findings:
        lines.extend(("## Achados", ""))
        for finding in report.findings:
            lines.append(
                f"- `{finding.severity}` {finding.path}:{finding.line} "
                f"({finding.pattern_id}) - {finding.recommendation}"
            )
    else:
        lines.append("Nenhum segredo aparente encontrado nos artefatos textuais gerados.")
    return "\n".join(lines).strip() + "\n"

548
src/mais_humana/reports.py Normal file
View File

@@ -0,0 +1,548 @@
"""High-level orchestration for report generation."""
from __future__ import annotations
import json
from pathlib import Path
from typing import Sequence
from .catalog import HUMAN_PROFILES, PLATFORMS
from .charts import matrix_heatmap_svg, platform_bar_svg, profile_radar_svg
from .commands import base_validation_commands, commands_markdown, platform_validation_commands
from .contract import build_contract, contract_markdown
from .docx_writer import DocxDocument, write_lines_docx
from .acceptance import acceptance_markdown, build_acceptance_report
from .evidence_index import build_evidence_index, evidence_markdown
from .evidence_graph import build_evidence_graph
from .exit_order_compiler import compile_governance_orders, compiled_orders_markdown, order_coverage_rows, source_candidate_rows
from .governance_diff import (
diff_governance_snapshots,
governance_delta_markdown,
governance_delta_rows,
load_governance_snapshot,
snapshot_from_portfolio,
write_governance_snapshot,
)
from .governance_engine import build_governance_portfolio, rows_to_csv
from .governance_exports import governance_exports, write_central_lifecycle_exports, write_governance_exports
from .governance_scenarios import build_scenario_portfolio
from .governance_storage import write_governance_semantic_state
from .human_readiness_registry import build_readiness_registry
from .matrix import build_global_recommendations, build_matrix, build_platform_reports, matrix_table
from .models import EcosystemHumanReport, GeneratedFile, PlatformHumanReport, ReportBundle, as_plain_data
from .narratives import ecosystem_markdown, ecosystem_summary_lines, platform_markdown, platform_report_lines
from .orders import audit_markdown, build_exit_orders, executed_order_markdown, pending_markdown, write_orders
from .html_export import write_index_html
from .insights import build_insights, dependency_dot, insights_markdown
from .operational_dossier import (
build_execution_round_dossier,
dossier_compact_rows,
dossier_to_markdown,
order_justifications_markdown,
write_csv_lines,
)
from .playbooks import build_playbooks, playbooks_markdown
from .portfolio_queries import build_operational_questions
from .quality import evaluate_ecosystem_quality, quality_to_markdown
from .questions import questions_for_ecosystem, questions_markdown
from .redaction import redaction_markdown, scan_generated_artifacts
from .round_assurance import assurance_markdown, assurance_rows, build_assurance_suite
from .runtime_budget import build_round_line_budget
from .scanner import scan_ecosystem
from .snapshots import diff_snapshots, load_snapshot, snapshot_delta_markdown, snapshot_from_reports, write_snapshot
from .status_pages import write_central_status_pages
from .service_order_lifecycle import build_round_execution_package
from .status_reconciler import build_reconciled_status, write_reconciled_status
from .storage import write_semantic_state
from .workflow_registry import build_workflow_portfolio
def repo_paths(project_root: Path) -> dict[str, Path]:
    """Map logical output areas to their directories under *project_root*."""
    docx_root = project_root / "relatorios-docx"
    return {
        "platform_markdown": project_root / "plataformas",
        "ecosystem": project_root / "ecossistema",
        "docx_platforms": docx_root / "plataformas",
        "docx_root": docx_root,
        "charts": project_root / "graficos",
        "matrices": project_root / "matrizes",
        "data": project_root / "dados",
        "orders": project_root / "os-orientadoras",
        "goals": project_root / "metas-humanas",
        "questions": project_root / "pessoas-e-papeis",
        # The HTML index is published alongside the ecosystem markdown.
        "html": project_root / "ecossistema",
    }
def ensure_project_dirs(project_root: Path) -> None:
    """Create every output directory the generators write into."""
    for directory in repo_paths(project_root).values():
        directory.mkdir(parents=True, exist_ok=True)
    extra_dirs = ("paradigma", "pessoas-e-papeis", "telas-e-relatorios", "templates")
    for name in extra_dirs:
        (project_root / name).mkdir(parents=True, exist_ok=True)
def generated_file(path: Path, project_root: Path, description: str, function: str, file_type: str, relation: str) -> GeneratedFile:
    """Describe one generated artifact with a project-relative, POSIX-style path."""
    try:
        relative = path.relative_to(project_root)
    except ValueError:
        # Path outside the project root: keep it unchanged.
        relative = path
    return GeneratedFile(
        path=str(relative).replace("\\", "/"),
        description=description,
        function=function,
        file_type=file_type,
        changed_by="mais_humana.generate",
        change_summary=description,
        relation_to_order=relation,
    )
def write_json(path: Path, payload: object) -> Path:
    """Serialize *payload* as pretty, key-sorted UTF-8 JSON at *path*."""
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(as_plain_data(payload), ensure_ascii=False, indent=2, sort_keys=True)
    path.write_text(serialized, encoding="utf-8")
    return path
def write_platform_docx(path: Path, report: PlatformHumanReport) -> Path:
    """Write the per-platform human report as a DOCX file and return its path."""
    doc = DocxDocument(title=f"Relatorio humano - {report.platform.title}")
    doc.heading("Missao", 2)
    doc.paragraph(report.platform.mission)
    doc.heading("Sintese", 2)
    doc.paragraph(report.summary)
    doc.heading("Estado atual", 2)
    for item in report.current_state:
        doc.bullet(item)
    doc.heading("Lacunas humanas", 2)
    for item in report.missing_for_humans:
        doc.bullet(item)
    doc.heading("Matriz por perfil", 2)
    rows = []
    for cell in sorted(report.cells, key=lambda item: item.profile_id):
        # Truncate long explanations so the table stays readable.
        rows.append((cell.profile_id, str(cell.score), cell.maturity.value, cell.explanation[:180]))
    doc.table(("Perfil", "Score", "Maturidade", "Leitura"), rows)
    doc.heading("Recomendacoes", 2)
    # Only the first eight recommendations are included.
    for recommendation in report.recommendations[:8]:
        doc.bullet(f"{recommendation.title}: {recommendation.reason}")
    return doc.write(path)
def write_ecosystem_docx(path: Path, reports: Sequence[PlatformHumanReport]) -> Path:
    """Write the ecosystem-wide human report as a DOCX file and return its path."""
    doc = DocxDocument(title="Relatorio Geral do Ecossistema Mais Humano")
    for line in ecosystem_summary_lines(reports):
        # Known section-marker lines from ecosystem_summary_lines become
        # headings; everything else is a plain paragraph.
        if line == "Leitura por necessidade humana":
            doc.heading(line, 2)
        elif line.startswith("Plataformas "):
            doc.heading(line, 2)
        else:
            doc.paragraph(line)
    rows = []
    for report in sorted(reports, key=lambda item: item.platform.platform_id):
        rows.append((report.platform.platform_id, str(report.average_score), str(report.scan.code_lines), str(len(report.scan.evidence))))
    doc.heading("Resumo por plataforma", 2)
    doc.table(("Plataforma", "Score", "Linhas", "Evidencias"), rows)
    return doc.write(path)
def write_profile_catalog(project_root: Path) -> Path:
    """Persist the human-profile catalog under pessoas-e-papeis/."""
    target = project_root / "pessoas-e-papeis" / "perfis-humanos.json"
    return write_json(target, HUMAN_PROFILES)
def write_platform_catalog(project_root: Path) -> Path:
    """Persist the platform catalog under dados/."""
    target = project_root / "dados" / "catalogo-plataformas.json"
    return write_json(target, PLATFORMS)
def write_matrix_csv(path: Path, table: Sequence[Sequence[str]]) -> Path:
    """Write *table* to *path* as CSV and return the path.

    Fields are quoted when they contain a comma, a newline or a double
    quote; embedded double quotes are doubled, per RFC 4180.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    lines = []
    for row in table:
        escaped = []
        for value in row:
            text = str(value).replace('"', '""')
            # BUGFIX: fields containing a double quote must themselves be
            # quoted; previously the doubled quote was emitted bare, so the
            # row no longer parsed as valid CSV.
            if "," in text or "\n" in text or '"' in text:
                text = f'"{text}"'
            escaped.append(text)
        lines.append(",".join(escaped))
    path.write_text("\n".join(lines) + "\n", encoding="utf-8")
    return path
def write_human_goals(project_root: Path, reports: Sequence[PlatformHumanReport]) -> Path:
    """Write one markdown section of human goals per platform."""
    target = project_root / "metas-humanas" / "metas-humanas-por-plataforma.md"
    lines: list[str] = ["# Metas humanas por plataforma", ""]
    ordered = sorted(reports, key=lambda item: item.platform.platform_id)
    for report in ordered:
        lines.extend((f"## {report.platform.title}", "", f"Score atual: {report.average_score}", ""))
        # Turn at most five human gaps into explicit deliverables.
        lines.extend(f"- Converter lacuna em entrega: {gap}" for gap in report.missing_for_humans[:5])
        lines.append("")
    target.write_text("\n".join(lines), encoding="utf-8")
    return target
def write_screen_report_map(project_root: Path, reports: Sequence[PlatformHumanReport]) -> Path:
    """Write the expected screens/reports surface map, one section per platform."""
    target = project_root / "telas-e-relatorios" / "mapa-telas-relatorios-esperados.md"
    lines: list[str] = ["# Mapa de telas e relatorios esperados", ""]
    for report in sorted(reports, key=lambda item: item.platform.platform_id):
        lines.extend((f"## {report.platform.title}", ""))
        lines.extend(f"- {surface}" for surface in report.platform.expected_surfaces)
        lines.append("")
    target.write_text("\n".join(lines), encoding="utf-8")
    return target
def write_paradigm(project_root: Path) -> Path:
    """Write the static paradigm manifesto markdown and return its path."""
    path = project_root / "paradigma" / "paradigma-mais-humano.md"
    # NOTE: the manifesto is fixed content, not generated data; it must stay
    # byte-stable across runs so diffs only surface real changes.
    text = """# Paradigma Mais Humano
A plataforma tudo-para-ia-mais-humana traduz estado tecnico em compreensao humana.
Ela pergunta:
- quem e atendido;
- como e atendido;
- o que ja funciona;
- o que ainda falta;
- qual ordem de servico melhora a experiencia real.
A plataforma nao substitui o nucleo, a central, o MCP ou a UI. Ela transforma evidencias dessas camadas em relatorios, matrizes e continuidade orientada a pessoas.
"""
    path.write_text(text, encoding="utf-8")
    return path
def generate(
    ecosystem_root: Path,
    project_root: Path,
    central_platform_folder: Path | None = None,
    relation_to_order: str = "0011_GERENCIAL__fundacao-da-plataforma",
    push_status: str | None = None,
) -> ReportBundle:
    """Run one full analysis round and write every report artifact.

    Scans the ecosystem under ``ecosystem_root``, derives the human-centered
    reports, governance, assurance and contract structures, and writes all
    artifacts (JSON, Markdown, CSV, SVG, DOCX, HTML) under ``project_root``.
    When ``central_platform_folder`` is given, the round is also mirrored
    into that central folder (exit orders, execution/audit records, status
    pages and the semantic SQLite state).

    Args:
        ecosystem_root: Root folder containing the platforms to scan.
        project_root: Output root where all artifacts are written.
        central_platform_folder: Optional central folder that receives exit
            orders, EXECUTADO/PENDENCIAS/AUDITORIA records and the semantic
            SQLite state; skipped entirely when ``None``.
        relation_to_order: Order identifier recorded on every generated file.
        push_status: Optional free text about the push/sync attempt; it is
            forwarded as extra evidence into governance and assurance.

    Returns:
        The final :class:`ReportBundle` describing everything generated.
    """
    # --- Phase 1: scan the ecosystem and build in-memory reports ---------
    ensure_project_dirs(project_root)
    scans = scan_ecosystem(ecosystem_root)
    evidence_records = build_evidence_index(scans)
    cells = build_matrix(scans)
    platform_reports = build_platform_reports(scans, cells)
    recommendations = build_global_recommendations(platform_reports)
    quality_reports = evaluate_ecosystem_quality(platform_reports)
    human_questions = questions_for_ecosystem(platform_reports)
    playbooks = build_playbooks(platform_reports)
    insights = build_insights(platform_reports, recommendations)
    command_specs = base_validation_commands(project_root, central_platform_folder) + platform_validation_commands(platform_reports)
    ecosystem_report = EcosystemHumanReport(scans=scans, platform_reports=platform_reports, recommendations=recommendations)
    exit_orders = build_exit_orders(recommendations)
    # Every artifact written below is also registered here; `generated`
    # grows monotonically and is re-snapshotted into successive bundles.
    generated: list[GeneratedFile] = []
    round_dossier = build_execution_round_dossier(
        project_root=project_root,
        platform_reports=platform_reports,
        recommendations=recommendations,
        output_orders=exit_orders,
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
    )
    governance_portfolio = build_governance_portfolio(
        platform_reports,
        recommendations=recommendations,
        round_dossier=round_dossier,
        # push_status is folded in as free-text evidence for governance.
        extra_text=(push_status or "",),
    )
    readiness_registry = build_readiness_registry(platform_reports, governance_portfolio)
    workflow_portfolio = build_workflow_portfolio(governance_portfolio)
    scenario_portfolio = build_scenario_portfolio(governance_portfolio)
    governance_orders = compile_governance_orders(governance_portfolio)
    governance_questions = build_operational_questions(governance_portfolio)
    line_budget = build_round_line_budget(ecosystem_root, project_root)
    # Lifecycle closing only makes sense against a central folder.
    lifecycle_package = (
        build_round_execution_package(
            central_platform_folder,
            governance_portfolio,
            round_dossier=round_dossier,
            total_code_lines_analyzed=line_budget.total_technical_lines,
            code_lines_available=line_budget.repositories[0].code_lines if line_budget.repositories else 0,
        )
        if central_platform_folder is not None
        else None
    )
    evidence_graph = build_evidence_graph(
        governance_portfolio,
        readiness_registry,
        workflow_portfolio,
        compiled_orders=governance_orders,
    )
    # --- Phase 2: write catalog, data, chart and report artifacts --------
    profile_catalog = write_profile_catalog(project_root)
    generated.append(generated_file(profile_catalog, project_root, "Catalogo de perfis humanos considerado pela matriz.", "catalogo de perfis", "json", relation_to_order))
    platform_catalog = write_platform_catalog(project_root)
    generated.append(generated_file(platform_catalog, project_root, "Catalogo canonico das plataformas avaliadas.", "catalogo de plataformas", "json", relation_to_order))
    paradigm = write_paradigm(project_root)
    generated.append(generated_file(paradigm, project_root, "Paradigma institucional da plataforma Mais Humana.", "paradigma", "markdown", relation_to_order))
    data_path = write_json(project_root / "dados" / "snapshot-ecossistema.json", ecosystem_report)
    generated.append(generated_file(data_path, project_root, "Snapshot JSON do ecossistema humano.", "dados auditaveis", "json", relation_to_order))
    evidence_json = write_json(project_root / "dados" / "indice-evidencias.json", evidence_records)
    generated.append(generated_file(evidence_json, project_root, "Indice JSON de evidencias coletadas.", "indice de evidencias", "json", relation_to_order))
    evidence_md = project_root / "ecossistema" / "INDICE-DE-EVIDENCIAS-HUMANAS.md"
    evidence_md.write_text(evidence_markdown(evidence_records), encoding="utf-8")
    generated.append(generated_file(evidence_md, project_root, "Indice Markdown de evidencias humanas.", "indice de evidencias", "markdown", relation_to_order))
    matrix_csv = write_matrix_csv(project_root / "matrizes" / "matriz-plataforma-perfil.csv", matrix_table(cells))
    generated.append(generated_file(matrix_csv, project_root, "Matriz plataforma x perfil em CSV.", "matriz tabular", "csv", relation_to_order))
    dossier_json = write_json(project_root / "dados" / "dossie-operacional-humano.json", round_dossier)
    generated.append(generated_file(dossier_json, project_root, "Dossie operacional humano da rodada em JSON.", "dossie operacional", "json", relation_to_order))
    dossier_md = project_root / "ecossistema" / "DOSSIE-OPERACIONAL-HUMANO.md"
    dossier_md.write_text(dossier_to_markdown(round_dossier), encoding="utf-8")
    generated.append(generated_file(dossier_md, project_root, "Dossie operacional humano da rodada em Markdown.", "dossie operacional", "markdown", relation_to_order))
    justifications_md = project_root / "ecossistema" / "JUSTIFICATIVA-ORDENS-DE-SERVICO.md"
    justifications_md.write_text(order_justifications_markdown(round_dossier), encoding="utf-8")
    generated.append(generated_file(justifications_md, project_root, "Justificativa das ordens de servico por evidencia e gate.", "justificativa de ordens", "markdown", relation_to_order))
    dossier_csv = project_root / "matrizes" / "dossie-operacional-humano.csv"
    dossier_csv.write_text(write_csv_lines(dossier_compact_rows(round_dossier)), encoding="utf-8")
    generated.append(generated_file(dossier_csv, project_root, "Resumo tabular do dossie operacional humano.", "dossie operacional", "csv", relation_to_order))
    ecosystem_md = project_root / "ecossistema" / "RELATORIO-GERAL-DO-ECOSSISTEMA-humana.md"
    ecosystem_md.write_text(ecosystem_markdown(platform_reports), encoding="utf-8")
    generated.append(generated_file(ecosystem_md, project_root, "Relatorio geral em Markdown.", "relatorio geral", "markdown", relation_to_order))
    ecosystem_docx = write_ecosystem_docx(project_root / "relatorios-docx" / "RELATORIO-GERAL-DO-ECOSSISTEMA-humana.docx", platform_reports)
    generated.append(generated_file(ecosystem_docx, project_root, "Relatorio geral em DOCX.", "relatorio docx", "docx", relation_to_order))
    heatmap = matrix_heatmap_svg(project_root / "graficos" / "matriz-plataforma-perfil.svg", cells)
    generated.append(generated_file(heatmap, project_root, "Heatmap SVG da matriz plataforma x perfil.", "grafico", "svg", relation_to_order))
    bars = platform_bar_svg(project_root / "graficos" / "maturidade-por-plataforma.svg", platform_reports)
    generated.append(generated_file(bars, project_root, "Grafico SVG de maturidade por plataforma.", "grafico", "svg", relation_to_order))
    quality_md = project_root / "ecossistema" / "QUALITY-GATE-MAIS-HUMANO.md"
    quality_md.write_text(quality_to_markdown(quality_reports), encoding="utf-8")
    generated.append(generated_file(quality_md, project_root, "Quality gate humano por plataforma.", "quality gate", "markdown", relation_to_order))
    insights_md = project_root / "ecossistema" / "INSIGHTS-OPERACIONAIS-MAIS-HUMANA.md"
    insights_md.write_text(insights_markdown(insights), encoding="utf-8")
    generated.append(generated_file(insights_md, project_root, "Insights de risco, dependencias, roadmap e cobertura.", "insights", "markdown", relation_to_order))
    dot_path = project_root / "graficos" / "dependencias-humanas.dot"
    dot_path.write_text(dependency_dot(insights), encoding="utf-8")
    generated.append(generated_file(dot_path, project_root, "Grafo DOT de dependencias humanas entre plataformas.", "grafo", "dot", relation_to_order))
    questions_md = project_root / "pessoas-e-papeis" / "perguntas-humanas-respondidas.md"
    questions_md.write_text(questions_markdown(human_questions), encoding="utf-8")
    generated.append(generated_file(questions_md, project_root, "Perguntas humanas respondidas por plataforma e perfil.", "perguntas humanas", "markdown", relation_to_order))
    playbooks_md = project_root / "pessoas-e-papeis" / "playbooks-humanos.md"
    playbooks_md.write_text(playbooks_markdown(playbooks), encoding="utf-8")
    generated.append(generated_file(playbooks_md, project_root, "Playbooks humanos por perfil operacional.", "playbooks", "markdown", relation_to_order))
    commands_md = project_root / "ecossistema" / "COMANDOS-HUMANOS-EQUIVALENTES.md"
    commands_md.write_text(commands_markdown(command_specs), encoding="utf-8")
    generated.append(generated_file(commands_md, project_root, "Comandos humanos equivalentes para validacao.", "comandos", "markdown", relation_to_order))
    # One Markdown report, one DOCX report and one radar chart per platform.
    for report in platform_reports:
        md_path = project_root / "plataformas" / f"{report.platform.platform_id}.md"
        md_path.write_text(platform_markdown(report), encoding="utf-8")
        generated.append(generated_file(md_path, project_root, f"Relatorio humano Markdown da plataforma {report.platform.platform_id}.", "relatorio por plataforma", "markdown", relation_to_order))
        docx_path = write_platform_docx(project_root / "relatorios-docx" / "plataformas" / f"{report.platform.platform_id}.docx", report)
        generated.append(generated_file(docx_path, project_root, f"Relatorio humano DOCX da plataforma {report.platform.platform_id}.", "relatorio docx por plataforma", "docx", relation_to_order))
        radar = profile_radar_svg(project_root / "graficos" / f"radar-{report.platform.platform_id}.svg", report)
        generated.append(generated_file(radar, project_root, f"Radar SVG humano da plataforma {report.platform.platform_id}.", "grafico radar", "svg", relation_to_order))
    goals = write_human_goals(project_root, platform_reports)
    generated.append(generated_file(goals, project_root, "Metas humanas por plataforma.", "metas humanas", "markdown", relation_to_order))
    screen_map = write_screen_report_map(project_root, platform_reports)
    generated.append(generated_file(screen_map, project_root, "Mapa de telas e relatorios esperados.", "mapa de superficie", "markdown", relation_to_order))
    order_summary = project_root / "os-orientadoras" / "ordens-de-saida.json"
    write_json(order_summary, exit_orders)
    generated.append(generated_file(order_summary, project_root, "Ordens de saida em JSON.", "ordens orientadoras", "json", relation_to_order))
    quality_json = write_json(project_root / "dados" / "quality-gates.json", quality_reports)
    generated.append(generated_file(quality_json, project_root, "Quality gates em JSON.", "quality gates", "json", relation_to_order))
    questions_json = write_json(project_root / "dados" / "perguntas-humanas.json", human_questions)
    generated.append(generated_file(questions_json, project_root, "Perguntas humanas em JSON.", "perguntas humanas", "json", relation_to_order))
    playbooks_json = write_json(project_root / "dados" / "playbooks-humanos.json", playbooks)
    generated.append(generated_file(playbooks_json, project_root, "Playbooks humanos em JSON.", "playbooks", "json", relation_to_order))
    commands_json = write_json(project_root / "dados" / "comandos-humanos-equivalentes.json", command_specs)
    generated.append(generated_file(commands_json, project_root, "Comandos humanos equivalentes em JSON.", "comandos", "json", relation_to_order))
    insights_json = write_json(project_root / "dados" / "insights-operacionais.json", insights)
    generated.append(generated_file(insights_json, project_root, "Insights operacionais em JSON.", "insights", "json", relation_to_order))
    index_html = write_index_html(project_root / "ecossistema" / "index.html", platform_reports, quality_reports)
    generated.append(generated_file(index_html, project_root, "Indice HTML local para revisao dos relatorios humanos.", "html operacional", "html", relation_to_order))
    # --- Phase 3: governance exports, snapshot and delta -----------------
    governance_export_bundle = write_governance_exports(
        project_root,
        governance_exports(
            project_root,
            governance_portfolio,
            readiness_registry,
            workflow_portfolio,
            scenario_portfolio,
            evidence_graph,
            governance_questions,
            budget=line_budget,
            compiled_orders=governance_orders,
            lifecycle=lifecycle_package,
        ),
        relation_to_order,
    )
    generated.extend(governance_export_bundle.generated_records)
    # Load last round's governance snapshot before overwriting it, so the
    # delta below compares previous vs current.
    governance_snapshot_path = project_root / "dados" / "snapshot-governanca-atual.json"
    previous_governance_snapshot = load_governance_snapshot(governance_snapshot_path)
    current_governance_snapshot = snapshot_from_portfolio(governance_portfolio)
    write_governance_snapshot(governance_snapshot_path, current_governance_snapshot)
    generated.append(generated_file(governance_snapshot_path, project_root, "Snapshot compacto de governanca operacional.", "snapshot governanca", "json", relation_to_order))
    governance_delta_path = project_root / "ecossistema" / "DELTA-GOVERNANCA-OPERACIONAL.md"
    governance_delta_path.write_text(
        governance_delta_markdown(diff_governance_snapshots(previous_governance_snapshot, current_governance_snapshot)),
        encoding="utf-8",
    )
    generated.append(generated_file(governance_delta_path, project_root, "Delta de governanca operacional.", "delta governanca", "markdown", relation_to_order))
    governance_delta_csv = project_root / "matrizes" / "delta-governanca-operacional.csv"
    governance_delta_csv.write_text(
        rows_to_csv(governance_delta_rows(diff_governance_snapshots(previous_governance_snapshot, current_governance_snapshot))),
        encoding="utf-8",
    )
    generated.append(generated_file(governance_delta_csv, project_root, "Delta de governanca operacional em CSV.", "delta governanca", "csv", relation_to_order))
    # --- Phase 4: mirror orders and round records into the central folder -
    if central_platform_folder is not None:
        written_orders = write_orders(exit_orders, central_platform_folder)
        # NOTE(review): order_summary was already written above; this second
        # write_json looks redundant — confirm before removing.
        write_json(order_summary, exit_orders)
        for path in written_orders:
            generated.append(generated_file(path, project_root, "Ordem de saida criada na central.", "ordem de servico", "markdown", relation_to_order))
        reports_dir = central_platform_folder / "reports"
        reports_dir.mkdir(parents=True, exist_ok=True)
        executed = reports_dir / "EXECUTADO__fundacao-tudo-para-ia-mais-humana.md"
        executed.write_text(executed_order_markdown(platform_reports, exit_orders), encoding="utf-8")
        generated.append(generated_file(executed, project_root, "Registro EXECUTADO da rodada.", "registro de execucao", "markdown", relation_to_order))
        pending = reports_dir / "PENDENCIAS-CODEX__fundacao-tudo-para-ia-mais-humana.md"
        pending.write_text(pending_markdown(platform_reports, push_status=push_status), encoding="utf-8")
        generated.append(generated_file(pending, project_root, "Registro de pendencias reais da rodada.", "pendencias", "markdown", relation_to_order))
        audit_dir = central_platform_folder / "audit"
        audit_dir.mkdir(parents=True, exist_ok=True)
        audit = audit_dir / "AUDITORIA-GPT__fundacao-tudo-para-ia-mais-humana.md"
        audit.write_text(audit_markdown(platform_reports, exit_orders), encoding="utf-8")
        generated.append(generated_file(audit, project_root, "Auditoria da rodada.", "auditoria", "markdown", relation_to_order))
        operational_executed = reports_dir / "EXECUTADO__rodada-operacional-mais-humana.md"
        operational_executed.write_text(dossier_to_markdown(round_dossier), encoding="utf-8")
        generated.append(generated_file(operational_executed, project_root, "Registro EXECUTADO operacional com dossie humano.", "registro de execucao", "markdown", relation_to_order))
        operational_pending = reports_dir / "PENDENCIAS-CODEX__rodada-operacional-mais-humana.md"
        operational_pending.write_text("\n".join(["# Pendencias operacionais consolidadas", ""] + [f"- {item}" for item in round_dossier.pending_items]) + "\n", encoding="utf-8")
        generated.append(generated_file(operational_pending, project_root, "Pendencias consolidadas do dossie operacional.", "pendencias", "markdown", relation_to_order))
        operational_audit = audit_dir / "AUDITORIA-GPT__rodada-operacional-mais-humana.md"
        operational_audit.write_text(order_justifications_markdown(round_dossier), encoding="utf-8")
        generated.append(generated_file(operational_audit, project_root, "Auditoria operacional das ordens tratadas.", "auditoria", "markdown", relation_to_order))
        sqlite_path = central_platform_folder / "controle-semantico.sqlite"
        write_semantic_state(sqlite_path, tuple(generated), exit_orders, platform_reports, recommendations, round_dossier)
    # --- Phase 5: acceptance, redaction and score snapshot/delta ---------
    # A provisional bundle is needed because acceptance checks inspect the
    # files generated so far, before the remaining artifacts are written.
    provisional_bundle = ReportBundle(
        output_root=str(project_root),
        generated_files=tuple(generated),
        platform_count=len(platform_reports),
        profile_count=len(HUMAN_PROFILES),
        matrix_cells=len(cells),
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
        warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
    )
    acceptance = build_acceptance_report(project_root, platform_reports, exit_orders, provisional_bundle)
    acceptance_path = project_root / "ecossistema" / "ACCEPTANCE-CHECKLIST-MAIS-HUMANA.md"
    acceptance_path.write_text(acceptance_markdown(acceptance), encoding="utf-8")
    generated.append(generated_file(acceptance_path, project_root, "Checklist de aceite da rodada.", "acceptance", "markdown", relation_to_order))
    redaction = scan_generated_artifacts(project_root)
    redaction_path = project_root / "ecossistema" / "REDACTION-CHECK-MAIS-HUMANA.md"
    redaction_path.write_text(redaction_markdown(redaction), encoding="utf-8")
    generated.append(generated_file(redaction_path, project_root, "Checagem textual de vazamento de segredos.", "redaction", "markdown", relation_to_order))
    # Same load-before-overwrite pattern as the governance snapshot above.
    snapshot_path = project_root / "dados" / "snapshot-score-atual.json"
    previous_snapshot = load_snapshot(snapshot_path)
    current_snapshot = snapshot_from_reports(platform_reports)
    write_snapshot(snapshot_path, current_snapshot)
    generated.append(generated_file(snapshot_path, project_root, "Snapshot compacto de score por plataforma.", "snapshot", "json", relation_to_order))
    delta_path = project_root / "ecossistema" / "DELTA-MATURIDADE-HUMANA.md"
    delta_path.write_text(snapshot_delta_markdown(diff_snapshots(previous_snapshot, current_snapshot)), encoding="utf-8")
    generated.append(generated_file(delta_path, project_root, "Delta de maturidade humana contra snapshot anterior.", "delta", "markdown", relation_to_order))
    # --- Phase 6: central status pages and semantic state refresh --------
    if central_platform_folder is not None:
        central_bundle = ReportBundle(
            output_root=str(project_root),
            generated_files=tuple(generated),
            platform_count=len(platform_reports),
            profile_count=len(HUMAN_PROFILES),
            matrix_cells=len(cells),
            total_code_lines_analyzed=ecosystem_report.total_code_lines,
            warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
        )
        for path in write_central_status_pages(central_platform_folder, central_bundle, platform_reports, exit_orders):
            generated.append(generated_file(path, project_root, "Pagina de estado/indice da central.", "estado central", "markdown", relation_to_order))
        sqlite_path = central_platform_folder / "controle-semantico.sqlite"
        write_semantic_state(sqlite_path, tuple(generated), exit_orders, platform_reports, recommendations, round_dossier)
    # --- Phase 7: assurance suite over everything generated so far -------
    final_bundle = ReportBundle(
        output_root=str(project_root),
        generated_files=tuple(generated),
        platform_count=len(platform_reports),
        profile_count=len(HUMAN_PROFILES),
        matrix_cells=len(cells),
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
        warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
    )
    assurance = build_assurance_suite(
        project_root=project_root,
        bundle=final_bundle,
        platform_reports=platform_reports,
        portfolio=governance_portfolio,
        lifecycle_package=lifecycle_package,
        compiled_orders=governance_orders,
        central_folder=central_platform_folder,
        extra_text=(push_status or "",),
    )
    assurance_json = write_json(project_root / "dados" / "assurance-rodada.json", assurance)
    generated.append(generated_file(assurance_json, project_root, "Assurance da rodada em JSON.", "assurance", "json", relation_to_order))
    assurance_md = project_root / "ecossistema" / "ASSURANCE-RODADA-MAIS-HUMANA.md"
    assurance_md.write_text(assurance_markdown(assurance), encoding="utf-8")
    generated.append(generated_file(assurance_md, project_root, "Assurance da rodada em Markdown.", "assurance", "markdown", relation_to_order))
    assurance_csv = project_root / "matrizes" / "assurance-rodada.csv"
    assurance_csv.write_text(rows_to_csv(assurance_rows(assurance)), encoding="utf-8")
    generated.append(generated_file(assurance_csv, project_root, "Assurance da rodada em CSV.", "assurance", "csv", relation_to_order))
    # --- Phase 8: lifecycle close-out and reconciled central state -------
    if central_platform_folder is not None and lifecycle_package is not None:
        for path in write_central_lifecycle_exports(central_platform_folder, lifecycle_package):
            generated.append(generated_file(path, project_root, "Fechamento lifecycle das ordens ativas na central.", "lifecycle central", "markdown", relation_to_order))
        reconciled = build_reconciled_status(
            governance_portfolio,
            readiness_registry,
            workflow_portfolio,
            scenario_portfolio,
            lifecycle=lifecycle_package,
            budget=line_budget,
            assurance=assurance,
        )
        for path in write_reconciled_status(central_platform_folder, reconciled):
            generated.append(generated_file(path, project_root, "Estado reconciliado da central.", "estado reconciliado", "markdown", relation_to_order))
        write_governance_semantic_state(
            central_platform_folder / "controle-semantico.sqlite",
            governance_portfolio,
            readiness_registry,
            workflow_portfolio,
            scenario_portfolio,
            assurance=assurance,
            lifecycle=lifecycle_package,
            budget=line_budget,
        )
    # --- Phase 9: public contract, last semantic sync and final bundle ---
    # Rebuilt again because `generated` has grown since the assurance pass.
    final_bundle = ReportBundle(
        output_root=str(project_root),
        generated_files=tuple(generated),
        platform_count=len(platform_reports),
        profile_count=len(HUMAN_PROFILES),
        matrix_cells=len(cells),
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
        warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
    )
    contract = build_contract(final_bundle, platform_reports)
    contract_json = write_json(project_root / "dados" / "contrato-publico-mais-humana.json", contract)
    generated.append(generated_file(contract_json, project_root, "Contrato publico JSON da plataforma Mais Humana.", "contrato", "json", relation_to_order))
    contract_md = project_root / "ecossistema" / "CONTRATO-PUBLICO-MAIS-HUMANA.md"
    contract_md.write_text(contract_markdown(contract), encoding="utf-8")
    generated.append(generated_file(contract_md, project_root, "Contrato publico Markdown da plataforma Mais Humana.", "contrato", "markdown", relation_to_order))
    if central_platform_folder is not None:
        sqlite_path = central_platform_folder / "controle-semantico.sqlite"
        write_semantic_state(sqlite_path, tuple(generated), exit_orders, platform_reports, recommendations, round_dossier)
    return ReportBundle(
        output_root=str(project_root),
        generated_files=tuple(generated),
        platform_count=len(platform_reports),
        profile_count=len(HUMAN_PROFILES),
        matrix_cells=len(cells),
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
        warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
    )

View File

@@ -0,0 +1,393 @@
"""Assurance checks for a full Mais Humana service-order round."""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Sequence
from .exit_order_compiler import CompiledOrderSet
from .governance_models import EcosystemGovernancePortfolio, RoundExecutionPackage, RoundMinimumStatus
from .models import PlatformHumanReport, ReportBundle, as_plain_data, merge_unique
@dataclass(slots=True)
class AssuranceCase:
    """One assurance case expected at round closeout."""

    # Stable identifier, e.g. "artifact.dados.snapshot-ecossistema.json".
    case_id: str
    # Human-readable title shown in Markdown/CSV reports.
    title: str
    # Whether a failure of this case may count toward the round's blockers.
    required: bool
    # Outcome of the check.
    passed: bool
    # "blocker" or "warning" on failure; the case() factory downgrades
    # passing cases to "info".
    severity: str
    # Free-form evidence strings backing the verdict.
    evidence: tuple[str, ...]
    # Why the case passed or failed.
    reason: str
    # Remediation step; meaningful when the case failed.
    next_action: str

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this case via as_plain_data."""
        return as_plain_data(self)
@dataclass(slots=True)
class AssuranceSuite:
    """Complete assurance result for the round."""

    # Stable suite identifier (e.g. "mais-humana.assurance.v1").
    suite_id: str
    # Every evaluated case, in evaluation order.
    cases: tuple[AssuranceCase, ...]
    # True when no required blocker-severity case failed.
    passed: bool
    # Count of failed, required, blocker-severity cases.
    blocker_count: int
    # Count of failed non-blocker cases.
    warning_count: int
    # Human-readable one-line summaries for reports.
    summary: tuple[str, ...]

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data mapping of this suite via as_plain_data."""
        return as_plain_data(self)

    @property
    def failed_cases(self) -> tuple[AssuranceCase, ...]:
        """All cases that did not pass, regardless of severity."""
        return tuple(case for case in self.cases if not case.passed)
def case(
    case_id: str,
    title: str,
    passed: bool,
    reason: str,
    next_action: str,
    evidence: Iterable[str] = (),
    severity: str = "warning",
    required: bool = True,
) -> AssuranceCase:
    """Build an AssuranceCase, normalizing evidence and severity.

    Evidence items are stringified into a tuple, and a passing case is
    always reported with severity "info" regardless of the severity the
    caller proposed for its failure path.
    """
    effective_severity = "info" if passed else severity
    normalized_evidence = tuple(str(entry) for entry in evidence)
    return AssuranceCase(
        case_id=case_id,
        title=title,
        required=required,
        passed=passed,
        severity=effective_severity,
        evidence=normalized_evidence,
        reason=reason,
        next_action=next_action,
    )
def artifact_exists(root: Path, relative: str) -> bool:
    """Return True when *relative* resolves to an existing path under *root*."""
    candidate = root / relative
    return candidate.exists()
def no_node_modules(root: Path) -> bool:
    """Return True when no node_modules *directory* exists anywhere under *root*.

    A missing *root* is treated as clean; files merely named
    "node_modules" are ignored.
    """
    if not root.exists():
        return True
    return not any(match.is_dir() for match in root.rglob("node_modules"))
def generated_file_paths(bundle: ReportBundle) -> set[str]:
    """Collect the bundle's generated-file paths, normalized to forward slashes."""
    normalized: set[str] = set()
    for record in bundle.generated_files:
        normalized.add(record.path.replace("\\", "/"))
    return normalized
def required_artifact_cases(project_root: Path, bundle: ReportBundle) -> tuple[AssuranceCase, ...]:
    """One blocker-severity case per mandatory round artifact.

    Each case passes only when the artifact exists on disk under
    *project_root*; the evidence also records whether it was registered
    in the bundle.
    """
    mandatory = (
        "dados/snapshot-ecossistema.json",
        "dados/dossie-operacional-humano.json",
        "dados/governanca-operacional.json",
        "ecossistema/RELATORIO-GERAL-DO-ECOSSISTEMA-humana.md",
        "ecossistema/GOVERNANCA-OPERACIONAL-MAIS-HUMANA.md",
        "ecossistema/DOSSIE-OPERACIONAL-HUMANO.md",
        "matrizes/matriz-plataforma-perfil.csv",
        "matrizes/governanca-checks.csv",
        "graficos/matriz-plataforma-perfil.svg",
        "relatorios-docx/RELATORIO-GERAL-DO-ECOSSISTEMA-humana.docx",
    )
    registered = generated_file_paths(bundle)
    cases: list[AssuranceCase] = []
    for relative in mandatory:
        present = artifact_exists(project_root, relative)
        bundle_note = "registrado" if relative in registered else "nao registrado no bundle"
        cases.append(
            case(
                f"artifact.{relative.replace('/', '.')}",
                f"Artefato obrigatorio {relative}",
                present,
                "Artefato encontrado." if present else "Artefato nao foi encontrado no projeto real.",
                "regenerar relatorios e conferir escrita no projeto real",
                evidence=(relative, bundle_note),
                severity="blocker",
            )
        )
    return tuple(cases)
def minimum_cases(package: RoundExecutionPackage | None) -> tuple[AssuranceCase, ...]:
    """Map each round minimum to a case; a missing package is itself a blocker."""
    if package is None:
        missing_package = case(
            "minimum.lifecycle-package",
            "Pacote de lifecycle existe",
            False,
            "Pacote de lifecycle nao foi produzido.",
            "gerar fechamento de ordens ativas",
            severity="blocker",
        )
        return (missing_package,)
    results: list[AssuranceCase] = []
    for minimum in package.minimums:
        met = minimum.status == RoundMinimumStatus.MET
        # An "impossible" minimum blocks the round; anything else unmet is
        # only a warning.
        level = "blocker" if minimum.status == RoundMinimumStatus.IMPOSSIBLE else "warning"
        results.append(
            case(
                f"minimum.{minimum.minimum_id}",
                minimum.title,
                met,
                minimum.reason,
                minimum.next_action,
                evidence=(str(minimum.actual_value), str(minimum.required_value), minimum.status.value),
                severity=level,
            )
        )
    return tuple(results)
def governance_cases(portfolio: EcosystemGovernancePortfolio) -> tuple[AssuranceCase, ...]:
    """Cases asserting the governance portfolio is populated and triaged."""
    average_score = portfolio.average_governance_score
    blocked_total = len(portfolio.blocked_platforms)
    executive_total = 0
    managerial_total = 0
    for candidate in portfolio.order_candidates:
        kind = candidate.order_type.value
        if kind == "executiva":
            executive_total += 1
        elif kind == "gerencial":
            managerial_total += 1
    portfolio_case = case(
        "governance.portfolio",
        "Portfolio de governanca criado",
        bool(portfolio.cards),
        f"Cards criados: {len(portfolio.cards)}; score medio {average_score}.",
        "reexecutar avaliacao de governanca",
        evidence=(str(len(portfolio.cards)), str(average_score)),
        severity="blocker",
    )
    # Passes when there are no blocked platforms at all, or when the
    # blockers were summarized/classified.
    blockers_case = case(
        "governance.blockers-classified",
        "Blockers de governanca classificados",
        bool(portfolio.blockers_summary) or blocked_total == 0,
        f"Plataformas bloqueadas: {blocked_total}.",
        "classificar blockers por check, dominio, plataforma e proxima acao",
        evidence=portfolio.blockers_summary[:8],
        severity="warning",
    )
    executive_case = case(
        "governance.executive-candidates",
        "Candidatas executivas reais existem",
        executive_total >= 5,
        f"Candidatas executivas: {executive_total}.",
        "criar checks executivos para pendencias materiais ainda nao cobertas",
        evidence=(str(executive_total),),
        severity="warning",
    )
    managerial_case = case(
        "governance.managerial-candidates",
        "Candidatas gerenciais reais existem",
        managerial_total >= 5,
        f"Candidatas gerenciais: {managerial_total}.",
        "criar checks gerenciais para maturidade e relacoes de ecossistema",
        evidence=(str(managerial_total),),
        severity="warning",
    )
    return (portfolio_case, blockers_case, executive_case, managerial_case)
def compiled_order_cases(compiled: CompiledOrderSet | None) -> tuple[AssuranceCase, ...]:
    """Cases for the compiled exit orders; an absent set yields no cases."""
    if compiled is None:
        return ()
    shared_action = "usar candidatas de governanca ou recomendacoes reais para completar saida"
    executive = case(
        "orders.compiled-executive",
        "Ordens executivas compiladas",
        compiled.executive_count >= 5,
        f"Executivas compiladas: {compiled.executive_count}.",
        shared_action,
        evidence=(str(compiled.executive_count),),
        severity="warning",
    )
    managerial = case(
        "orders.compiled-managerial",
        "Ordens gerenciais compiladas",
        compiled.managerial_count >= 5,
        f"Gerenciais compiladas: {compiled.managerial_count}.",
        shared_action,
        evidence=(str(compiled.managerial_count),),
        severity="warning",
    )
    return (executive, managerial)
def platform_cases(reports: Sequence[PlatformHumanReport]) -> tuple[AssuranceCase, ...]:
    """Cases asserting platform coverage and collected evidence."""
    total = len(reports)
    with_code = 0
    with_evidence = 0
    for report in reports:
        if report.scan.code_lines > 0:
            with_code += 1
        if report.scan.evidence:
            with_evidence += 1
    count_case = case(
        "platforms.count",
        "Catalogo de 14 plataformas analisado",
        total >= 14,
        f"Plataformas no relatorio: {total}.",
        "revisar catalogo canonico de plataformas",
        evidence=(str(total),),
        severity="blocker",
    )
    code_case = case(
        "platforms.code-evidence",
        "Evidencia tecnica encontrada em plataformas",
        with_code > 0,
        f"Plataformas com codigo detectado: {with_code}.",
        "verificar raiz do ecossistema e budgets de scanner",
        evidence=(str(with_code),),
        severity="warning",
    )
    local_case = case(
        "platforms.local-evidence",
        "Evidencias locais coletadas",
        with_evidence > 0,
        f"Plataformas com evidencias: {with_evidence}.",
        "ampliar scanner ou registrar ausencia material",
        evidence=(str(with_evidence),),
        severity="warning",
    )
    return (count_case, code_case, local_case)
def operational_hygiene_cases(project_root: Path, central_folder: Path | None) -> tuple[AssuranceCase, ...]:
    """Hygiene checks: no stray node_modules, semantic SQLite present.

    Args:
        project_root: Project output root scanned for node_modules.
        central_folder: Optional central folder expected to hold the
            semantic SQLite file; skipped when ``None``.

    Returns:
        Blocker-severity cases for each hygiene check that applies.
    """
    # Fix: no_node_modules walks the whole tree (rglob); the original
    # evaluated it twice (condition + message). Compute it once.
    clean = no_node_modules(project_root)
    cases = [
        case(
            "hygiene.no-node-modules",
            "node_modules removido do projeto real",
            clean,
            "Nenhuma pasta node_modules local foi encontrada." if clean else "node_modules foi encontrado.",
            "remover node_modules antes de sincronizar",
            evidence=(str(project_root),),
            severity="blocker",
        )
    ]
    if central_folder is not None:
        # Likewise, stat the sqlite path once instead of twice.
        sqlite_path = central_folder / "controle-semantico.sqlite"
        sqlite_present = sqlite_path.exists()
        cases.append(
            case(
                "hygiene.semantic-sql",
                "SQLite semantico existe",
                sqlite_present,
                "SQLite semantico encontrado." if sqlite_present else "SQLite semantico ausente.",
                "executar write_semantic_state e registrar funcoes de arquivos",
                evidence=(str(sqlite_path),),
                severity="blocker",
            )
        )
    return tuple(cases)
def cloudflare_premise_cases(extra_text: Sequence[str]) -> tuple[AssuranceCase, ...]:
    """Cases encoding the premise that Cloudflare plugin failures are not blockers.

    The round context is searched textually (case-insensitive) for markers
    of a plugin attempt and for any classification of that attempt as a
    blocker.
    """
    haystack = "\n".join(extra_text).lower()
    attempt_markers = ("plugin cloudflare", "user rejected mcp tool call", "mcp tool call")
    plugin_attempted = any(marker in haystack for marker in attempt_markers)
    plugin_as_blocker = "plugin cloudflare" in haystack and ("blocker" in haystack or "bloqueio" in haystack)
    attempted_case = case(
        "cloudflare.plugin-tested",
        "Teste inicial do plugin Cloudflare registrado",
        plugin_attempted,
        "Tentativa do plugin Cloudflare foi informada no contexto da rodada." if plugin_attempted else "Nao ha registro textual da tentativa do plugin.",
        "registrar somente a tentativa, sem tratar falha como bloqueio",
        evidence=extra_text[:4],
        severity="warning",
        required=False,
    )
    not_blocker_case = case(
        "cloudflare.plugin-not-blocker",
        "Falha do plugin Cloudflare nao virou blocker",
        not plugin_as_blocker,
        "Nao foi detectada classificacao direta da falha do plugin como blocker.",
        "remover qualquer pendencia que use o plugin como impedimento operacional",
        evidence=extra_text[:4],
        severity="blocker",
    )
    return (attempted_case, not_blocker_case)
def build_assurance_suite(
    project_root: Path,
    bundle: ReportBundle,
    platform_reports: Sequence[PlatformHumanReport],
    portfolio: EcosystemGovernancePortfolio,
    lifecycle_package: RoundExecutionPackage | None = None,
    compiled_orders: CompiledOrderSet | None = None,
    central_folder: Path | None = None,
    extra_text: Sequence[str] = (),
) -> AssuranceSuite:
    """Assemble every assurance case family and aggregate the round verdict.

    The suite passes only when no required blocker-severity case failed;
    failed non-blocker cases are counted as warnings.
    """
    all_cases: list[AssuranceCase] = [
        *required_artifact_cases(project_root, bundle),
        *platform_cases(platform_reports),
        *governance_cases(portfolio),
        *compiled_order_cases(compiled_orders),
        *minimum_cases(lifecycle_package),
        *operational_hygiene_cases(project_root, central_folder),
        *cloudflare_premise_cases(extra_text),
    ]
    blockers = 0
    warnings = 0
    for entry in all_cases:
        if entry.passed:
            continue
        if entry.required and entry.severity == "blocker":
            blockers += 1
        if entry.severity != "blocker":
            warnings += 1
    summary = (
        f"Casos de assurance: {len(all_cases)}",
        f"Blockers de assurance: {blockers}",
        f"Warnings de assurance: {warnings}",
        f"Artefatos gerados no bundle: {len(bundle.generated_files)}",
        f"Plataformas no portfolio: {len(portfolio.cards)}",
    )
    return AssuranceSuite(
        suite_id="mais-humana.assurance.v1",
        cases=tuple(all_cases),
        passed=blockers == 0,
        blocker_count=blockers,
        warning_count=warnings,
        summary=summary,
    )
def assurance_markdown(suite: AssuranceSuite) -> str:
    """Render the assurance suite as a Markdown report string."""
    out: list[str] = [
        "# Assurance da rodada Mais Humana",
        "",
        f"- suite_id: `{suite.suite_id}`",
        f"- passed: `{suite.passed}`",
        f"- blockers: `{suite.blocker_count}`",
        f"- warnings: `{suite.warning_count}`",
        "",
        "## Sumario",
        "",
    ]
    out.extend(f"- {entry}" for entry in suite.summary)
    out += ["", "## Casos", ""]
    for case in suite.cases:
        label = case.severity if not case.passed else "ok"
        out += [
            f"### {case.case_id}",
            "",
            f"- status: `{label}`",
            f"- obrigatorio: `{case.required}`",
            f"- titulo: {case.title}",
            f"- razao: {case.reason}",
            f"- proxima_acao: {case.next_action}",
        ]
        if case.evidence:
            out.append("- evidencias:")
            out.extend(f"  - `{item}`" for item in case.evidence[:8])
        out.append("")
    return "\n".join(out).strip() + "\n"
def assurance_rows(suite: AssuranceSuite) -> list[list[str]]:
    """Tabulate assurance cases as CSV-ready rows, header first."""
    header = ["case_id", "passed", "severity", "required", "title", "reason", "next_action"]
    body = [
        [
            case.case_id,
            "yes" if case.passed else "no",
            case.severity,
            "yes" if case.required else "no",
            case.title,
            case.reason,
            case.next_action,
        ]
        for case in suite.cases
    ]
    return [header, *body]
def assurance_pending_items(suite: AssuranceSuite) -> tuple[str, ...]:
    """Deduplicated "case_id: next_action" strings for every failed case."""
    pending = [f"{case.case_id}: {case.next_action}" for case in suite.failed_cases]
    return merge_unique(pending)

View File

@@ -0,0 +1,548 @@
"""Line-budget and scan-budget helpers for full service-order rounds."""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Iterator, Sequence
from .catalog import PLATFORMS
from .models import as_plain_data, merge_unique
# File suffixes whose line counts are treated as production code.
CODE_EXTENSIONS = {".py", ".ts", ".tsx", ".js", ".mjs", ".cjs", ".java"}
# Superset of CODE_EXTENSIONS: suffixes counted toward the "technical reading" budget.
TECHNICAL_EXTENSIONS = CODE_EXTENSIONS | {".md", ".mdx", ".json", ".toml", ".yml", ".yaml", ".sql"}
# Directory names skipped entirely during repository walks
# (VCS data, dependency trees, build output, tool caches).
SKIP_DIRS = {
    ".git",
    ".test-tmp",
    "node_modules",
    "dist",
    "build",
    "coverage",
    "__pycache__",
    ".pytest_cache",
    ".mypy_cache",
    ".wrangler",
    ".next",
    ".nuxt",
    "vendor",
}
@dataclass(slots=True)
class FileLineBudget:
    """Line/byte accounting for one technical file inside a repository."""

    path: str  # repo-relative path, forward slashes
    extension: str  # lowercase suffix, e.g. ".py"
    lines: int  # counted lines (0 when skipped for size or read errors)
    bytes_size: int  # size on disk in bytes
    counted_as_code: bool  # True when the suffix is in CODE_EXTENSIONS

    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data via the shared model helper."""
        return as_plain_data(self)
@dataclass(slots=True)
class RepositoryLineBudget:
    """Aggregated line budget for a single repository."""

    repo_name: str  # display name (usually the directory name)
    repo_path: str  # absolute or as-given path string
    exists: bool  # False when the mirror is missing on disk
    files_seen: int  # every file visited by the walk
    files_counted: int  # files that produced a FileLineBudget
    code_lines: int  # sum of lines over CODE_EXTENSIONS files
    technical_lines: int  # sum of lines over all counted files
    largest_files: tuple[FileLineBudget, ...]  # top files by line count
    warnings: tuple[str, ...]  # walk/count anomalies, in detection order

    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data via the shared model helper."""
        return as_plain_data(self)
@dataclass(slots=True)
class RoundLineBudget:
    """Round-wide line budget across the project and platform mirrors."""

    repositories: tuple[RepositoryLineBudget, ...]  # by convention, project repo first
    total_code_lines: int
    total_technical_lines: int
    reading_minimum: int  # minimum technical lines to read across the ecosystem
    production_minimum: int  # minimum code lines produced in the project repo
    reading_minimum_met: bool
    production_minimum_met: bool
    summary: tuple[str, ...]  # human-readable headline facts

    def to_dict(self) -> dict[str, object]:
        """Serialize to plain data via the shared model helper."""
        return as_plain_data(self)

    @property
    def project_budget(self) -> RepositoryLineBudget | None:
        """Budget of the project repository (first entry by convention)."""
        return self.repositories[0] if self.repositories else None

    @property
    def missing_reading_lines(self) -> int:
        """Technical lines still needed to reach the reading minimum (never negative)."""
        return max(0, self.reading_minimum - self.total_technical_lines)

    @property
    def missing_production_lines(self) -> int:
        """Project code lines still needed to reach the production minimum."""
        current = self.project_budget.code_lines if self.project_budget is not None else 0
        return max(0, self.production_minimum - current)
def should_skip(path: Path) -> bool:
    """Tell whether a directory entry is on the skip list for walks."""
    name = path.name
    return name in SKIP_DIRS
def iter_files(root: Path, max_files: int = 20_000) -> Iterator[Path]:
    """Depth-first walk yielding at most ``max_files`` regular files.

    Directories on the skip list are pruned; unreadable directories are
    silently ignored. Yields nothing when *root* does not exist.
    """
    if not root.exists():
        return
    pending = [root]
    yielded = 0
    while pending and yielded < max_files:
        directory = pending.pop()
        try:
            children = sorted(directory.iterdir(), key=lambda child: child.name.lower())
        except OSError:
            continue
        for child in children:
            if child.is_dir():
                if not should_skip(child):
                    pending.append(child)
            elif child.is_file():
                yielded += 1
                yield child
                if yielded >= max_files:
                    break
def count_lines(path: Path, max_bytes: int = 650_000) -> int:
    """Count lines in a text file; oversized or unreadable files count as 0."""
    try:
        if path.stat().st_size > max_bytes:
            return 0
    except OSError:
        return 0
    try:
        with path.open("r", encoding="utf-8", errors="ignore") as stream:
            total = 0
            for _line in stream:
                total += 1
            return total
    except OSError:
        return 0
def safe_relative(path: Path, base: Path) -> str:
    """Forward-slash path of *path* relative to *base*; full path on mismatch."""
    try:
        relative = path.relative_to(base)
    except ValueError:
        return str(path).replace("\\", "/")
    return str(relative).replace("\\", "/")
def file_budget(path: Path, base: Path, max_bytes: int) -> FileLineBudget | None:
    """Build a FileLineBudget for *path*; None for non-technical or unstatable files."""
    extension = path.suffix.lower()
    if extension not in TECHNICAL_EXTENSIONS:
        return None
    try:
        byte_count = path.stat().st_size
    except OSError:
        return None
    return FileLineBudget(
        path=safe_relative(path, base),
        extension=extension,
        lines=count_lines(path, max_bytes=max_bytes),
        bytes_size=byte_count,
        counted_as_code=extension in CODE_EXTENSIONS,
    )
def repository_line_budget(
    repo_path: Path,
    repo_name: str | None = None,
    max_files: int = 20_000,
    max_bytes_per_file: int = 650_000,
) -> RepositoryLineBudget:
    """Walk one repository and accumulate its line budget.

    A missing repository yields an empty budget carrying a single warning.
    Warnings also flag hitting the file cap and a zero code-line count.
    """
    name = repo_name or repo_path.name
    if not repo_path.exists():
        return RepositoryLineBudget(
            repo_name=name,
            repo_path=str(repo_path),
            exists=False,
            files_seen=0,
            files_counted=0,
            code_lines=0,
            technical_lines=0,
            largest_files=(),
            warnings=("repositorio nao encontrado",),
        )
    counted: list[FileLineBudget] = []
    total_seen = 0
    for candidate in iter_files(repo_path, max_files=max_files):
        total_seen += 1
        entry = file_budget(candidate, repo_path, max_bytes=max_bytes_per_file)
        if entry is not None:
            counted.append(entry)
    code_total = sum(entry.lines for entry in counted if entry.counted_as_code)
    technical_total = sum(entry.lines for entry in counted)
    notes: list[str] = []
    if total_seen >= max_files:
        notes.append(f"limite de arquivos atingido: {max_files}")
    if code_total == 0:
        notes.append("nenhuma linha de codigo Python/TS/JS/Java contada")
    top = tuple(sorted(counted, key=lambda entry: entry.lines, reverse=True)[:20])
    return RepositoryLineBudget(
        repo_name=name,
        repo_path=str(repo_path),
        exists=True,
        files_seen=total_seen,
        files_counted=len(counted),
        code_lines=code_total,
        technical_lines=technical_total,
        largest_files=top,
        warnings=tuple(notes),
    )
def platform_budget_roots(ecosystem_root: Path) -> tuple[tuple[str, Path], ...]:
    """Pair every cataloged platform with its mirror path under the ecosystem root."""
    pairs = [(platform.repo_name, ecosystem_root / platform.repo_name) for platform in PLATFORMS]
    return tuple(pairs)
def build_round_line_budget(
    ecosystem_root: Path,
    project_root: Path,
    include_all_platforms: bool = True,
    extra_roots: Sequence[Path] = (),
    reading_minimum: int = 10_000,
    production_minimum: int = 5_500,
) -> RoundLineBudget:
    """Scan the project plus (optionally) all platform mirrors and extra roots.

    Paths are deduplicated case-insensitively while preserving order, so the
    project repository stays first (the ``project_budget`` convention).

    Fix: ``project_root.resolve()`` was recomputed inside the ``next()``
    predicate for every repository; it is loop-invariant and is now hoisted.
    """
    repo_specs: list[tuple[str, Path]] = [(project_root.name, project_root)]
    if include_all_platforms:
        repo_specs.extend(platform_budget_roots(ecosystem_root))
    repo_specs.extend((root.name, root) for root in extra_roots)
    seen: set[str] = set()
    budgets: list[RepositoryLineBudget] = []
    for name, path in repo_specs:
        key = str(path).lower()
        if key in seen:
            continue
        seen.add(key)
        budgets.append(repository_line_budget(path, name))
    total_code = sum(item.code_lines for item in budgets)
    total_technical = sum(item.technical_lines for item in budgets)
    resolved_project = project_root.resolve()  # hoisted loop invariant
    project_budget = next(
        (item for item in budgets if Path(item.repo_path).resolve() == resolved_project),
        None,
    )
    production_lines = project_budget.code_lines if project_budget is not None else 0
    summary = (
        f"Repositorios avaliados: {len(budgets)}",
        f"Linhas tecnicas totais: {total_technical}",
        f"Linhas de codigo totais: {total_code}",
        f"Linhas de codigo do projeto real: {production_lines}",
        f"Minimo de leitura cumprido: {total_technical >= reading_minimum}",
        f"Minimo de producao no projeto cumprido: {production_lines >= production_minimum}",
    )
    return RoundLineBudget(
        repositories=tuple(budgets),
        total_code_lines=total_code,
        total_technical_lines=total_technical,
        reading_minimum=reading_minimum,
        production_minimum=production_minimum,
        reading_minimum_met=total_technical >= reading_minimum,
        production_minimum_met=production_lines >= production_minimum,
        summary=summary,
    )
def budget_markdown(budget: RoundLineBudget) -> str:
    """Render the round line budget as a Markdown document."""
    out: list[str] = ["# Budget de linhas da rodada", ""]
    out.extend(f"- {entry}" for entry in budget.summary)
    out += ["", "## Repositorios", ""]
    for repo in sorted(budget.repositories, key=lambda entry: entry.repo_name):
        out += [
            f"### {repo.repo_name}",
            "",
            f"- existe: `{repo.exists}`",
            f"- arquivos vistos: `{repo.files_seen}`",
            f"- arquivos contados: `{repo.files_counted}`",
            f"- linhas codigo: `{repo.code_lines}`",
            f"- linhas tecnicas: `{repo.technical_lines}`",
        ]
        if repo.warnings:
            out.append("- warnings:")
            out.extend(f"  - {warning}" for warning in repo.warnings)
        out.append("- maiores arquivos:")
        out.extend(f"  - `{entry.path}`: `{entry.lines}` linhas" for entry in repo.largest_files[:8])
        out.append("")
    return "\n".join(out).strip() + "\n"
def budget_rows(budget: RoundLineBudget) -> list[list[str]]:
    """Tabulate per-repository budgets as CSV-ready rows, header first."""
    header = ["repo", "exists", "files_seen", "files_counted", "code_lines", "technical_lines", "warnings"]
    ordered = sorted(budget.repositories, key=lambda entry: entry.repo_name)
    body = [
        [
            repo.repo_name,
            "yes" if repo.exists else "no",
            str(repo.files_seen),
            str(repo.files_counted),
            str(repo.code_lines),
            str(repo.technical_lines),
            " | ".join(repo.warnings),
        ]
        for repo in ordered
    ]
    return [header, *body]
def budget_pending_items(budget: RoundLineBudget) -> tuple[str, ...]:
    """Summarize unmet minimums and per-repository warnings as pending items.

    Consistency fix: the project repository is read through the
    ``project_budget`` property (as ``missing_production_lines`` already
    does) instead of indexing ``repositories[0]`` directly; both encode
    the same first-entry convention.
    """
    items: list[str] = []
    if not budget.reading_minimum_met:
        items.append(
            f"minimo de leitura nao cumprido: {budget.total_technical_lines}/{budget.reading_minimum}; ampliar base material ou registrar ausencia real"
        )
    if not budget.production_minimum_met:
        project = budget.project_budget
        current = project.code_lines if project is not None else 0
        items.append(
            f"minimo de producao nao cumprido no projeto real: {current}/{budget.production_minimum}; implementar codigo util adicional ou justificar impossibilidade"
        )
    for repo in budget.repositories:
        for warning in repo.warnings:
            items.append(f"{repo.repo_name}: {warning}")
    return merge_unique(items)
def budget_recommendations(budget: RoundLineBudget) -> tuple[str, ...]:
    """Derive actionable, deduplicated recommendations from the line budget."""
    advice: list[str] = []
    if budget.reading_minimum_met:
        advice.append("manter leitura minima comprovada em reports e SQL semantico")
    else:
        advice.append(
            f"ampliar leitura tecnica/documental em {budget.missing_reading_lines} linhas ou registrar ausencia material"
        )
    if budget.production_minimum_met:
        advice.append("manter producao util de codigo acima do minimo da rodada")
    else:
        advice.append(
            f"produzir mais {budget.missing_production_lines} linhas uteis em Python/TS/JS/Java ou justificar impossibilidade real"
        )
    absent = [repo.repo_name for repo in budget.repositories if not repo.exists]
    if absent:
        advice.append("resolver espelhos ausentes: " + ", ".join(absent[:8]))
    codeless = [repo.repo_name for repo in budget.repositories if repo.exists and repo.code_lines == 0]
    if codeless:
        advice.append("classificar repositorios sem codigo contado: " + ", ".join(codeless[:8]))
    bulky = sorted(
        (repo for repo in budget.repositories if repo.technical_lines > 50_000),
        key=lambda repo: repo.technical_lines,
        reverse=True,
    )
    if bulky:
        advice.append("usar scanner com budget em repositorios grandes: " + ", ".join(repo.repo_name for repo in bulky[:5]))
    return merge_unique(advice)
def production_status_label(budget: RoundLineBudget) -> str:
    """Map the two minimum flags onto a compact status slug."""
    labels = {
        (True, True): "minimos-cumpridos",
        (True, False): "leitura-ok-producao-parcial",
        (False, True): "producao-ok-leitura-parcial",
        (False, False): "minimos-parciais",
    }
    return labels[(bool(budget.reading_minimum_met), bool(budget.production_minimum_met))]
def budget_status_payload(budget: RoundLineBudget) -> dict[str, object]:
    """Machine-readable status payload derived from the line budget."""
    project = budget.project_budget
    project_code = 0 if project is None else project.code_lines
    payload: dict[str, object] = {
        "status": production_status_label(budget),
        "reading_minimum": budget.reading_minimum,
        "production_minimum": budget.production_minimum,
        "total_technical_lines": budget.total_technical_lines,
        "total_code_lines": budget.total_code_lines,
        "project_code_lines": project_code,
        "missing_reading_lines": budget.missing_reading_lines,
        "missing_production_lines": budget.missing_production_lines,
        "recommendations": budget_recommendations(budget),
    }
    return payload
def repository_budget_markdown(repo: RepositoryLineBudget) -> str:
    """Render a single repository budget as a Markdown section."""
    out = [
        f"## {repo.repo_name}",
        "",
        f"- caminho: `{repo.repo_path}`",
        f"- existe: `{repo.exists}`",
        f"- arquivos vistos: `{repo.files_seen}`",
        f"- arquivos contados: `{repo.files_counted}`",
        f"- linhas codigo: `{repo.code_lines}`",
        f"- linhas tecnicas: `{repo.technical_lines}`",
    ]
    if repo.warnings:
        out.append("- warnings:")
        out.extend(f"  - {warning}" for warning in repo.warnings)
    if repo.largest_files:
        out.append("- maiores arquivos tecnicos:")
        for entry in repo.largest_files[:12]:
            label = "codigo" if entry.counted_as_code else "tecnico"
            out.append(f"  - `{entry.path}`: `{entry.lines}` linhas ({label})")
    return "\n".join(out).strip() + "\n"
def budget_recommendations_markdown(budget: RoundLineBudget) -> str:
    """Render the budget recommendations as a short Markdown note."""
    out = [
        "# Recomendacoes do budget de linhas",
        "",
        f"- status: `{production_status_label(budget)}`",
        f"- faltam linhas leitura: `{budget.missing_reading_lines}`",
        f"- faltam linhas producao: `{budget.missing_production_lines}`",
        "",
    ]
    out.extend(f"- {recommendation}" for recommendation in budget_recommendations(budget))
    return "\n".join(out).strip() + "\n"
def top_repositories_by_code(budget: RoundLineBudget, limit: int = 10) -> tuple[RepositoryLineBudget, ...]:
    """Repositories ranked by code lines, descending, capped at *limit*."""
    ranked = sorted(budget.repositories, key=lambda repo: repo.code_lines, reverse=True)
    return tuple(ranked[:limit])
def top_repositories_by_technical_lines(budget: RoundLineBudget, limit: int = 10) -> tuple[RepositoryLineBudget, ...]:
    """Repositories ranked by technical lines, descending, capped at *limit*."""
    ranked = sorted(budget.repositories, key=lambda repo: repo.technical_lines, reverse=True)
    return tuple(ranked[:limit])
def budget_focus_rows(budget: RoundLineBudget) -> list[list[str]]:
    """Ranked focus table for the largest repositories by technical lines."""

    def focus_for(repo: RepositoryLineBudget) -> str:
        # Mirrors the original branch order: missing > codeless > oversized.
        if not repo.exists:
            return "recuperar espelho"
        if repo.code_lines == 0:
            return "classificar ausencia de codigo"
        if repo.technical_lines > 100_000:
            return "usar scanner com budget"
        return "manter leitura normal"

    rows = [["rank", "repo", "code_lines", "technical_lines", "warnings", "focus"]]
    for rank, repo in enumerate(top_repositories_by_technical_lines(budget), start=1):
        rows.append(
            [
                str(rank),
                repo.repo_name,
                str(repo.code_lines),
                str(repo.technical_lines),
                " | ".join(repo.warnings),
                focus_for(repo),
            ]
        )
    return rows
def build_order_reason_from_budget(budget: RoundLineBudget) -> str:
    """One-sentence justification for the next service order."""
    if budget.reading_minimum_met and budget.production_minimum_met:
        return "Minimos de leitura e producao foram cumpridos; continuidade deve focar maturidade, nao volume."
    shortfalls: list[str] = []
    if not budget.reading_minimum_met:
        shortfalls.append(f"leitura tecnica ficou {budget.missing_reading_lines} linhas abaixo do minimo")
    if not budget.production_minimum_met:
        shortfalls.append(f"producao util ficou {budget.missing_production_lines} linhas abaixo do minimo")
    return "; ".join(shortfalls) + "."
def classify_repository_size(repo: RepositoryLineBudget) -> str:
    """Bucket a repository by technical line count (or flag it missing)."""
    if not repo.exists:
        return "missing"
    thresholds = (
        (250_000, "very_large"),
        (75_000, "large"),
        (10_000, "medium"),
        (1, "small"),
    )
    for floor, label in thresholds:
        if repo.technical_lines >= floor:
            return label
    return "empty"
def scan_strategy_for_repo(repo: RepositoryLineBudget) -> str:
    """Reading strategy recommendation for one repository, by size class."""
    strategies = {
        "missing": "recuperar ou clonar repositorio antes de exigir leitura",
        "very_large": "usar leitura amostrada por dominios e arquivos de contrato antes de varredura total",
        "large": "priorizar src, tests, contratos, README e reports recentes",
        "medium": "varredura completa e viavel com budget padrao",
        "small": "ler tudo e registrar limitacao se faltar base material",
    }
    fallback = "registrar ausencia de base tecnica e criar OS de estruturacao"
    return strategies.get(classify_repository_size(repo), fallback)
def scan_strategy_rows(budget: RoundLineBudget) -> list[list[str]]:
    """CSV rows pairing each repository with its size class and strategy."""
    rows = [["repo", "size", "strategy", "technical_lines", "code_lines"]]
    for repo in sorted(budget.repositories, key=lambda entry: entry.repo_name):
        rows.append(
            [
                repo.repo_name,
                classify_repository_size(repo),
                scan_strategy_for_repo(repo),
                str(repo.technical_lines),
                str(repo.code_lines),
            ]
        )
    return rows
def budget_strategy_markdown(budget: RoundLineBudget) -> str:
    """Markdown note describing the reading strategy per repository."""
    out = [
        "# Estrategia de leitura por budget",
        "",
        f"- status: `{production_status_label(budget)}`",
        f"- repositorios: `{len(budget.repositories)}`",
        "",
    ]
    ordered = sorted(budget.repositories, key=lambda entry: (classify_repository_size(entry), entry.repo_name))
    for repo in ordered:
        out += [
            f"## {repo.repo_name}",
            "",
            f"- tamanho: `{classify_repository_size(repo)}`",
            f"- linhas tecnicas: `{repo.technical_lines}`",
            f"- linhas codigo: `{repo.code_lines}`",
            f"- estrategia: {scan_strategy_for_repo(repo)}",
            "",
        ]
    return "\n".join(out).strip() + "\n"
def budget_repository_lookup(budget: RoundLineBudget, repo_name: str) -> RepositoryLineBudget | None:
    """Find a repository by name (case-insensitive; also matches the path basename)."""
    wanted = repo_name.lower()
    for candidate in budget.repositories:
        aliases = (candidate.repo_name.lower(), Path(candidate.repo_path).name.lower())
        if wanted in aliases:
            return candidate
    return None
def budget_completion_ratio(budget: RoundLineBudget) -> dict[str, float]:
    """Completion ratios (capped at 1.0) for reading, production, and their mean.

    A zero minimum counts as fully satisfied to avoid division by zero.
    """
    project = budget.project_budget
    project_code = 0 if project is None else project.code_lines
    if budget.reading_minimum:
        reading = min(1.0, budget.total_technical_lines / budget.reading_minimum)
    else:
        reading = 1.0
    if budget.production_minimum:
        production = min(1.0, project_code / budget.production_minimum)
    else:
        production = 1.0
    return {
        "reading": round(reading, 4),
        "production": round(production, 4),
        "combined": round((reading + production) / 2, 4),
    }
def budget_completion_sentence(budget: RoundLineBudget) -> str:
    """Human-readable completion summary built from the ratio payload."""
    ratios = budget_completion_ratio(budget)
    return (
        f"Leitura {ratios['reading']:.0%}, producao {ratios['production']:.0%}, "
        f"completude combinada {ratios['combined']:.0%}."
    )
def budget_is_operationally_sufficient(budget: RoundLineBudget) -> bool:
    """True when both reading and production ratios have reached 100%."""
    ratios = budget_completion_ratio(budget)
    return min(ratios["reading"], ratios["production"]) >= 1.0
def budget_gate_label(budget: RoundLineBudget) -> str:
    """Gate label: pass, attention (>=75% combined), partial (>=40%), else blocked."""
    if budget_is_operationally_sufficient(budget):
        return "pass"
    combined = budget_completion_ratio(budget)["combined"]
    if combined >= 0.75:
        return "attention"
    if combined >= 0.4:
        return "partial"
    return "blocked"
def budget_gate_summary(budget: RoundLineBudget) -> tuple[str, str]:
    """Pair of (gate label, completion sentence) for reporting."""
    label = budget_gate_label(budget)
    sentence = budget_completion_sentence(budget)
    return label, sentence
def budget_gate_passed(budget: RoundLineBudget) -> bool:
    """True when the line-budget gate resolves to the ``pass`` label."""
    label = budget_gate_label(budget)
    return label == "pass"
def budget_gate_failed(budget: RoundLineBudget) -> bool:
    """Inverse of :func:`budget_gate_passed`."""
    passed = budget_gate_passed(budget)
    return not passed

505
src/mais_humana/scanner.py Normal file
View File

@@ -0,0 +1,505 @@
"""Repository scanner for the human-centered platform.
The scanner is deliberately conservative. It extracts local evidence without
executing project code, without reading secrets, and without depending on a
particular package manager. The goal is not static analysis perfection; the
goal is repeatable operational context for human reports.
"""
from __future__ import annotations
import json
import os
import re
import subprocess
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Iterator, Sequence
from .catalog import CATEGORY_KEYWORDS, PLATFORMS, categories_for_text
from .models import Evidence, EvidenceKind, FileMetric, PlatformDefinition, PlatformScan, ScriptCommand
# Directory names pruned during the scan walk (VCS data, dependency trees,
# build output, framework caches).
SKIP_DIRS = {
    ".git",
    ".test-tmp",
    ".hg",
    ".svn",
    "node_modules",
    "dist",
    "build",
    "coverage",
    ".next",
    ".nuxt",
    ".wrangler",
    ".turbo",
    ".cache",
    "vendor",
    "__pycache__",
    ".pytest_cache",
    ".mypy_cache",
}
# Suffixes treated as readable text for metrics and evidence extraction.
TEXT_EXTENSIONS = {
    ".ts",
    ".tsx",
    ".js",
    ".mjs",
    ".cjs",
    ".py",
    ".java",
    ".json",
    ".md",
    ".mdx",
    ".yml",
    ".yaml",
    ".toml",
    ".txt",
    ".sql",
    ".html",
    ".css",
    ".scss",
    ".xml",
}
# Subset of TEXT_EXTENSIONS whose line counts are reported as code.
CODE_EXTENSIONS = {".ts", ".tsx", ".js", ".mjs", ".cjs", ".py", ".java"}
# Heuristic regexes for HTTP routes and calls:
# app/router verb handlers, URLPattern construction, `path:`/`route:` string
# literals, and fetch() targets. All case-insensitive.
ROUTE_PATTERNS = (
    re.compile(r"\bapp\.(get|post|put|patch|delete)\s*\(\s*['\"]([^'\"]+)", re.I),
    re.compile(r"\brouter\.(get|post|put|patch|delete)\s*\(\s*['\"]([^'\"]+)", re.I),
    re.compile(r"\bnew\s+URLPattern\s*\(\s*['\"]([^'\"]+)", re.I),
    re.compile(r"\b(path|route)\s*:\s*['\"](/[^'\"]+)['\"]", re.I),
    re.compile(r"\bfetch\s*\(\s*['\"](https?://[^'\"]+|/[^'\"]+)['\"]", re.I),
)
# Path fragments that mark a file as sensitive; such files are never read.
SENSITIVE_FILE_PARTS = (
    ".env",
    "secret",
    "secrets",
    "private",
    "key.pem",
    "id_rsa",
    "credential",
    "credentials",
)
@dataclass(slots=True)
class ScanOptions:
    """Options for local scan depth and safety."""

    max_file_bytes: int = 420_000  # files larger than this are not read or counted
    max_readme_chars: int = 4_000  # cap on the README excerpt length
    max_evidence_per_kind: int = 40  # cap per EvidenceKind bucket
    include_markdown_metrics: bool = True  # include .md/.mdx files in metrics
    include_json_metrics: bool = True  # include .json files in metrics
def is_sensitive_path(path: Path) -> bool:
    """True when any sensitive marker (env/secret/key/credential) appears in the path."""
    haystack = str(path).lower()
    for marker in SENSITIVE_FILE_PARTS:
        if marker in haystack:
            return True
    return False
def should_skip_dir(path: Path) -> bool:
    """Tell whether this directory name is on the scanner skip list."""
    name = path.name
    return name in SKIP_DIRS
def is_probably_text(path: Path) -> bool:
    """Heuristic: known text suffix, or a well-known config filename."""
    if path.suffix.lower() in TEXT_EXTENSIONS:
        return True
    return path.name.lower() in {"package.json", "wrangler.toml"}
def safe_relative(path: Path, base: Path) -> str:
    """Forward-slash path of *path* relative to *base*; full path on mismatch."""
    try:
        relative = path.relative_to(base)
    except ValueError:
        return str(path).replace("\\", "/")
    return str(relative).replace("\\", "/")
def iter_files(root: Path) -> Iterator[Path]:
    """Depth-first file iterator honoring the scanner skip list.

    Unreadable directories are silently ignored; yields nothing when
    *root* does not exist.
    """
    if not root.exists():
        return
    pending = [root]
    while pending:
        directory = pending.pop()
        try:
            children = sorted(directory.iterdir(), key=lambda child: child.name.lower())
        except OSError:
            continue
        for child in children:
            if child.is_dir():
                if not should_skip_dir(child):
                    pending.append(child)
            elif child.is_file():
                yield child
def count_lines(path: Path, max_bytes: int) -> int:
    """Count lines in a text file; oversized or unreadable files count as 0."""
    try:
        if path.stat().st_size > max_bytes:
            return 0
    except OSError:
        return 0
    try:
        with path.open("r", encoding="utf-8", errors="ignore") as stream:
            return sum(1 for _line in stream)
    except OSError:
        return 0
def read_text_limited(path: Path, max_bytes: int) -> str:
    """Read a file as UTF-8 text; empty string for oversized or unreadable files."""
    try:
        if path.stat().st_size > max_bytes:
            return ""
    except OSError:
        return ""
    try:
        return path.read_text(encoding="utf-8", errors="ignore")
    except OSError:
        return ""
def run_git(repo: Path, *args: str) -> str | None:
    """Run a git subcommand in *repo*; None on any failure or empty output.

    Never raises: subprocess/OS errors, nonzero exit codes, and timeouts
    all collapse to None.
    """
    command = ["git", *args]
    try:
        result = subprocess.run(
            command,
            cwd=str(repo),
            text=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            timeout=12,
            check=False,
        )
    except (OSError, subprocess.SubprocessError):
        return None
    if result.returncode != 0:
        return None
    output = result.stdout.strip()
    return output or None
def detect_git(repo: Path) -> tuple[bool, str | None, str | None, str | None]:
    """Detect a .git checkout and report (present, branch, head, origin URL)."""
    if not (repo / ".git").exists():
        return False, None, None, None
    return (
        True,
        run_git(repo, "rev-parse", "--abbrev-ref", "HEAD"),
        run_git(repo, "rev-parse", "HEAD"),
        run_git(repo, "remote", "get-url", "origin"),
    )
def script_intent(name: str, command: str) -> str:
    """Classify a package.json script by keyword heuristics (first match wins)."""
    haystack = f"{name} {command}".lower()
    rules = (
        ("test", ("test", "vitest", "pytest", "jest", "node --test")),
        ("build", ("build", "tsc", "vite build", "rollup", "webpack")),
        ("validation", ("smoke", "health", "readiness")),
        ("deploy", ("deploy", "wrangler deploy", "pages deploy")),
        ("generation", ("generate", "contract", "schema")),
        ("quality", ("lint", "format", "eslint", "prettier")),
    )
    for intent, tokens in rules:
        if any(token in haystack for token in tokens):
            return intent
    return "operation"
def load_package_scripts(repo: Path) -> tuple[ScriptCommand, ...]:
    """Parse package.json scripts into ScriptCommand entries (empty on any problem)."""
    package_path = repo / "package.json"
    if not package_path.exists() or is_sensitive_path(package_path):
        return ()
    try:
        payload = json.loads(package_path.read_text(encoding="utf-8", errors="ignore"))
    except (OSError, json.JSONDecodeError):
        return ()
    scripts = payload.get("scripts", {})
    if not isinstance(scripts, dict):
        return ()
    commands = [
        ScriptCommand(
            name=name,
            command=command,
            source_file="package.json",
            intent=script_intent(name, command),
        )
        for name, command in sorted(scripts.items())
        if isinstance(command, str)
    ]
    return tuple(commands)
def read_readme(repo: Path, max_chars: int) -> str:
    """Return a trimmed excerpt of the first README-like file found, else ''."""
    for candidate in (
        repo / "README.md",
        repo / "readme.md",
        repo / "README.txt",
        repo / "docs" / "README.md",
    ):
        if not candidate.exists() or is_sensitive_path(candidate):
            continue
        content = read_text_limited(candidate, max_chars * 8)
        return content[:max_chars].strip()
    return ""
def metric_for_file(path: Path, base: Path, options: ScanOptions) -> FileMetric | None:
    """Build a FileMetric for text-like files, honoring scan options and privacy."""
    suffix = path.suffix.lower()
    known_names = {"package.json", "wrangler.toml"}
    if suffix not in TEXT_EXTENSIONS and path.name.lower() not in known_names:
        return None
    if suffix in {".md", ".mdx"} and not options.include_markdown_metrics:
        return None
    if suffix == ".json" and not options.include_json_metrics:
        return None
    if is_sensitive_path(path):
        return None
    try:
        byte_count = path.stat().st_size
    except OSError:
        return None
    return FileMetric(
        path=safe_relative(path, base),
        extension=suffix or path.name.lower(),
        lines=count_lines(path, options.max_file_bytes),
        bytes_size=byte_count,
    )
def add_limited(bucket: dict[EvidenceKind, list[Evidence]], evidence: Evidence, limit: int) -> None:
    """Append evidence to its kind bucket unless the bucket already holds *limit* items."""
    existing = bucket.setdefault(evidence.kind, [])
    if len(existing) >= limit:
        return
    existing.append(evidence)
def evidence_from_filename(path: Path, base: Path) -> Evidence | None:
    """Derive a single evidence item from a file's name/path, or None.

    Sensitive paths never yield evidence; rule order determines priority.
    """
    if is_sensitive_path(path):
        return None
    relative = safe_relative(path, base)
    lowered = relative.lower()
    filename = path.name.lower()
    if filename.startswith("readme"):
        return Evidence(EvidenceKind.README, relative, "Documentacao inicial encontrada.", confidence=0.75, tags=("docs",))
    if "openapi" in lowered or "swagger" in lowered:
        return Evidence(EvidenceKind.OPENAPI, relative, "Arquivo com indicio de contrato OpenAPI.", confidence=0.8)
    if "test" in lowered or "spec" in lowered:
        return Evidence(EvidenceKind.TEST, relative, "Arquivo de teste ou especificacao encontrado.", confidence=0.72)
    if "wrangler" in filename or filename in {"package.json", "pyproject.toml", "tsconfig.json"}:
        return Evidence(EvidenceKind.CONFIG, relative, "Configuracao operacional encontrada.", confidence=0.65)
    if "worker" in lowered or "cloudflare" in lowered:
        return Evidence(EvidenceKind.WORKER, relative, "Indicador de Worker ou Cloudflare encontrado.", confidence=0.6)
    if "screen" in lowered or "view" in lowered or "ui" in lowered:
        return Evidence(EvidenceKind.UI_SURFACE, relative, "Possivel superficie visual encontrada.", confidence=0.55)
    if "mcp" in lowered or "tool" in lowered:
        return Evidence(EvidenceKind.MCP_TOOL, relative, "Possivel tool ou superficie MCP encontrada.", confidence=0.55)
    return None
def evidence_from_text(path: Path, base: Path, text: str, limit: int) -> tuple[Evidence, ...]:
    """Extract keyword and route evidence from file text, capped at *limit* items.

    For each known keyword, only the first line mentioning it is recorded;
    route-like matches are appended afterwards until the cap is reached.

    Fixes: removed the guard ``if len(line_index) > 100: break`` which could
    never fire (only 16 distinct keywords exist, so the dict never exceeds
    16 entries); removed the unused local ``lowered = text.lower()``; renamed
    the loop variable that shadowed ``line`` across the two loops.
    """
    relative = safe_relative(path, base)
    output: list[Evidence] = []
    # First line number at which each keyword was seen.
    line_index: dict[str, int] = {}
    keywords = (
        "health",
        "readiness",
        "openapi",
        "audit",
        "trace",
        "rbac",
        "byok",
        "credentialref",
        "panelready",
        "samesource",
        "entitlement",
        "invoice",
        "incident",
        "support",
        "screen",
        "mcp",
    )
    for line_number, raw_line in enumerate(text.splitlines(), start=1):
        normalized = raw_line.lower()
        for key in keywords:
            if key in normalized and key not in line_index:
                line_index[key] = line_number
    for key, line_number in line_index.items():
        if len(output) >= limit:
            break
        output.append(
            Evidence(
                kind=kind_for_keyword(key),
                path=relative,
                line=line_number,
                summary=f"Texto menciona '{key}', sinalizando capacidade humana ou operacional.",
                confidence=confidence_for_keyword(key),
                tags=tuple(category.value for category in categories_for_text(key)),
            )
        )
    for pattern in ROUTE_PATTERNS:
        for match in pattern.finditer(text):
            if len(output) >= limit:
                break
            # lastindex selects the route group in two-group patterns (verb, route).
            route = match.group(match.lastindex or 1)
            if not route:
                continue
            output.append(
                Evidence(
                    kind=EvidenceKind.ROUTE,
                    path=relative,
                    summary=f"Rota ou chamada HTTP detectada: {route}",
                    confidence=0.66,
                    tags=("route",),
                )
            )
        if len(output) >= limit:
            break
    return tuple(output)
def kind_for_keyword(keyword: str) -> EvidenceKind:
    """Map a detected keyword onto its evidence kind (UNKNOWN as fallback)."""
    mapping = {
        "openapi": EvidenceKind.OPENAPI,
        "audit": EvidenceKind.OBSERVABILITY,
        "trace": EvidenceKind.OBSERVABILITY,
        "health": EvidenceKind.OBSERVABILITY,
        "readiness": EvidenceKind.OBSERVABILITY,
        "rbac": EvidenceKind.SECURITY,
        "credentialref": EvidenceKind.SECURITY,
        "byok": EvidenceKind.SECURITY,
        "panelready": EvidenceKind.UI_SURFACE,
        "samesource": EvidenceKind.UI_SURFACE,
        "screen": EvidenceKind.UI_SURFACE,
        "mcp": EvidenceKind.MCP_TOOL,
        "entitlement": EvidenceKind.BUSINESS_RULE,
        "invoice": EvidenceKind.BUSINESS_RULE,
        "incident": EvidenceKind.UNKNOWN,
        "support": EvidenceKind.UNKNOWN,
    }
    return mapping.get(keyword.lower(), EvidenceKind.UNKNOWN)
def confidence_for_keyword(keyword: str) -> float:
    """Confidence score for a keyword hit: strong 0.78, medium 0.68, else 0.55."""
    lowered = keyword.lower()
    if lowered in {"openapi", "panelready", "samesource", "credentialref", "rbac", "byok"}:
        return 0.78
    if lowered in {"health", "readiness", "audit", "trace", "entitlement", "invoice"}:
        return 0.68
    return 0.55
def classify_warnings(scan: PlatformScan) -> tuple[str, ...]:
    """Augment existing scan warnings with derived gaps, deduplicated in order."""
    warnings = list(scan.warnings)
    if not scan.exists:
        warnings.append("repositorio real nao encontrado")
    else:
        if not scan.git_present:
            warnings.append("repositorio real existe sem .git")
        if not scan.readme_excerpt:
            warnings.append("README tecnico nao encontrado")
        if scan.code_lines == 0:
            warnings.append("nenhuma linha de codigo TS/JS/Python/Java encontrada")
        if not scan.has_tests:
            warnings.append("testes nao encontrados por varredura local")
        if not scan.has_openapi:
            warnings.append("contrato OpenAPI nao encontrado por varredura local")
    return tuple(dict.fromkeys(warnings))
def scan_platform(root: Path, platform: PlatformDefinition, options: ScanOptions | None = None) -> PlatformScan:
    """Scan one platform mirror and return its evidence-laden PlatformScan.

    Collects package-script evidence, file metrics, filename evidence, and
    text evidence (for code/markdown/json files that are not sensitive).

    Fix: the original declared a local ``warnings`` list that was never
    appended to; it is removed and the first scan is built with empty
    warnings directly. The scan is still constructed twice on purpose: the
    first instance feeds ``classify_warnings``, which needs a complete scan
    to derive the warning set carried by the returned instance.
    """
    options = options or ScanOptions()
    repo = root / platform.repo_name
    exists = repo.exists()
    git_present, branch, head, remote = detect_git(repo) if exists else (False, None, None, None)
    readme = read_readme(repo, options.max_readme_chars) if exists else ""
    metrics: list[FileMetric] = []
    evidence_bucket: dict[EvidenceKind, list[Evidence]] = {}
    scripts = load_package_scripts(repo) if exists else ()
    for script in scripts:
        add_limited(
            evidence_bucket,
            Evidence(
                EvidenceKind.PACKAGE_SCRIPT,
                script.source_file,
                f"Script '{script.name}' com intencao '{script.intent}'.",
                confidence=0.62,
                tags=(script.intent,),
            ),
            options.max_evidence_per_kind,
        )
    if exists:
        for file_path in iter_files(repo):
            metric = metric_for_file(file_path, repo, options)
            if metric is not None:
                metrics.append(metric)
            filename_evidence = evidence_from_filename(file_path, repo)
            if filename_evidence is not None:
                add_limited(evidence_bucket, filename_evidence, options.max_evidence_per_kind)
            if file_path.suffix.lower() in CODE_EXTENSIONS | {".md", ".json"} and not is_sensitive_path(file_path):
                text = read_text_limited(file_path, options.max_file_bytes)
                if text:
                    for item in evidence_from_text(file_path, repo, text, limit=6):
                        add_limited(evidence_bucket, item, options.max_evidence_per_kind)
    # Flatten buckets in deterministic kind order.
    evidence: list[Evidence] = []
    for kind in sorted(evidence_bucket, key=lambda item: item.value):
        evidence.extend(evidence_bucket[kind])
    scan = PlatformScan(
        platform=platform,
        repo_path=str(repo),
        exists=exists,
        git_present=git_present,
        branch=branch,
        head=head,
        remote_origin=remote,
        readme_excerpt=readme,
        file_metrics=tuple(metrics),
        scripts=scripts,
        evidence=tuple(evidence),
        warnings=(),
    )
    return PlatformScan(
        platform=scan.platform,
        repo_path=scan.repo_path,
        exists=scan.exists,
        git_present=scan.git_present,
        branch=scan.branch,
        head=scan.head,
        remote_origin=scan.remote_origin,
        readme_excerpt=scan.readme_excerpt,
        file_metrics=scan.file_metrics,
        scripts=scan.scripts,
        evidence=scan.evidence,
        warnings=classify_warnings(scan),
        scanned_at=scan.scanned_at,
    )
def scan_ecosystem(root: Path, platforms: Sequence[PlatformDefinition] = PLATFORMS) -> tuple[PlatformScan, ...]:
    """Scan every cataloged platform mirror under *root*."""
    scans = [scan_platform(root, platform) for platform in platforms]
    return tuple(scans)
def summarize_extensions(metrics: Iterable[FileMetric]) -> dict[str, int]:
    """Aggregate total line counts per file extension, biggest totals first.

    Ties are broken alphabetically by extension name.
    """
    totals: dict[str, int] = {}
    for entry in metrics:
        totals[entry.extension] = totals.get(entry.extension, 0) + entry.lines
    ordered = sorted(totals.items(), key=lambda pair: (-pair[1], pair[0]))
    return dict(ordered)
def detect_human_keywords(scan: PlatformScan) -> dict[str, int]:
    """Count, per human category, how many evidence items mention its keywords.

    Results are sorted by descending count, then category name.
    """
    tally: dict[str, int] = {}
    for item in scan.evidence:
        haystack = f"{item.summary} {' '.join(item.tags)}".lower()
        for category, keywords in CATEGORY_KEYWORDS.items():
            if any(word.lower() in haystack for word in keywords):
                tally[category.value] = tally.get(category.value, 0) + 1
    return dict(sorted(tally.items(), key=lambda pair: (-pair[1], pair[0])))
def list_candidate_roots(root: Path) -> tuple[str, ...]:
    """List child directory names under *root* carrying the ecosystem prefix.

    Returns an empty tuple when *root* does not exist; names are ordered
    case-insensitively.
    """
    if not root.exists():
        return ()
    names = [
        entry.name
        for entry in sorted(root.iterdir(), key=lambda item: item.name.lower())
        if entry.is_dir() and entry.name.startswith("tudo-para-ia-")
    ]
    return tuple(names)
def environment_summary(root: Path) -> dict[str, object]:
    """Describe the scan environment rooted at *root* as plain JSON-able data."""
    summary: dict[str, object] = {}
    summary["root"] = str(root)
    summary["root_exists"] = root.exists()
    summary["candidate_repositories"] = list_candidate_roots(root)
    summary["platform_catalog_size"] = len(PLATFORMS)
    summary["skip_dirs"] = sorted(SKIP_DIRS)
    return summary

View File

@@ -0,0 +1,510 @@
"""Service-order discovery, parsing, and lifecycle closeout utilities."""
from __future__ import annotations
import re
from pathlib import Path
from typing import Iterable, Mapping, Sequence
from .catalog import PLATFORM_BY_ID
from .governance_models import (
EcosystemGovernancePortfolio,
GovernanceEvidence,
GovernanceEvidenceKind,
GovernanceOrderCandidate,
OrderLifecycleDecision,
OrderLifecycleStatus,
ParsedServiceOrder,
RoundExecutionPackage,
RoundMinimum,
minimum_status,
)
from .models import OrderType, merge_unique, slugify, utc_now
from .operational_models import ExecutionRoundDossier, stable_digest
# Matches order file names like "0001_EXECUTIVA__slug.md" (case-insensitive).
ORDER_FILE_RE = re.compile(r"^\d{4}_(EXECUTIVA|GERENCIAL)__.+\.md$", re.I)
# Matches identification bullets of the form "- Key: `value`" (backticks optional).
IDENT_RE = re.compile(r"^\s*-\s*([A-Za-z0-9_\- ]+)\s*:\s*`?([^`\n]+)`?\s*$")
# Matches markdown headings of levels 1 through 4, capturing marker and text.
HEADING_RE = re.compile(r"^(#{1,4})\s+(.+?)\s*$")
def normalize_status(value: str) -> OrderLifecycleStatus:
    """Map a free-form status string (Portuguese or English) to a lifecycle status.

    Common Portuguese accents are stripped and spaces/hyphens collapsed to
    underscores before lookup; unrecognized values fall back to
    ``OrderLifecycleStatus.UNKNOWN``.
    """
    cleaned = value.strip().lower().replace("ç", "c").replace("ã", "a").replace("í", "i")
    cleaned = cleaned.replace(" ", "_").replace("-", "_")
    mapping = {
        "planejada": OrderLifecycleStatus.PLANNED,
        "planned": OrderLifecycleStatus.PLANNED,
        "em_execucao": OrderLifecycleStatus.RUNNING,
        "running": OrderLifecycleStatus.RUNNING,
        # Fix: the original literal listed the "concluida" key twice; the
        # duplicate was dead code (later entry silently overwrote the first).
        "concluida": OrderLifecycleStatus.COMPLETED,
        "completed": OrderLifecycleStatus.COMPLETED,
        "parcial": OrderLifecycleStatus.PARTIAL,
        "partial": OrderLifecycleStatus.PARTIAL,
        "bloqueada": OrderLifecycleStatus.BLOCKED,
        "blocked": OrderLifecycleStatus.BLOCKED,
        "substituida": OrderLifecycleStatus.SUPERSEDED,
        "superseded": OrderLifecycleStatus.SUPERSEDED,
    }
    return mapping.get(cleaned, OrderLifecycleStatus.UNKNOWN)
def normalize_order_type(value: str) -> OrderType:
    """Classify *value* as MANAGERIAL when it mentions "gerencial", else EXECUTIVE."""
    return OrderType.MANAGERIAL if "gerencial" in value.lower() else OrderType.EXECUTIVE
def detect_order_type_from_name(path: Path) -> OrderType:
    """Infer the order type from the "_GERENCIAL__" marker in the file name."""
    if "_GERENCIAL__" in path.name.upper():
        return OrderType.MANAGERIAL
    return OrderType.EXECUTIVE
def split_markdown_headings(text: str) -> dict[str, str]:
    """Split markdown *text* into stripped section bodies keyed by slugified heading.

    Content preceding the first heading lives under the "__root__" key;
    repeated headings sharing a slug have their bodies concatenated.
    """
    sections: dict[str, list[str]] = {"__root__": []}
    active = "__root__"
    for raw_line in text.splitlines():
        heading = HEADING_RE.match(raw_line)
        if heading is not None:
            active = slugify(heading.group(2))
            sections.setdefault(active, [])
        else:
            sections.setdefault(active, []).append(raw_line)
    return {slug: "\n".join(body).strip() for slug, body in sections.items()}
def parse_identification(text: str) -> dict[str, str]:
    """Extract "- Key: value" identification bullets into a slug-keyed dict.

    Keys are slugified with hyphens normalized to underscores; later bullets
    with the same key overwrite earlier ones.
    """
    parsed: dict[str, str] = {}
    for raw_line in text.splitlines():
        found = IDENT_RE.match(raw_line)
        if found is None:
            continue
        field = slugify(found.group(1)).replace("-", "_")
        parsed[field] = found.group(2).strip()
    return parsed
def extract_section(headings: Mapping[str, str], *names: str) -> str:
    """Return the first section whose slugified name exists in *headings*, else ""."""
    for candidate in names:
        slug = slugify(candidate)
        if slug in headings:
            return headings[slug].strip()
    return ""
def extract_bullets_or_paths(text: str) -> tuple[str, ...]:
    """Collect unique "- item" bullets from *text*, unwrapping backtick quoting."""
    collected: list[str] = []
    for raw_line in text.splitlines():
        candidate = raw_line.strip()
        if not candidate.startswith("-"):
            continue
        entry = candidate.lstrip("-").strip()
        if entry.startswith("`") and entry.endswith("`"):
            entry = entry.strip("`")
        if entry:
            collected.append(entry)
    return merge_unique(collected)
def platform_hint_from_text(text: str) -> str:
    """Guess which catalog platform *text* refers to; fall back to "ecosystem".

    Longest platform ids are tried first so more specific ids win; matches are
    attempted against the raw id, its hyphenated form, and the repo name.
    """
    haystack = text.lower().replace("\\", "/")
    for platform_id in sorted(PLATFORM_BY_ID, key=len, reverse=True):
        plain = platform_id.lower()
        hyphenated = plain.replace("_", "-")
        repo_name = PLATFORM_BY_ID[platform_id].repo_name.lower()
        if plain in haystack or hyphenated in haystack or repo_name in haystack:
            return platform_id
    return "ecosystem"
def title_from_order_id(order_id: str, fallback: str) -> str:
    """Derive a human title from an order id's "__" suffix, or use *fallback*.

    When *fallback* is blank and no "__" separator exists, the id itself is
    returned unchanged.
    """
    _, separator, remainder = order_id.partition("__")
    if separator:
        return remainder.replace("-", " ").strip().capitalize()
    cleaned = fallback.strip()
    return cleaned if cleaned else order_id
def parse_order_markdown(path: Path) -> ParsedServiceOrder:
    """Parse one service-order markdown file into a ParsedServiceOrder.

    Reads the file tolerantly (decode errors ignored), splits it into
    slug-keyed sections, then extracts identification bullets and the known
    Portuguese sections. Missing fields fall back to defaults derived from
    the file name.
    """
    text = path.read_text(encoding="utf-8", errors="ignore")
    headings = split_markdown_headings(text)
    ident = parse_identification(text)
    # The order id defaults to the file name (without extension) when the
    # identification block omits it.
    order_id = ident.get("order_id") or path.stem
    order_type = normalize_order_type(ident.get("tipo", path.name))
    if not ident.get("tipo"):
        # No explicit "tipo" bullet: fall back to the filename marker.
        order_type = detect_order_type_from_name(path)
    status = normalize_status(ident.get("status", "desconhecida"))
    priority = ident.get("prioridade", "media")
    # Each section is looked up under both accent-free and accented headings.
    purpose = extract_section(headings, "Finalidade da ordem de servico", "Finalidade da ordem de serviço")
    object_scope = extract_section(headings, "Objeto da ordem de servico", "Objeto da ordem de serviço")
    reason = extract_section(headings, "Motivo da criacao da ordem de servico", "Motivo da criação da ordem de serviço")
    expected = extract_section(headings, "Resultado esperado da execucao", "Resultado esperado da execução")
    affected_text = extract_section(headings, "Arquivos e areas afetadas", "Arquivos e áreas afetadas")
    validations_text = extract_section(headings, "Validacoes", "Validações")
    # The platform hint scans the whole document plus the most relevant sections.
    merged_text = "\n".join([text, object_scope, reason, affected_text])
    return ParsedServiceOrder(
        path=str(path),
        order_id=order_id,
        order_type=order_type,
        project_id=ident.get("project_id", "tudo-para-ia-mais-humana"),
        status=status,
        priority=priority,
        title=title_from_order_id(order_id, purpose.splitlines()[0] if purpose else path.stem),
        platform_hint=platform_hint_from_text(merged_text),
        purpose=purpose,
        object_scope=object_scope,
        reason=reason,
        expected_result=expected,
        affected_paths=extract_bullets_or_paths(affected_text),
        validations=extract_bullets_or_paths(validations_text),
        raw_headings=headings,
    )
def discover_order_files(platform_folder: Path) -> tuple[Path, ...]:
    """Find order markdown files under orders/executivas and orders/gerenciais.

    Only files matching the canonical order-name pattern are returned,
    sorted by name within each folder.
    """
    found: list[Path] = []
    orders_root = platform_folder / "orders"
    for bucket in ("executivas", "gerenciais"):
        folder = orders_root / bucket
        if not folder.exists():
            continue
        for candidate in sorted(folder.glob("*.md"), key=lambda item: item.name):
            if ORDER_FILE_RE.match(candidate.name):
                found.append(candidate)
    return tuple(found)
def discover_orders(platform_folder: Path) -> tuple[ParsedServiceOrder, ...]:
    """Parse every discoverable order file, skipping ones that cannot be read."""
    parsed: list[ParsedServiceOrder] = []
    for order_path in discover_order_files(platform_folder):
        try:
            parsed.append(parse_order_markdown(order_path))
        except OSError:
            # One unreadable file must not abort discovery of the rest.
            continue
    return tuple(parsed)
def evidence_for_card(portfolio: EcosystemGovernancePortfolio, platform_id: str, limit: int = 8) -> tuple[GovernanceEvidence, ...]:
    """Collect up to *limit* evidence items for a platform's governance card.

    Checks are visited worst-score-first so the weakest areas surface their
    evidence. A platform missing from the portfolio yields a single ABSENCE
    item; a card with no check evidence yields a single DERIVED summary item.
    """
    card = portfolio.card_for(platform_id)
    if card is None:
        return (
            GovernanceEvidence(
                path=platform_id,
                summary="Plataforma nao encontrada no portfolio de governanca.",
                kind=GovernanceEvidenceKind.ABSENCE,
                confidence=0.5,
            ),
        )
    evidence: list[GovernanceEvidence] = []
    # Lowest scores first: surface evidence from the weakest checks.
    for check in sorted(card.checks, key=lambda item: (item.score, item.title)):
        evidence.extend(check.evidence)
        if len(evidence) >= limit:
            break
    if not evidence:
        # No check carried direct evidence; synthesize one derived summary.
        evidence.append(
            GovernanceEvidence(
                path=card.repo_path,
                summary=f"Card de governanca {card.status_label} com score {card.governance_score}.",
                kind=GovernanceEvidenceKind.DERIVED,
                confidence=0.65,
            )
        )
    return tuple(evidence[:limit])
def candidates_for_platform(portfolio: EcosystemGovernancePortfolio, platform_id: str) -> tuple[GovernanceOrderCandidate, ...]:
    """Filter the portfolio's order candidates down to one platform."""
    matches = [item for item in portfolio.order_candidates if item.platform_id == platform_id]
    return tuple(matches)
def decision_status_for_order(order: ParsedServiceOrder, portfolio: EcosystemGovernancePortfolio) -> OrderLifecycleStatus:
    """Derive the final lifecycle status for *order* from its platform card."""
    # Terminal statuses already recorded on the order are kept untouched.
    if order.status in {OrderLifecycleStatus.COMPLETED, OrderLifecycleStatus.SUPERSEDED}:
        return order.status
    card = portfolio.card_for(order.platform_hint)
    if card is None:
        return OrderLifecycleStatus.BLOCKED
    if card.blockers:
        has_critical = any(check.severity.value == "critical" for check in card.blockers)
        return OrderLifecycleStatus.BLOCKED if has_critical else OrderLifecycleStatus.PARTIAL
    if card.warnings and "resolver-ou-formalizar-bloqueios" in order.order_id:
        return OrderLifecycleStatus.PARTIAL
    return OrderLifecycleStatus.COMPLETED
def pending_items_for_order(order: ParsedServiceOrder, portfolio: EcosystemGovernancePortfolio) -> tuple[str, ...]:
    """List deduplicated pending actions tied to the order's platform card.

    Blockers (first eight) take precedence; warnings (first five) are used
    only for PARTIAL orders with no blockers at all.
    """
    card = portfolio.card_for(order.platform_hint)
    if card is None:
        return ("plataforma relacionada nao encontrada no portfolio de governanca",)
    pending = [f"{check.title}: {check.next_action}" for check in card.blockers[:8]]
    if not pending and order.status == OrderLifecycleStatus.PARTIAL:
        pending = [f"{check.title}: {check.next_action}" for check in card.warnings[:5]]
    return merge_unique(pending)
def resulting_candidates_for_order(order: ParsedServiceOrder, portfolio: EcosystemGovernancePortfolio) -> tuple[str, ...]:
    """Pick up to five candidate ids, preferring candidates of the order's type.

    When no candidate shares the order's type, all platform candidates are
    eligible.
    """
    available = candidates_for_platform(portfolio, order.platform_hint)
    same_type = [item.candidate_id for item in available if item.order_type == order.order_type]
    chosen = same_type if same_type else [item.candidate_id for item in available]
    return tuple(chosen[:5])
def validation_steps_for_order(order: ParsedServiceOrder, portfolio: EcosystemGovernancePortfolio) -> tuple[str, ...]:
    """Assemble up to twelve unique validation steps for closing the order.

    Starts with the order's own validations, appends steps from platform
    checks that need an order (capped at roughly ten), then always appends
    three standard closeout steps before deduplicating.
    """
    steps: list[str] = list(order.validations)
    card = portfolio.card_for(order.platform_hint)
    if card is not None:
        for check in card.checks:
            if check.needs_order:
                steps.extend(check.validation_steps)
            # NOTE: the cap is evaluated after every check, even ones that
            # contributed nothing, so the loop may stop early.
            if len(steps) >= 10:
                break
    steps.extend(("regenerar portfolio de governanca", "atualizar SQL semantico", "registrar pendencias reais"))
    return merge_unique(steps)[:12]
def decide_order(order: ParsedServiceOrder, portfolio: EcosystemGovernancePortfolio) -> OrderLifecycleDecision:
    """Produce the full lifecycle decision for one parsed service order.

    Combines the derived final status with a human-readable reason, card
    evidence, pending items (only for PARTIAL/BLOCKED), follow-up candidate
    ids, and validation steps.
    """
    final_status = decision_status_for_order(order, portfolio)
    # Pending items only make sense when the order is not fully closed.
    pending = pending_items_for_order(order, portfolio) if final_status in {OrderLifecycleStatus.PARTIAL, OrderLifecycleStatus.BLOCKED} else ()
    reason = "Ordem tratada na rodada por avaliacao de governanca operacional."
    if final_status == OrderLifecycleStatus.COMPLETED:
        reason = "A ordem foi considerada concluida ou ja constava concluida, com evidencia suficiente para fechamento."
    elif final_status == OrderLifecycleStatus.PARTIAL:
        reason = "A ordem foi executada como formalizacao/avanco parcial; restam pendencias materiais."
    elif final_status == OrderLifecycleStatus.BLOCKED:
        reason = "A ordem depende de bloqueio material ou evidencia ausente que nao pode ser resolvida nesta rodada."
    elif final_status == OrderLifecycleStatus.SUPERSEDED:
        reason = "A ordem foi substituida por continuidade mais especifica."
    return OrderLifecycleDecision(
        order=order,
        final_status=final_status,
        platform_id=order.platform_hint,
        reason=reason,
        evidence=evidence_for_card(portfolio, order.platform_hint),
        pending_items=pending,
        resulting_candidates=resulting_candidates_for_order(order, portfolio),
        validation_steps=validation_steps_for_order(order, portfolio),
    )
def output_candidates_by_type(candidates: Sequence[GovernanceOrderCandidate], order_type: OrderType) -> tuple[GovernanceOrderCandidate, ...]:
    """Keep only the candidates whose order type equals *order_type*."""
    matching = [entry for entry in candidates if entry.order_type == order_type]
    return tuple(matching)
def build_round_minimums(
    decisions: Sequence[OrderLifecycleDecision],
    output_candidates: Sequence[GovernanceOrderCandidate],
    total_code_lines_analyzed: int,
    code_lines_available: int,
) -> tuple[RoundMinimum, ...]:
    """Evaluate the six contractual minimums of an operational round.

    Counts executed and produced orders by type, and checks the reading and
    production line thresholds. The two line-based minimums are marked
    "impossible" when the respective count is zero (no material base).
    """
    executed_executive = sum(1 for decision in decisions if decision.order.order_type == OrderType.EXECUTIVE)
    executed_managerial = sum(1 for decision in decisions if decision.order.order_type == OrderType.MANAGERIAL)
    output_executive = len(output_candidates_by_type(output_candidates, OrderType.EXECUTIVE))
    output_managerial = len(output_candidates_by_type(output_candidates, OrderType.MANAGERIAL))
    # RoundMinimum positional fields: id, title, required, actual, status,
    # reason, fallback action when the minimum is missed.
    return (
        RoundMinimum(
            "executive-cycle",
            "Executar ao menos 5 ordens executivas",
            5,
            executed_executive,
            minimum_status(executed_executive, 5),
            "Ordens executivas foram descobertas e tratadas pela rodada.",
            "criar OS executiva padrao se o ciclo ficar abaixo do minimo",
        ),
        RoundMinimum(
            "managerial-cycle",
            "Executar ao menos 5 ordens gerenciais",
            5,
            executed_managerial,
            minimum_status(executed_managerial, 5),
            "Ordens gerenciais foram descobertas e tratadas pela rodada.",
            "executar ordem gerencial comum de fechamento",
        ),
        RoundMinimum(
            "executive-output",
            "Criar ao menos 5 ordens executivas de saida",
            5,
            output_executive,
            minimum_status(output_executive, 5),
            "Candidatas executivas foram criadas a partir de checks de governanca.",
            "converter checks bloqueantes remanescentes em OS executivas",
        ),
        RoundMinimum(
            "managerial-output",
            "Criar ao menos 5 ordens gerenciais de saida",
            5,
            output_managerial,
            minimum_status(output_managerial, 5),
            "Candidatas gerenciais foram criadas a partir de checks de governanca.",
            "converter gaps de maturidade em OS gerenciais",
        ),
        RoundMinimum(
            "reading-minimum",
            "Analisar ao menos 10.000 linhas tecnicas/documentais quando houver base",
            10000,
            total_code_lines_analyzed,
            minimum_status(total_code_lines_analyzed, 10000, impossible=total_code_lines_analyzed == 0),
            "Leitura registrada pelo scanner e/ou pelas plataformas relacionadas.",
            "ampliar escopo de leitura ou registrar ausencia material",
        ),
        RoundMinimum(
            "production-minimum",
            "Produzir ao menos 5.500 linhas uteis em Python/TS/JS/Java quando houver base",
            5500,
            code_lines_available,
            minimum_status(code_lines_available, 5500, impossible=code_lines_available == 0),
            "Linhas de codigo disponiveis no projeto real depois da rodada.",
            "ampliar engine Python de governanca sem enchimento artificial",
        ),
    )
def build_round_execution_package(
    platform_folder: Path,
    portfolio: EcosystemGovernancePortfolio,
    round_dossier: ExecutionRoundDossier | None = None,
    total_code_lines_analyzed: int = 0,
    code_lines_available: int = 0,
) -> RoundExecutionPackage:
    """Discover, decide, and package the full order lifecycle for one round.

    Parses every order under *platform_folder*, decides each against the
    governance portfolio, evaluates round minimums, and derives the queue of
    orders/candidates that stay active after the round.
    """
    parsed_orders = discover_orders(platform_folder)
    decisions = tuple(decide_order(order, portfolio) for order in parsed_orders)
    candidates = portfolio.order_candidates
    minimums = build_round_minimums(decisions, candidates, total_code_lines_analyzed, code_lines_available)
    completed = sum(1 for decision in decisions if decision.final_status == OrderLifecycleStatus.COMPLETED)
    partial = sum(1 for decision in decisions if decision.final_status == OrderLifecycleStatus.PARTIAL)
    blocked = sum(1 for decision in decisions if decision.final_status == OrderLifecycleStatus.BLOCKED)
    # At most 20 candidates remain "active"; dossier output orders go first.
    active_after = tuple(candidate.candidate_id for candidate in candidates[:20])
    if round_dossier is not None:
        active_after = merge_unique(tuple(round_dossier.output_orders) + active_after)
    # utc_now() in the digest makes each generated round id unique per run.
    digest = stable_digest((platform_folder, [decision.compact_line for decision in decisions], active_after, utc_now()), length=10)
    return RoundExecutionPackage(
        round_id=f"mais-humana-lifecycle-{digest}",
        project_id="tudo-para-ia-mais-humana",
        parsed_orders=parsed_orders,
        decisions=decisions,
        minimums=minimums,
        output_candidates=candidates,
        active_after_round=active_after,
        completed_count=completed,
        partial_count=partial,
        blocked_count=blocked,
    )
def lifecycle_execution_markdown(package: RoundExecutionPackage) -> str:
    """Render the round-closeout execution report as markdown.

    Emits the round header, the minimums table, then one section per order
    decision with its file, type, platform, status, reason, linked
    candidates, and pending items.
    """
    lines = [
        "# EXECUTADO - Fechamento de ordens ativas",
        "",
        f"- round_id: `{package.round_id}`",
        f"- project_id: `{package.project_id}`",
        f"- generated_at: `{package.generated_at}`",
        f"- status: `{package.success_label}`",
        f"- ordens lidas: `{len(package.parsed_orders)}`",
        f"- concluidas: `{package.completed_count}`",
        f"- parciais: `{package.partial_count}`",
        f"- bloqueadas: `{package.blocked_count}`",
        "",
        "## Minimos da rodada",
        "",
    ]
    for minimum in package.minimums:
        lines.append(
            f"- `{minimum.minimum_id}` {minimum.title}: `{minimum.actual_value}/{minimum.required_value}` "
            f"status `{minimum.status.value}` - {minimum.reason}"
        )
    lines.extend(["", "## Decisoes por ordem", ""])
    for decision in package.decisions:
        lines.append(f"### {decision.order.order_id}")
        lines.append("")
        lines.append(f"- arquivo: `{decision.order.path}`")
        lines.append(f"- tipo: `{decision.order.order_type.value}`")
        lines.append(f"- plataforma: `{decision.platform_id}`")
        lines.append(f"- status_final: `{decision.final_status.value}`")
        lines.append(f"- razao: {decision.reason}")
        if decision.resulting_candidates:
            lines.append("- candidatas vinculadas: " + ", ".join(f"`{item}`" for item in decision.resulting_candidates))
        if decision.pending_items:
            lines.append("- pendencias:")
            for pending in decision.pending_items:
                lines.append(f"  - {pending}")
        else:
            lines.append("- pendencias: nenhuma pendencia material associada")
        lines.append("")
    return "\n".join(lines).strip() + "\n"
def lifecycle_pending_markdown(package: RoundExecutionPackage) -> str:
    """Render the round's consolidated pending items as markdown."""
    body = ["# PENDENCIAS-CODEX - Fechamento de ordens ativas", ""]
    if package.pending_items:
        for item in package.pending_items:
            body.append(f"- {item}")
    else:
        body.append("- Nenhuma pendencia material consolidada pelo lifecycle.")
    return "\n".join(body).strip() + "\n"
def lifecycle_audit_markdown(package: RoundExecutionPackage) -> str:
    """Render the audit report: confirmed counts, minimums, and per-decision evidence.

    Each decision section lists at most eight evidence items; decisions with
    no evidence are explicitly marked as such.
    """
    lines = [
        "# AUDITORIA-GPT - Fechamento de ordens ativas",
        "",
        "## Confirmado",
        "",
        f"- Ordens descobertas na pasta indicada: `{len(package.parsed_orders)}`",
        f"- Ordens concluidas: `{package.completed_count}`",
        f"- Ordens parciais: `{package.partial_count}`",
        f"- Ordens bloqueadas: `{package.blocked_count}`",
        f"- Ordens/candidatas ativas apos rodada: `{len(package.active_after_round)}`",
        "",
        "## Minimos",
        "",
    ]
    for minimum in package.minimums:
        lines.append(f"- `{minimum.minimum_id}`: `{minimum.status.value}` ({minimum.actual_value}/{minimum.required_value})")
    lines.extend(["", "## Evidencias por decisao", ""])
    for decision in package.decisions:
        lines.append(f"### {decision.order.order_id}")
        lines.append("")
        # Cap evidence output at eight entries per decision.
        for evidence in decision.evidence[:8]:
            lines.append(f"- `{evidence.reference}` - {evidence.summary}")
        if not decision.evidence:
            lines.append("- nenhuma evidencia direta registrada")
        lines.append("")
    return "\n".join(lines).strip() + "\n"
def lifecycle_queue_markdown(package: RoundExecutionPackage) -> str:
    """Render the queue of orders/candidates still active after the round."""
    entries = package.active_after_round
    body = ["# Fila ativa apos rodada", ""]
    if entries:
        body.extend(f"- `{entry}`" for entry in entries)
    else:
        body.append("- Nenhuma ordem ativa de saida foi registrada.")
    return "\n".join(body).strip() + "\n"
def lifecycle_jsonable(package: RoundExecutionPackage) -> dict[str, object]:
    """Expose the round package as plain JSON-serializable data."""
    data = package.to_dict()
    return data
def write_lifecycle_artifacts(platform_folder: Path, package: RoundExecutionPackage) -> tuple[Path, ...]:
    """Write the round's lifecycle markdown artifacts under *platform_folder*.

    Creates reports/audit/current/indexes folders when missing and returns
    the paths that were written.
    """
    reports_dir = platform_folder / "reports"
    audit_dir = platform_folder / "audit"
    current_dir = platform_folder / "current"
    indexes_dir = platform_folder / "indexes"
    for target in (reports_dir, audit_dir, current_dir, indexes_dir):
        target.mkdir(parents=True, exist_ok=True)
    # The execution report is reused verbatim as the lifecycle index page.
    execution_page = lifecycle_execution_markdown(package)
    contents = {
        reports_dir / "EXECUTADO__fechamento-ordens-ativas.md": execution_page,
        reports_dir / "PENDENCIAS-CODEX__fechamento-ordens-ativas.md": lifecycle_pending_markdown(package),
        audit_dir / "AUDITORIA-GPT__fechamento-ordens-ativas.md": lifecycle_audit_markdown(package),
        current_dir / "active-order-queue.md": lifecycle_queue_markdown(package),
        indexes_dir / "orders-lifecycle-index.md": execution_page,
    }
    written: list[Path] = []
    for target, content in contents.items():
        target.write_text(content, encoding="utf-8")
        written.append(target)
    return tuple(written)

View File

@@ -0,0 +1,157 @@
"""Snapshot and diff utilities for comparing human maturity over time."""
from __future__ import annotations
import json
from dataclasses import dataclass
from pathlib import Path
from typing import Mapping, Sequence
from .models import PlatformHumanReport, as_plain_data, utc_now
@dataclass(slots=True)
class PlatformScoreSnapshot:
    """Point-in-time human-maturity figures for a single platform."""

    platform_id: str
    average_score: int
    code_lines: int
    evidence_count: int
    warnings: tuple[str, ...]

    def to_dict(self) -> dict[str, object]:
        """Return this snapshot as a plain-data mapping."""
        return as_plain_data(self)
@dataclass(slots=True)
class EcosystemSnapshot:
    """All platform score snapshots captured at one moment in time."""

    generated_at: str
    platforms: tuple[PlatformScoreSnapshot, ...]

    def to_dict(self) -> dict[str, object]:
        """Return this snapshot as a plain-data mapping."""
        return as_plain_data(self)
@dataclass(slots=True)
class SnapshotDelta:
    """Before/after comparison of one platform between two snapshots.

    Fields are None when the platform is absent on one side; status is one of
    "improved", "regressed", "stable", "new", or "missing".
    """

    platform_id: str
    before_score: int | None
    after_score: int | None
    score_delta: int | None
    before_evidence: int | None
    after_evidence: int | None
    evidence_delta: int | None
    status: str

    def to_dict(self) -> dict[str, object]:
        """Return this delta as a plain-data mapping."""
        return as_plain_data(self)
def snapshot_from_reports(reports: Sequence[PlatformHumanReport]) -> EcosystemSnapshot:
    """Build an ecosystem snapshot from per-platform reports, ordered by id."""
    ordered = sorted(reports, key=lambda report: report.platform.platform_id)
    entries: list[PlatformScoreSnapshot] = []
    for report in ordered:
        entries.append(
            PlatformScoreSnapshot(
                platform_id=report.platform.platform_id,
                average_score=report.average_score,
                code_lines=report.scan.code_lines,
                evidence_count=len(report.scan.evidence),
                warnings=tuple(report.scan.warnings),
            )
        )
    return EcosystemSnapshot(generated_at=utc_now(), platforms=tuple(entries))
def write_snapshot(path: Path, snapshot: EcosystemSnapshot) -> Path:
    """Serialize *snapshot* as pretty-printed JSON at *path* and return *path*."""
    payload = json.dumps(as_plain_data(snapshot), ensure_ascii=False, indent=2, sort_keys=True)
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(payload, encoding="utf-8")
    return path
def load_snapshot(path: Path) -> EcosystemSnapshot | None:
    """Load a previously written snapshot, or None when absent or unreadable.

    Fix: the original crashed with an uncaught ValueError/TypeError when a
    platform entry carried malformed numeric fields, and with AttributeError
    when the JSON document was not an object — contradicting its
    return-None-on-bad-file contract. Malformed entries are now skipped and
    non-object documents rejected.
    """
    if not path.exists():
        return None
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        return None
    if not isinstance(data, Mapping):
        # A valid JSON document that is not an object cannot be a snapshot.
        return None
    raw_platforms = data.get("platforms", [])
    if not isinstance(raw_platforms, list):
        raw_platforms = []
    platforms = []
    for item in raw_platforms:
        if not isinstance(item, Mapping):
            continue
        try:
            platforms.append(
                PlatformScoreSnapshot(
                    platform_id=str(item.get("platform_id", "")),
                    average_score=int(item.get("average_score", 0)),
                    code_lines=int(item.get("code_lines", 0)),
                    evidence_count=int(item.get("evidence_count", 0)),
                    warnings=tuple(str(value) for value in item.get("warnings", ())),
                )
            )
        except (TypeError, ValueError):
            # Entry with a malformed schema: skip it rather than abort the load.
            continue
    return EcosystemSnapshot(generated_at=str(data.get("generated_at", "")), platforms=tuple(platforms))
def diff_snapshots(before: EcosystemSnapshot | None, after: EcosystemSnapshot) -> tuple[SnapshotDelta, ...]:
    """Compare two snapshots platform-by-platform and return sorted deltas.

    Platforms present in both get numeric deltas and an
    improved/regressed/stable status; platforms only in *after* are "new";
    platforms only in *before* are "missing". A None *before* treats every
    platform as new.
    """
    before_by_id = {item.platform_id: item for item in before.platforms} if before else {}
    after_by_id = {item.platform_id: item for item in after.platforms}
    all_ids = sorted(set(before_by_id) | set(after_by_id))
    deltas: list[SnapshotDelta] = []
    for platform_id in all_ids:
        old = before_by_id.get(platform_id)
        new = after_by_id.get(platform_id)
        if old and new:
            # Present on both sides: compute real deltas.
            score_delta = new.average_score - old.average_score
            evidence_delta = new.evidence_count - old.evidence_count
            status = "improved" if score_delta > 0 else "regressed" if score_delta < 0 else "stable"
            deltas.append(
                SnapshotDelta(
                    platform_id=platform_id,
                    before_score=old.average_score,
                    after_score=new.average_score,
                    score_delta=score_delta,
                    before_evidence=old.evidence_count,
                    after_evidence=new.evidence_count,
                    evidence_delta=evidence_delta,
                    status=status,
                )
            )
        elif new:
            # Appeared only in the newer snapshot.
            deltas.append(
                SnapshotDelta(
                    platform_id=platform_id,
                    before_score=None,
                    after_score=new.average_score,
                    score_delta=None,
                    before_evidence=None,
                    after_evidence=new.evidence_count,
                    evidence_delta=None,
                    status="new",
                )
            )
        elif old:
            # Disappeared from the newer snapshot.
            deltas.append(
                SnapshotDelta(
                    platform_id=platform_id,
                    before_score=old.average_score,
                    after_score=None,
                    score_delta=None,
                    before_evidence=old.evidence_count,
                    after_evidence=None,
                    evidence_delta=None,
                    status="missing",
                )
            )
    return tuple(deltas)
def snapshot_delta_markdown(deltas: Sequence[SnapshotDelta]) -> str:
    """Render snapshot deltas as a markdown bullet list."""
    if not deltas:
        return "# Delta de maturidade humana\n\nSem delta disponivel.\n"
    body = ["# Delta de maturidade humana", ""]
    for entry in deltas:
        body.append(
            f"- {entry.platform_id}: {entry.status}; "
            f"score {entry.before_score} -> {entry.after_score}; "
            f"evidencias {entry.before_evidence} -> {entry.after_evidence}"
        )
    return "\n".join(body) + "\n"

View File

@@ -0,0 +1,95 @@
"""Status-page markdown writers for the central management dossier."""
from __future__ import annotations
from pathlib import Path
from typing import Sequence
from .models import PlatformHumanReport, ReportBundle, ServiceOrder
def current_project_state_markdown(bundle: ReportBundle, reports: Sequence[PlatformHumanReport]) -> str:
    """Render the central "current project state" page as markdown.

    Shows project identity, aggregate material metrics, the eight
    lowest-scoring platforms, and the next recommended action. The average
    score is 0 when no reports exist (guards against division by zero).
    """
    avg = round(sum(report.average_score for report in reports) / len(reports)) if reports else 0
    lines = [
        "# Estado atual - tudo-para-ia-mais-humana",
        "",
        "## Identidade",
        "",
        "- project_id: `tudo-para-ia-mais-humana`",
        "- repo_name: `tudo-para-ia-mais-humana`",
        "- papel: plataforma de traducao humana do ecossistema",
        "- status: `ativo_em_fundacao`",
        "",
        "## Estado material",
        "",
        f"- plataformas avaliadas: `{bundle.platform_count}`",
        f"- perfis humanos: `{bundle.profile_count}`",
        f"- celulas de matriz: `{bundle.matrix_cells}`",
        f"- linhas de codigo analisadas: `{bundle.total_code_lines_analyzed}`",
        f"- score medio humano: `{avg}`",
        f"- arquivos gerados ou alterados: `{len(bundle.generated_files)}`",
        "",
        "## Plataformas com menor score",
        "",
    ]
    # Eight weakest platforms, ascending by human-maturity score.
    for report in sorted(reports, key=lambda item: item.average_score)[:8]:
        lines.append(f"- {report.platform.platform_id}: `{report.average_score}`")
    lines.append("")
    lines.append("## Proxima acao correta")
    lines.append("")
    lines.append("Executar as ordens de saida ativas e comparar novo snapshot contra esta fundacao.")
    return "\n".join(lines) + "\n"
def orders_index_markdown(orders: Sequence[ServiceOrder]) -> str:
    """Render the service-order index as a markdown bullet list."""
    rows = ["# Indice de ordens - tudo-para-ia-mais-humana", ""]
    rows.extend(
        f"- `{entry.order_id}` | `{entry.order_type.value}` | `{entry.status.value}` | {entry.title}"
        for entry in orders
    )
    return "\n".join(rows) + "\n"
def reports_index_markdown(bundle: ReportBundle) -> str:
    """Render an index of generated human-readable report files (by path)."""
    listed_types = {"markdown", "docx", "svg", "html"}
    rows = ["# Indice de relatorios - tudo-para-ia-mais-humana", ""]
    for artifact in sorted(bundle.generated_files, key=lambda item: item.path):
        if artifact.file_type in listed_types:
            rows.append(f"- `{artifact.path}` - {artifact.description}")
    return "\n".join(rows) + "\n"
def pending_index_markdown(reports: Sequence[PlatformHumanReport]) -> str:
    """Render every platform scan warning as a pending-items index."""
    rows = ["# Indice de pendencias - tudo-para-ia-mais-humana", ""]
    ordered = sorted(reports, key=lambda item: item.platform.platform_id)
    warning_rows = [
        f"- `{report.platform.platform_id}` - {warning}"
        for report in ordered
        for warning in report.scan.warnings
    ]
    if warning_rows:
        rows.extend(warning_rows)
    else:
        rows.append("- Nenhuma pendencia material detectada.")
    return "\n".join(rows) + "\n"
def write_central_status_pages(
    central_folder: Path,
    bundle: ReportBundle,
    reports: Sequence[PlatformHumanReport],
    orders: Sequence[ServiceOrder],
) -> tuple[Path, ...]:
    """Write the current/indexes/status markdown pages for the central dossier.

    Folders are created when missing; the written paths are returned.
    """
    current_dir = central_folder / "current"
    indexes_dir = central_folder / "indexes"
    status_dir = central_folder / "status"
    for target in (current_dir, indexes_dir, status_dir):
        target.mkdir(parents=True, exist_ok=True)
    # Two pages reuse the state report and two reuse the orders index.
    state_page = current_project_state_markdown(bundle, reports)
    orders_page = orders_index_markdown(orders)
    pages = {
        current_dir / "current-project-state.md": state_page,
        current_dir / "active-order-queue.md": orders_page,
        indexes_dir / "orders-index.md": orders_page,
        indexes_dir / "reports-index.md": reports_index_markdown(bundle),
        indexes_dir / "pending-index.md": pending_index_markdown(reports),
        status_dir / "overview.md": state_page,
    }
    written: list[Path] = []
    for target, content in pages.items():
        target.write_text(content, encoding="utf-8")
        written.append(target)
    return tuple(written)

View File

@@ -0,0 +1,213 @@
"""Reconcile central status pages with governance, lifecycle, and assurance."""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Sequence
from .governance_models import EcosystemGovernancePortfolio, RoundExecutionPackage
from .human_readiness_registry import ReadinessRegistry, registry_gap_actions
from .models import as_plain_data, merge_unique
from .round_assurance import AssuranceSuite, assurance_pending_items
from .runtime_budget import RoundLineBudget, budget_pending_items
from .workflow_registry import WorkflowPortfolio, workflow_action_items
from .governance_scenarios import ScenarioPortfolio, scenario_action_items
@dataclass(slots=True)
class ReconciledStatus:
    """Reconciled project status: one label plus the four page line sets."""

    project_id: str
    status_label: str
    current_state_lines: tuple[str, ...]
    pending_lines: tuple[str, ...]
    index_lines: tuple[str, ...]
    release_lines: tuple[str, ...]

    def to_dict(self) -> dict[str, object]:
        """Return this status as a plain-data mapping."""
        return as_plain_data(self)
def status_label(portfolio: EcosystemGovernancePortfolio, assurance: AssuranceSuite | None) -> str:
    """Classify overall project status from assurance and governance state.

    Assurance blockers dominate, then governance blockers, then the average
    governance score tiers (>=82 controlado, >=62 explicavel, else fundacao).
    """
    if assurance is not None and assurance.blocker_count:
        return "parcial-com-blockers-de-assurance"
    if portfolio.blocked_platforms:
        return "parcial-com-blockers-de-governanca"
    score = portfolio.average_governance_score
    if score >= 82:
        return "controlado"
    return "explicavel" if score >= 62 else "fundacao"
def build_current_state_lines(
    portfolio: EcosystemGovernancePortfolio,
    registry: ReadinessRegistry,
    workflows: WorkflowPortfolio,
    scenarios: ScenarioPortfolio,
    lifecycle: RoundExecutionPackage | None,
    budget: RoundLineBudget | None,
    assurance: AssuranceSuite | None,
) -> tuple[str, ...]:
    """Build the bullet lines for the reconciled current-state page.

    Always includes identity and governance/registry/workflow/scenario
    counters; lifecycle, budget, and assurance sections are appended only
    when those inputs are provided.
    """
    lines = [
        "- project_id: `tudo-para-ia-mais-humana`",
        "- papel: plataforma de traducao humana, governanca operacional e continuidade por evidencias",
        f"- status: `{status_label(portfolio, assurance)}`",
        f"- score_governanca_medio: `{portfolio.average_governance_score}`",
        f"- plataformas_bloqueadas: `{len(portfolio.blocked_platforms)}`",
        f"- registros_prontidao: `{len(registry.entries)}`",
        f"- workflows: `{len(workflows.evaluations)}`",
        f"- cenarios: `{len(scenarios.evaluations)}`",
    ]
    if lifecycle is not None:
        # Order-lifecycle counters from the executed round.
        lines.extend(
            [
                f"- ordens_lidas: `{len(lifecycle.parsed_orders)}`",
                f"- ordens_concluidas: `{lifecycle.completed_count}`",
                f"- ordens_parciais: `{lifecycle.partial_count}`",
                f"- ordens_bloqueadas: `{lifecycle.blocked_count}`",
            ]
        )
    if budget is not None:
        # Reading/production line-budget figures for the round.
        lines.extend(
            [
                f"- linhas_tecnicas_lidas: `{budget.total_technical_lines}`",
                f"- linhas_codigo_total: `{budget.total_code_lines}`",
                f"- minimo_leitura_ok: `{budget.reading_minimum_met}`",
                f"- minimo_producao_ok: `{budget.production_minimum_met}`",
            ]
        )
    if assurance is not None:
        # Final assurance verdict for the round.
        lines.extend(
            [
                f"- assurance_passed: `{assurance.passed}`",
                f"- assurance_blockers: `{assurance.blocker_count}`",
                f"- assurance_warnings: `{assurance.warning_count}`",
            ]
        )
    return tuple(lines)
def build_pending_lines(
    portfolio: EcosystemGovernancePortfolio,
    registry: ReadinessRegistry,
    workflows: WorkflowPortfolio,
    scenarios: ScenarioPortfolio,
    lifecycle: RoundExecutionPackage | None,
    budget: RoundLineBudget | None,
    assurance: AssuranceSuite | None,
) -> tuple[str, ...]:
    """Aggregate pending items from every source, deduplicated and capped at 80."""
    pending: list[str] = list(portfolio.blockers_summary)
    pending += registry_gap_actions(registry, limit=15)
    pending += workflow_action_items(workflows, limit=15)
    pending += scenario_action_items(scenarios, limit=15)
    if lifecycle is not None:
        pending += lifecycle.pending_items
    if budget is not None:
        pending += budget_pending_items(budget)
    if assurance is not None:
        pending += assurance_pending_items(assurance)
    return merge_unique(pending)[:80]
def build_index_lines(portfolio: EcosystemGovernancePortfolio) -> tuple[str, ...]:
    """Build the artifact-index lines: fixed data-file entries plus up to 20 order candidates."""
    artifact_lines = (
        "- `dados/governanca-operacional.json` - portfolio completo de governanca",
        "- `dados/governanca-operacional-compacta.json` - leitura compacta do estado",
        "- `dados/registro-prontidao-humana.json` - matriz plataforma x perfil x governanca",
        "- `dados/workflows-humanos.json` - workflows humanos avaliados",
        "- `dados/cenarios-governanca.json` - cenarios de aceite",
        "- `dados/grafo-evidencias.json` - grafo de checks, evidencias, workflows e ordens",
        "- `dados/budget-linhas-rodada.json` - leitura e producao de codigo da rodada",
        "- `dados/assurance-rodada.json` - assurance final",
    )
    candidate_lines = [
        f"- candidata `{candidate.candidate_id}` - {candidate.title}"
        for candidate in portfolio.order_candidates[:20]
    ]
    return (*artifact_lines, *candidate_lines)
def build_release_lines(portfolio: EcosystemGovernancePortfolio, assurance: AssuranceSuite | None) -> tuple[str, ...]:
    """Derive the two-line release verdict.

    Assurance blockers veto any release; blocked platforms downgrade it to a
    managerial record; otherwise continuity is approved.
    """
    has_assurance_blockers = assurance is not None and bool(assurance.blocker_count)
    if has_assurance_blockers:
        return (
            "Release operacional nao deve ser promovido sem tratar blockers de assurance.",
            "O codigo e artefatos podem ficar commitados como avanco material, mas a proxima OS deve atacar blockers listados.",
        )
    if portfolio.blocked_platforms:
        return (
            "Release gerencial permitido como registro de avancos, com pendencias materiais explicitas.",
            "Promocao de maturidade do ecossistema deve aguardar resolucao das plataformas bloqueadas.",
        )
    return (
        "Release gerencial apto para continuidade.",
        "Manter regressao de governanca, workflows, cenarios e assurance em toda rodada.",
    )
def build_reconciled_status(
    portfolio: EcosystemGovernancePortfolio,
    registry: ReadinessRegistry,
    workflows: WorkflowPortfolio,
    scenarios: ScenarioPortfolio,
    lifecycle: RoundExecutionPackage | None = None,
    budget: RoundLineBudget | None = None,
    assurance: AssuranceSuite | None = None,
) -> ReconciledStatus:
    """Assemble the complete reconciled status snapshot for the project."""
    label = status_label(portfolio, assurance)
    state_lines = build_current_state_lines(portfolio, registry, workflows, scenarios, lifecycle, budget, assurance)
    pending = build_pending_lines(portfolio, registry, workflows, scenarios, lifecycle, budget, assurance)
    return ReconciledStatus(
        project_id="tudo-para-ia-mais-humana",
        status_label=label,
        current_state_lines=state_lines,
        pending_lines=pending,
        index_lines=build_index_lines(portfolio),
        release_lines=build_release_lines(portfolio, assurance),
    )
def current_state_markdown(status: ReconciledStatus) -> str:
    """Render the reconciled current-state page (state lines + release section) as markdown."""
    body = [
        "# Estado atual reconciliado - tudo-para-ia-mais-humana",
        "",
        *status.current_state_lines,
        "",
        "## Release",
        "",
    ]
    body += [f"- {item}" for item in status.release_lines]
    return "\n".join(body).strip() + "\n"
def pending_index_markdown(status: ReconciledStatus) -> str:
    """Render the pending-items index page; shows a placeholder entry when nothing is pending."""
    if status.pending_lines:
        items = [f"- {item}" for item in status.pending_lines]
    else:
        items = ["- Nenhuma pendencia material reconciliada."]
    rendered = ["# Indice de pendencias reconciliadas - tudo-para-ia-mais-humana", "", *items]
    return "\n".join(rendered).strip() + "\n"
def reports_index_markdown(status: ReconciledStatus) -> str:
    """Render the reconciled artifact-index page as markdown."""
    header = ["# Indice reconciliado de artefatos - tudo-para-ia-mais-humana", ""]
    return "\n".join([*header, *status.index_lines]).strip() + "\n"
def released_markdown(status: ReconciledStatus) -> str:
    """Render the managerial release page: status label first, then the release verdict lines."""
    rows = [
        "# Status de release gerencial - tudo-para-ia-mais-humana",
        "",
        f"- status: `{status.status_label}`",
        *(f"- {item}" for item in status.release_lines),
    ]
    return "\n".join(rows).strip() + "\n"
def write_reconciled_status(platform_folder: Path, status: ReconciledStatus) -> tuple[Path, ...]:
    """Materialize the reconciled-status pages under current/, indexes/ and status/.

    The overview page and the current-state page share the same rendered content.
    Returns the written paths in a stable order.
    """
    overview = current_state_markdown(status)
    targets = {
        platform_folder / "current" / "current-project-state.md": overview,
        platform_folder / "indexes" / "pending-index.md": pending_index_markdown(status),
        platform_folder / "indexes" / "reports-index.md": reports_index_markdown(status),
        platform_folder / "status" / "overview.md": overview,
        platform_folder / "status" / "released.md": released_markdown(status),
    }
    written: list[Path] = []
    for target, content in targets.items():
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content, encoding="utf-8")
        written.append(target)
    return tuple(written)

345
src/mais_humana/storage.py Normal file
View File

@@ -0,0 +1,345 @@
"""SQLite storage for compact semantic memory."""
from __future__ import annotations
import json
import sqlite3
from pathlib import Path
from typing import Iterable, Sequence
from .models import GeneratedFile, PlatformHumanReport, Recommendation, ServiceOrder, as_plain_data, utc_now
from .operational_models import ExecutionRoundDossier
SCHEMA = """
CREATE TABLE IF NOT EXISTS service_orders (
id INTEGER PRIMARY KEY AUTOINCREMENT,
order_id TEXT UNIQUE NOT NULL,
order_type TEXT NOT NULL,
project_id TEXT NOT NULL,
title TEXT NOT NULL,
purpose TEXT NOT NULL,
object_scope TEXT NOT NULL,
reason TEXT NOT NULL,
expected_result TEXT NOT NULL,
status_semantico TEXT NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS files (
id INTEGER PRIMARY KEY AUTOINCREMENT,
caminho_arquivo TEXT UNIQUE NOT NULL,
descricao TEXT NOT NULL,
funcao TEXT NOT NULL,
tipo_arquivo TEXT NOT NULL,
criado_ou_alterado_por TEXT NOT NULL,
o_que_mudou TEXT NOT NULL,
relacao_com_ordem TEXT NOT NULL,
status_semantico TEXT NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS platform_reports (
id INTEGER PRIMARY KEY AUTOINCREMENT,
platform_id TEXT UNIQUE NOT NULL,
average_score INTEGER NOT NULL,
code_lines INTEGER NOT NULL,
evidence_count INTEGER NOT NULL,
warnings_json TEXT NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS recommendations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
recommendation_id TEXT UNIQUE NOT NULL,
platform_id TEXT NOT NULL,
title TEXT NOT NULL,
priority INTEGER NOT NULL,
order_type TEXT NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS round_dossiers (
id INTEGER PRIMARY KEY AUTOINCREMENT,
round_id TEXT UNIQUE NOT NULL,
project_id TEXT NOT NULL,
generated_at TEXT NOT NULL,
blockers_count INTEGER NOT NULL,
pending_count INTEGER NOT NULL,
active_orders_count INTEGER NOT NULL,
output_orders_count INTEGER NOT NULL,
total_code_lines_analyzed INTEGER NOT NULL,
code_lines_available_in_project INTEGER NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS order_justifications (
id INTEGER PRIMARY KEY AUTOINCREMENT,
order_id TEXT UNIQUE NOT NULL,
order_type TEXT NOT NULL,
platform_id TEXT NOT NULL,
closure_status TEXT NOT NULL,
reason TEXT NOT NULL,
execution_summary TEXT NOT NULL,
pending_count INTEGER NOT NULL,
linked_signal_count INTEGER NOT NULL,
linked_gate_count INTEGER NOT NULL,
resulting_order_count INTEGER NOT NULL,
payload_json TEXT NOT NULL,
updated_at TEXT NOT NULL
);
"""
def connect(path: Path) -> sqlite3.Connection:
    """Open (creating directories and tables as needed) the semantic-memory database.

    WAL journaling plus NORMAL synchronous trades a little durability for
    faster repeated writes; the schema script is idempotent.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    connection = sqlite3.connect(path)
    for pragma in ("PRAGMA journal_mode=WAL", "PRAGMA synchronous=NORMAL"):
        connection.execute(pragma)
    connection.executescript(SCHEMA)
    return connection
def upsert_files(conn: sqlite3.Connection, files: Iterable[GeneratedFile], status: str = "atualizado") -> None:
    """Insert or refresh generated-file rows, keyed by caminho_arquivo.

    All rows written in one call share the same updated_at stamp and the
    given semantic *status*; conflicts overwrite every mutable column.
    """
    now = utc_now()
    rows = [
        (
            entry.path,
            entry.description,
            entry.function,
            entry.file_type,
            entry.changed_by,
            entry.change_summary,
            entry.relation_to_order,
            status,
            json.dumps(as_plain_data(entry), ensure_ascii=False, sort_keys=True),
            now,
        )
        for entry in files
    ]
    conn.executemany(
        """
        INSERT INTO files (
            caminho_arquivo, descricao, funcao, tipo_arquivo, criado_ou_alterado_por,
            o_que_mudou, relacao_com_ordem, status_semantico, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(caminho_arquivo) DO UPDATE SET
            descricao=excluded.descricao,
            funcao=excluded.funcao,
            tipo_arquivo=excluded.tipo_arquivo,
            criado_ou_alterado_por=excluded.criado_ou_alterado_por,
            o_que_mudou=excluded.o_que_mudou,
            relacao_com_ordem=excluded.relacao_com_ordem,
            status_semantico=excluded.status_semantico,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        rows,
    )
def upsert_orders(conn: sqlite3.Connection, orders: Iterable[ServiceOrder]) -> None:
    """Insert or refresh service-order rows, keyed by order_id.

    Enum fields (order_type, status) are stored by their .value; the whole
    order is also snapshotted into payload_json.
    """
    now = utc_now()
    rows = [
        (
            order.order_id,
            order.order_type.value,
            order.project_id,
            order.title,
            order.purpose,
            order.object_scope,
            order.reason,
            order.expected_result,
            order.status.value,
            json.dumps(as_plain_data(order), ensure_ascii=False, sort_keys=True),
            now,
        )
        for order in orders
    ]
    conn.executemany(
        """
        INSERT INTO service_orders (
            order_id, order_type, project_id, title, purpose, object_scope, reason,
            expected_result, status_semantico, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(order_id) DO UPDATE SET
            order_type=excluded.order_type,
            project_id=excluded.project_id,
            title=excluded.title,
            purpose=excluded.purpose,
            object_scope=excluded.object_scope,
            reason=excluded.reason,
            expected_result=excluded.expected_result,
            status_semantico=excluded.status_semantico,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        rows,
    )
def upsert_reports(conn: sqlite3.Connection, reports: Sequence[PlatformHumanReport]) -> None:
    """Insert or refresh per-platform human reports, keyed by platform_id.

    Scan warnings are serialized separately (warnings_json) so they stay
    queryable without unpacking the full payload snapshot.
    """
    now = utc_now()
    rows = []
    for report in reports:
        rows.append(
            (
                report.platform.platform_id,
                report.average_score,
                report.scan.code_lines,
                len(report.scan.evidence),
                json.dumps(list(report.scan.warnings), ensure_ascii=False),
                json.dumps(as_plain_data(report), ensure_ascii=False, sort_keys=True),
                now,
            )
        )
    conn.executemany(
        """
        INSERT INTO platform_reports (
            platform_id, average_score, code_lines, evidence_count, warnings_json,
            payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(platform_id) DO UPDATE SET
            average_score=excluded.average_score,
            code_lines=excluded.code_lines,
            evidence_count=excluded.evidence_count,
            warnings_json=excluded.warnings_json,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        rows,
    )
def upsert_recommendations(conn: sqlite3.Connection, recommendations: Iterable[Recommendation]) -> None:
    """Insert or refresh recommendation rows, keyed by recommendation_id."""
    now = utc_now()
    rows = [
        (
            rec.recommendation_id,
            rec.platform_id,
            rec.title,
            rec.priority,
            rec.suggested_order_type.value,
            json.dumps(as_plain_data(rec), ensure_ascii=False, sort_keys=True),
            now,
        )
        for rec in recommendations
    ]
    conn.executemany(
        """
        INSERT INTO recommendations (
            recommendation_id, platform_id, title, priority, order_type, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(recommendation_id) DO UPDATE SET
            platform_id=excluded.platform_id,
            title=excluded.title,
            priority=excluded.priority,
            order_type=excluded.order_type,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        rows,
    )
def upsert_round_dossier(conn: sqlite3.Connection, dossier: ExecutionRoundDossier) -> None:
    """Insert or refresh one round dossier plus its per-order justification rows.

    The dossier row is keyed by round_id; each justification row is keyed by
    order_id. Counts are denormalized from the dossier collections so they
    stay queryable without unpacking payload_json.
    """
    now = utc_now()
    conn.execute(
        """
        INSERT INTO round_dossiers (
            round_id, project_id, generated_at, blockers_count, pending_count,
            active_orders_count, output_orders_count, total_code_lines_analyzed,
            code_lines_available_in_project, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(round_id) DO UPDATE SET
            project_id=excluded.project_id,
            generated_at=excluded.generated_at,
            blockers_count=excluded.blockers_count,
            pending_count=excluded.pending_count,
            active_orders_count=excluded.active_orders_count,
            output_orders_count=excluded.output_orders_count,
            total_code_lines_analyzed=excluded.total_code_lines_analyzed,
            code_lines_available_in_project=excluded.code_lines_available_in_project,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        (
            dossier.round_id,
            dossier.project_id,
            dossier.generated_at,
            len(dossier.blockers),
            len(dossier.pending_items),
            len(dossier.active_input_orders),
            len(dossier.output_orders),
            dossier.total_code_lines_analyzed,
            dossier.code_lines_available_in_project,
            json.dumps(as_plain_data(dossier), ensure_ascii=False, sort_keys=True),
            now,
        ),
    )
    justification_rows = [
        (
            entry.order_id,
            entry.order_type.value,
            entry.platform_id,
            entry.closure_status.value,
            entry.reason,
            entry.execution_summary,
            len(entry.pending_items),
            len(entry.linked_signals),
            len(entry.linked_gates),
            len(entry.resulting_orders),
            json.dumps(as_plain_data(entry), ensure_ascii=False, sort_keys=True),
            now,
        )
        for entry in dossier.order_justifications
    ]
    conn.executemany(
        """
        INSERT INTO order_justifications (
            order_id, order_type, platform_id, closure_status, reason,
            execution_summary, pending_count, linked_signal_count, linked_gate_count,
            resulting_order_count, payload_json, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(order_id) DO UPDATE SET
            order_type=excluded.order_type,
            platform_id=excluded.platform_id,
            closure_status=excluded.closure_status,
            reason=excluded.reason,
            execution_summary=excluded.execution_summary,
            pending_count=excluded.pending_count,
            linked_signal_count=excluded.linked_signal_count,
            linked_gate_count=excluded.linked_gate_count,
            resulting_order_count=excluded.resulting_order_count,
            payload_json=excluded.payload_json,
            updated_at=excluded.updated_at
        """,
        justification_rows,
    )
def write_semantic_state(
    sqlite_path: Path,
    files: Sequence[GeneratedFile],
    orders: Sequence[ServiceOrder],
    reports: Sequence[PlatformHumanReport],
    recommendations: Sequence[Recommendation],
    round_dossier: ExecutionRoundDossier | None = None,
) -> None:
    """Persist the full semantic snapshot (files, orders, reports, recommendations,
    optional round dossier) into the SQLite store and commit once."""
    writers = (
        (upsert_files, files),
        (upsert_orders, orders),
        (upsert_reports, reports),
        (upsert_recommendations, recommendations),
    )
    with connect(sqlite_path) as conn:
        for writer, payload in writers:
            writer(conn, payload)
        if round_dossier is not None:
            upsert_round_dossier(conn, round_dossier)
        conn.commit()
def table_counts(sqlite_path: Path) -> dict[str, int]:
    """Report row counts for every known table.

    Returns {} when the database file does not exist, and -1 for any table
    that cannot be counted (missing table, corrupt file, etc.).
    """
    if not sqlite_path.exists():
        return {}
    tables = (
        "service_orders",
        "files",
        "platform_reports",
        "recommendations",
        "round_dossiers",
        "order_justifications",
    )
    counts: dict[str, int] = {}
    with sqlite3.connect(sqlite_path) as conn:
        for table in tables:
            try:
                row = conn.execute(f"SELECT COUNT(*) FROM {table}").fetchone()
            except sqlite3.DatabaseError:
                counts[table] = -1
            else:
                counts[table] = int(row[0])
    return counts

View File

@@ -0,0 +1,370 @@
"""Human workflow registry for cross-platform operational maturity."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Sequence
from .governance_models import EcosystemGovernancePortfolio, GovernanceStatus
from .models import as_plain_data, merge_unique
@dataclass(slots=True)
class WorkflowStep:
    """A single verifiable step of a human workflow, owned by one platform."""

    step_id: str  # stable identifier of the step inside its workflow
    title: str  # short human-readable description of the step
    owner_platform: str  # platform id whose governance card must exhibit the signals
    required_signals: tuple[str, ...]  # substrings searched (case-insensitively) in the card text
    validation: str  # manual action used to validate the step
    human_output: str  # human-visible result expected when the step passes

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this step."""
        return as_plain_data(self)
@dataclass(slots=True)
class HumanWorkflow:
    """An end-to-end human workflow spanning one or more platforms."""

    workflow_id: str  # stable identifier for the workflow
    title: str  # human-readable workflow name
    purpose: str  # why the workflow exists
    primary_profile: str  # main human profile served by the workflow
    platforms: tuple[str, ...]  # platform ids involved across the steps
    steps: tuple[WorkflowStep, ...]  # ordered verifiable steps
    expected_artifacts: tuple[str, ...]  # evidence expected when the workflow completes
    risk_if_missing: str  # consequence when the workflow is not in place

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this workflow."""
        return as_plain_data(self)
@dataclass(slots=True)
class WorkflowEvaluation:
    """Result of scoring one workflow against the governance portfolio."""

    workflow_id: str  # id of the evaluated workflow
    title: str  # workflow title, duplicated for standalone reporting
    status: str  # one of: pronto / util / atencao / bloqueado (see evaluate_workflow)
    score: int  # 0-100, rounded percentage of passed steps
    passed_steps: int  # number of steps whose required signals were all found
    total_steps: int  # total number of steps in the workflow
    blocking_steps: tuple[str, ...]  # "step_id: faltam ..." entries for failed steps
    next_actions: tuple[str, ...]  # deduplicated follow-up actions (capped at 8)
    evidence: tuple[str, ...]  # "step_id: human_output" entries for passed steps

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this evaluation."""
        return as_plain_data(self)
@dataclass(slots=True)
class WorkflowPortfolio:
    """All registered workflows together with their evaluations and a summary."""

    workflows: tuple[HumanWorkflow, ...]  # the evaluated workflow definitions
    evaluations: tuple[WorkflowEvaluation, ...]  # one evaluation per workflow, same order
    summary: tuple[str, ...]  # human-readable aggregate lines (counts and mean score)

    def to_dict(self) -> dict[str, object]:
        """Return a plain-data (JSON-serializable) view of this portfolio."""
        return as_plain_data(self)
def step(
    step_id: str,
    title: str,
    owner_platform: str,
    required_signals: Iterable[str],
    validation: str,
    human_output: str,
) -> WorkflowStep:
    """Build a WorkflowStep, freezing *required_signals* into a tuple."""
    frozen_signals = tuple(required_signals)
    return WorkflowStep(step_id, title, owner_platform, frozen_signals, validation, human_output)
# Canonical registry of human workflows evaluated against governance cards.
# Each workflow lists the platforms it crosses and the signals (substrings)
# that must appear in each owner platform's card text for a step to pass.
WORKFLOWS: tuple[HumanWorkflow, ...] = (
    HumanWorkflow(
        workflow_id="docs-canonical-decision",
        title="Decisao canonica de Docs",
        purpose="Resolver ambiguidade entre catalogOnly, responseReady minimo e excecao formal.",
        primary_profile="planejamento_estrategico",
        platforms=("docs", "mcps", "ui", "compliance"),
        steps=(
            step("docs-source", "Identificar fonte documental canonica", "docs", ("docs", "canonical", "contrato"), "abrir contrato Docs", "fonte documental declarada"),
            step("docs-read", "Promover leitura minima ou declarar excecao", "docs", ("responseReady", "catalogOnly", "excecao"), "executar leitura ou registrar decisao", "decisao legivel"),
            step("mcp-reconcile", "Reconciliar readiness MCP", "mcps", ("readiness", "blocker", "docs"), "comparar readiness global", "blocker Docs classificado"),
            step("ui-explain", "Explicar estado para painel e GPT", "ui", ("sameSource", "panelReady", "sourceHash"), "validar mesma fonte", "painel e GPT explicam igual"),
        ),
        expected_artifacts=("contrato Docs", "readiness reconciliado", "evidencia HTTP", "OS de continuidade"),
        risk_if_missing="Docs continua como blocker global ambiguo.",
    ),
    HumanWorkflow(
        workflow_id="byok-live-controlled",
        title="BYOK live controlado por tenant",
        purpose="Provar credencial real sem vazamento, com usuario, organizacao, entitlement, smoke e consumo.",
        primary_profile="tecnico",
        platforms=("identity", "business", "integracoes", "compliance", "customer_ops"),
        steps=(
            step("identity-org", "Criar organizacao de teste", "identity", ("organizationId", "tenant"), "criar organizacao", "tenant rastreavel"),
            step("identity-user", "Criar usuario e sessao", "identity", ("actorId", "session", "role"), "assumir usuario", "ator autorizado"),
            step("business-entitlement", "Vincular produto e entitlement", "business", ("entitlement", "productId", "plan"), "consultar entitlement", "uso permitido"),
            step("integracoes-session", "Criar sessao BYOK", "integracoes", ("BYOK", "credentialRef"), "gerar credentialRef", "segredo nao exposto"),
            step("integracoes-smoke", "Executar smoke readonly", "integracoes", ("smoke", "readonly", "usage"), "rodar smoke", "provider validado"),
            step("compliance-redaction", "Validar nao vazamento", "compliance", ("redaction", "audit", "trace"), "varrer relatorios", "auditoria segura"),
            step("support-diagnostic", "Criar diagnostico para falha", "customer_ops", ("diagnostic", "nextAction"), "simular falha", "suporte orientado"),
        ),
        expected_artifacts=("credentialRef", "auditId", "usage", "redaction check", "diagnostico"),
        risk_if_missing="Integracao parece pronta, mas nao e autosservico real.",
    ),
    HumanWorkflow(
        workflow_id="business-commercial-gate",
        title="Gate comercial unico por Business",
        purpose="Garantir que plano, franquia, consumo e bloqueio venham de Business.",
        primary_profile="financeiro",
        platforms=("business", "finance", "integracoes", "public", "customer_ops"),
        steps=(
            step("plan-source", "Definir plano como fonte Business", "business", ("plano", "entitlement"), "consultar plano", "plano unico"),
            step("quota-source", "Materializar franquia e excedente", "business", ("quota", "usage", "franquia"), "simular consumo", "limite claro"),
            step("billing-link", "Reconciliar cobranca Finance", "finance", ("invoice", "reconciliation"), "gerar extrato", "fatura rastreavel"),
            step("product-isolation", "Isolar blocker por produto", "business", ("productId", "blockerId"), "listar blockers", "impacto isolado"),
            step("support-message", "Gerar mensagem humana de bloqueio", "customer_ops", ("support", "nextAction"), "validar mensagem", "cliente orientado"),
        ),
        expected_artifacts=("entitlement", "invoice", "usage", "blocker matrix", "mensagem de suporte"),
        risk_if_missing="Produto pode ser vendido ou bloqueado por regra divergente.",
    ),
    HumanWorkflow(
        workflow_id="mcp-panel-same-source",
        title="Painel humano com mesma fonte do GPT",
        purpose="Impedir divergencia entre UI, MCP e explicacao do GPT.",
        primary_profile="administrador_empresa",
        platforms=("mcps", "ui", "docs", "identity", "business"),
        steps=(
            step("screen-instance", "Criar instancia administrativa", "mcps", ("viewInstance", "screenData"), "criar instancia", "payload rastreavel"),
            step("source-hashes", "Gerar hashes de fonte e registros", "mcps", ("sourcePayloadHash", "sourceRecordsHash"), "comparar hashes", "mesma fonte"),
            step("ui-render", "Renderizar UI sem backend paralelo", "ui", ("panelReady", "sameSource"), "validar tela", "painel confiavel"),
            step("gpt-explain", "Explicar a mesma instancia pelo GPT", "mcps", ("gptExplainable", "sameSource"), "pedir explicacao", "resposta coerente"),
            step("docs-contract", "Publicar contrato da tela", "docs", ("contractVersion", "schemaVersion"), "exportar contrato", "documentacao viva"),
        ),
        expected_artifacts=("viewInstance", "source hashes", "screen contract", "evidencia HTTP"),
        risk_if_missing="Painel e GPT podem mostrar verdades diferentes.",
    ),
    HumanWorkflow(
        workflow_id="identity-rbac-denial",
        title="Identity com matriz RBAC de negacao",
        purpose="Provar permissoes permitidas e negadas por papel, organizacao e escopo.",
        primary_profile="juridico",
        platforms=("identity", "compliance", "mcps", "customer_ops"),
        steps=(
            step("role-matrix", "Publicar matriz de papeis", "identity", ("role", "scope", "permission"), "listar papeis", "papel claro"),
            step("allow-case", "Testar caminho permitido", "identity", ("allow", "200"), "executar allow", "acao liberada"),
            step("deny-case", "Testar caminho negado", "identity", ("deny", "403", "forbidden"), "executar deny", "acao bloqueada"),
            step("audit-case", "Registrar auditId da decisao", "compliance", ("auditId", "traceId"), "consultar audit", "evidencia juridica"),
            step("support-case", "Explicar negacao para suporte", "customer_ops", ("diagnostic", "nextAction"), "gerar diagnostico", "suporte seguro"),
        ),
        expected_artifacts=("matriz RBAC", "allow evidence", "deny evidence", "auditId", "diagnostico"),
        risk_if_missing="Controle de acesso fica baseado em caminho feliz e nao em governanca real.",
    ),
    HumanWorkflow(
        workflow_id="compliance-evidence-chain",
        title="Cadeia de evidencia Compliance",
        purpose="Ligar politica, consentimento, redaction, audit e retencao.",
        primary_profile="juridico",
        platforms=("compliance", "identity", "docs", "mcps"),
        steps=(
            step("policy", "Publicar politica aplicavel", "compliance", ("policy", "retention"), "validar politica", "regra clara"),
            step("consent", "Registrar consentimento quando aplicavel", "compliance", ("consent", "actorId"), "consultar consentimento", "consentimento auditavel"),
            step("redaction", "Aplicar redaction em campos sensiveis", "compliance", ("redaction", "masked"), "rodar redaction", "sem segredo"),
            step("audit", "Gerar auditId e evidenceId", "compliance", ("auditId", "evidenceId"), "consultar evidencia", "cadeia de custodia"),
            step("docs", "Documentar contrato de privacidade", "docs", ("contrato", "hash"), "validar docs", "memoria institucional"),
        ),
        expected_artifacts=("policy", "consent record", "redaction report", "audit evidence", "docs hash"),
        risk_if_missing="A plataforma nao prova governanca juridica de longo prazo.",
    ),
    HumanWorkflow(
        workflow_id="customer-ops-incident",
        title="Incidente Customer Ops completo",
        purpose="Abrir, diagnosticar, encaminhar, resolver e auditar incidente.",
        primary_profile="suporte",
        platforms=("customer_ops", "identity", "business", "integracoes", "compliance"),
        steps=(
            step("open", "Abrir incidente", "customer_ops", ("incident", "open"), "criar incidente", "ticket criado"),
            step("classify", "Classificar origem e severidade", "customer_ops", ("severity", "domain"), "classificar", "prioridade clara"),
            step("handoff", "Encaminhar para owner", "customer_ops", ("handoff", "owner"), "encaminhar", "responsavel definido"),
            step("resolve", "Resolver com evidencia", "customer_ops", ("resolved", "evidenceId"), "fechar incidente", "solucao rastreavel"),
            step("audit", "Auditar ciclo", "compliance", ("audit", "trace"), "consultar audit", "cadeia completa"),
        ),
        expected_artifacts=("ticket", "classification", "handoff", "resolution", "audit trail"),
        risk_if_missing="Suporte perde historico e proxima acao em falhas recorrentes.",
    ),
    HumanWorkflow(
        workflow_id="cloudflare-wrangler-operations",
        title="Operacao Cloudflare por wrangler",
        purpose="Validar runtime real por wrangler, tratando plugin como teste esperado.",
        primary_profile="tecnico",
        platforms=("integracoes", "mcps", "ui", "public", "gettys"),
        steps=(
            step("plugin-test", "Registrar tentativa do plugin", "integracoes", ("plugin Cloudflare", "expected"), "registrar tentativa", "premissa cumprida"),
            step("wrangler-auth", "Validar autenticacao wrangler quando houver trabalho real", "integracoes", ("wrangler", "whoami"), "wrangler whoami", "identidade operacional"),
            step("bindings", "Verificar bindings e secrets", "integracoes", ("bindings", "secrets"), "wrangler secret/list", "runtime configurado"),
            step("routes", "Validar rotas e deploy", "integracoes", ("routes", "deploy"), "wrangler deploy/check", "rota viva"),
            step("logs", "Coletar logs/health", "integracoes", ("tail", "health"), "wrangler tail/health", "diagnostico real"),
        ),
        expected_artifacts=("plugin attempt", "wrangler status", "bindings", "routes", "health evidence"),
        risk_if_missing="Cloudflare fica dependente de plugin experimental ou sem prova live.",
    ),
    HumanWorkflow(
        workflow_id="intelligence-promotion",
        title="Promocao controlada de Intelligence",
        purpose="Tirar Intelligence de unsupported/catalogOnly apenas com endpoint, smoke e contrato.",
        primary_profile="ceo",
        platforms=("intelligence", "mcps", "docs", "ui"),
        steps=(
            step("planned-state", "Declarar estado planejado", "intelligence", ("planned", "catalogOnly"), "registrar estado", "sem ambiguidade"),
            step("endpoint", "Publicar endpoint minimo", "intelligence", ("health", "profile", "readiness"), "chamar endpoint", "runtime basico"),
            step("contract", "Publicar contrato", "intelligence", ("openapi", "schema"), "validar contrato", "surface auditavel"),
            step("mcp-register", "Registrar no MCP", "mcps", ("provider", "readiness"), "comparar catalogo", "control-plane ciente"),
            step("ui-readiness", "Expor status no painel", "ui", ("panelReady", "sameSource"), "validar tela", "status humano"),
        ),
        expected_artifacts=("state decision", "health smoke", "openapi", "mcp readiness", "panel status"),
        risk_if_missing="Intelligence permanece meio provider e confunde readiness global.",
    ),
    HumanWorkflow(
        workflow_id="release-and-rollback",
        title="Release com rollback e contrato",
        purpose="Promover mudanca sem quebrar contrato, UI, GPT ou auditoria.",
        primary_profile="gestor_operacional",
        platforms=("platform_base", "mcps", "docs", "ui"),
        steps=(
            step("version", "Gerar contractVersion/schemaVersion", "platform_base", ("contractVersion", "schemaVersion"), "exportar versao", "versao clara"),
            step("compat", "Declarar compatibilidade", "platform_base", ("compatibilityVersion", "breakingChanges"), "validar compat", "risco explicito"),
            step("smoke", "Executar smoke regressivo", "mcps", ("smoke", "readiness"), "rodar smoke", "prova tecnica"),
            step("rollback", "Registrar rollback", "platform_base", ("rollback", "previousVersion"), "validar rollback", "reversao possivel"),
            step("docs", "Publicar changelog", "docs", ("changelog", "migrationNotes"), "validar docs", "memoria de mudanca"),
        ),
        expected_artifacts=("version", "compatibility", "smoke report", "rollback plan", "changelog"),
        risk_if_missing="Mudancas futuras quebram contratos sem trilha de reversao.",
    ),
)
def signal_text_for_card(portfolio: EcosystemGovernancePortfolio, platform_id: str) -> str:
    """Flatten a platform's governance card into one lowercase haystack string.

    Used for case-insensitive signal matching; returns "" when the platform
    has no card in the portfolio.
    """
    card = portfolio.card_for(platform_id)
    if card is None:
        return ""
    fragments: list[str] = [card.platform_id, card.status_label, card.maturity.value]
    for check in card.checks:
        fragments += [check.check_id, check.title, check.reason, check.next_action, check.status.value]
        for item in check.evidence:
            fragments += [item.path, item.summary]
    return "\n".join(fragments).lower()
def evaluate_step(step_item: WorkflowStep, portfolio: EcosystemGovernancePortfolio) -> tuple[bool, tuple[str, ...]]:
    """Return (passed, missing_signals) for one step against its owner platform's card text."""
    haystack = signal_text_for_card(portfolio, step_item.owner_platform)
    absent = tuple(signal for signal in step_item.required_signals if signal.lower() not in haystack)
    return (len(absent) == 0, absent)
def evaluate_workflow(workflow: HumanWorkflow, portfolio: EcosystemGovernancePortfolio) -> WorkflowEvaluation:
    """Score one workflow: count passing steps, collect blockers, proofs and follow-up actions.

    Status thresholds: >=85 pronto, >=65 util, >=40 atencao, else bloqueado.
    """
    passed_count = 0
    blockers: list[str] = []
    pending_actions: list[str] = []
    proofs: list[str] = []
    for current in workflow.steps:
        ok, missing = evaluate_step(current, portfolio)
        if ok:
            passed_count += 1
            proofs.append(f"{current.step_id}: {current.human_output}")
            continue
        blockers.append(f"{current.step_id}: faltam {', '.join(missing[:4])}")
        # Prefer the governance card's own next action; fall back to the step's
        # manual validation instruction.
        card = portfolio.card_for(current.owner_platform)
        if card and card.next_actions:
            pending_actions.append(card.next_actions[0])
        else:
            pending_actions.append(current.validation)
    total = len(workflow.steps)
    score = round((passed_count / total) * 100) if total else 0
    if score >= 85:
        status = "pronto"
    elif score >= 65:
        status = "util"
    elif score >= 40:
        status = "atencao"
    else:
        status = "bloqueado"
    return WorkflowEvaluation(
        workflow_id=workflow.workflow_id,
        title=workflow.title,
        status=status,
        score=score,
        passed_steps=passed_count,
        total_steps=total,
        blocking_steps=tuple(blockers),
        next_actions=merge_unique(pending_actions)[:8],
        evidence=tuple(proofs),
    )
def build_workflow_portfolio(portfolio: EcosystemGovernancePortfolio, workflows: Sequence[HumanWorkflow] = WORKFLOWS) -> WorkflowPortfolio:
    """Evaluate every workflow against the governance portfolio and summarize readiness."""
    evaluations = tuple(evaluate_workflow(wf, portfolio) for wf in workflows)
    statuses = [item.status for item in evaluations]
    usable = sum(1 for status in statuses if status in {"pronto", "util"})
    stuck = statuses.count("bloqueado")
    mean_score = round(sum(item.score for item in evaluations) / len(evaluations)) if evaluations else 0
    summary = (
        f"Workflows avaliados: {len(evaluations)}",
        f"Workflows prontos/uteis: {usable}",
        f"Workflows bloqueados: {stuck}",
        f"Score medio de workflow: {mean_score}",
    )
    return WorkflowPortfolio(workflows=tuple(workflows), evaluations=evaluations, summary=summary)
def workflow_markdown(portfolio: WorkflowPortfolio) -> str:
    """Render the workflow portfolio as a markdown report, worst score first."""
    out: list[str] = ["# Workflows humanos operacionais", ""]
    out += [f"- {item}" for item in portfolio.summary]
    out.append("")
    catalog = {wf.workflow_id: wf for wf in portfolio.workflows}
    ranked = sorted(portfolio.evaluations, key=lambda ev: (ev.score, ev.workflow_id))
    for ev in ranked:
        wf = catalog[ev.workflow_id]
        out += [
            f"## {wf.title}",
            "",
            f"- workflow_id: `{wf.workflow_id}`",
            f"- perfil principal: `{wf.primary_profile}`",
            f"- status: `{ev.status}`",
            f"- score: `{ev.score}`",
            f"- passos: `{ev.passed_steps}/{ev.total_steps}`",
            f"- risco se faltar: {wf.risk_if_missing}",
        ]
        if ev.blocking_steps:
            out.append("- bloqueios:")
            out += [f"  - {item}" for item in ev.blocking_steps]
        if ev.next_actions:
            out.append("- proximas acoes:")
            out += [f"  - {item}" for item in ev.next_actions]
        out += ["", "Passos:"]
        for st in wf.steps:
            out.append(f"- `{st.step_id}` {st.title} ({st.owner_platform}) -> {st.human_output}")
        out.append("")
    return "\n".join(out).strip() + "\n"
def workflow_rows(portfolio: WorkflowPortfolio) -> list[list[str]]:
    """Tabulate evaluations (header row first, then one row per workflow id, sorted)."""
    header = ["workflow_id", "status", "score", "passed_steps", "total_steps", "blocking_steps", "next_actions"]
    body = [
        [
            ev.workflow_id,
            ev.status,
            str(ev.score),
            str(ev.passed_steps),
            str(ev.total_steps),
            " | ".join(ev.blocking_steps),
            " | ".join(ev.next_actions),
        ]
        for ev in sorted(portfolio.evaluations, key=lambda item: item.workflow_id)
    ]
    return [header, *body]
def workflow_action_items(portfolio: WorkflowPortfolio, limit: int = 25) -> tuple[str, ...]:
    """Collect "workflow_id: action" strings, worst-scoring workflows first.

    NOTE(review): as in the original incremental loop, the *limit* caps the raw
    (pre-dedup) entries, so duplicates count toward the cap and the deduplicated
    result may hold fewer than *limit* items.
    """
    ranked = sorted(portfolio.evaluations, key=lambda ev: (ev.score, ev.workflow_id))
    labelled = [f"{ev.workflow_id}: {action}" for ev in ranked for action in ev.next_actions]
    return merge_unique(labelled[:limit])