diff --git a/src/mais_humana/cli.py b/src/mais_humana/cli.py
index 7ccd5a2..82d6099 100644
--- a/src/mais_humana/cli.py
+++ b/src/mais_humana/cli.py
@@ -49,6 +49,7 @@ from .repository_mesh_runtime import (
 from .repository_mesh_semantic import write_repository_mesh_semantic_state
 from .repository_mesh_readiness import build_mesh_readiness_report, write_readiness_artifacts
 from .repository_mesh_gitea import build_gitea_mesh_plan, write_gitea_plan_artifacts
+from .router000_exit_orders import run_router000_exit_orders
 from .scanner import environment_summary, scan_ecosystem
 from .storage import table_counts
 from .targeted_sync_audit import run_targeted_sync_audit
@@ -182,6 +183,15 @@ def build_parser() -> argparse.ArgumentParser:
     institutional_assurance.add_argument("--plugin-auth-attempt", default="")
     institutional_assurance.add_argument("--no-central", action="store_true")
     institutional_assurance.add_argument("--limit", type=int, default=40)
+    router000_orders = sub.add_parser("router000-exit-orders", help="Write Router 000 output service orders into affected central folders.")
+    router000_orders.add_argument("--ecosystem-root", default="G:/_codex-git")
+    router000_orders.add_argument("--project-root", default="G:/_codex-git/tudo-para-ia-mais-humana")
+    router000_orders.add_argument(
+        "--central-projects-root",
+        default="G:/_codex-git/nucleo-gestao-operacional/central-de-ordem-de-servico/projects",
+    )
+    router000_orders.add_argument("--executive-limit", type=int, default=5)
+    router000_orders.add_argument("--managerial-limit", type=int, default=5)
 
     return parser
 
@@ -677,6 +687,22 @@ def command_institutional_assurance(args: argparse.Namespace) -> int:
     return 0
 
 
+def command_router000_exit_orders(args: argparse.Namespace) -> int:
+    report, records = run_router000_exit_orders(
+        ecosystem_root=Path(args.ecosystem_root),
+        project_root=Path(args.project_root),
+        central_projects_root=Path(args.central_projects_root),
+        executive_limit=int(args.executive_limit),
+        managerial_limit=int(args.managerial_limit),
+    )
+    payload = {
+        "report": report.to_dict(),
+        "generatedFiles": [record.path for record in records],
+    }
+    print(json.dumps(payload, ensure_ascii=False, indent=2))
+    return 0
+
+
 def main(argv: list[str] | None = None) -> int:
     parser = build_parser()
     args = parser.parse_args(argv)
@@ -728,6 +754,8 @@ def main(argv: list[str] | None = None) -> int:
         return command_canonical_migration_plan(args)
     if args.command == "institutional-assurance":
         return command_institutional_assurance(args)
+    if args.command == "router000-exit-orders":
+        return command_router000_exit_orders(args)
     parser.error(f"unknown command: {args.command}")
     return 2
 
diff --git a/src/mais_humana/router000_exit_orders.py b/src/mais_humana/router000_exit_orders.py
new file mode 100644
index 0000000..89ff182
--- /dev/null
+++ b/src/mais_humana/router000_exit_orders.py
@@ -0,0 +1,1339 @@
+"""Router 000 exit-order materialization.
+
+The permanent router order is broad by design. This module turns the six
+institutional decisions into bounded, repeatable service-order output for the
+central dossiers affected by the round. It does not decide policy again; it
+creates actionable continuity in the official folders with the expected
+structure: five executive orders and five managerial orders per platform, each
+with five fronts and five themes.
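+
+A minimal, illustrative call (the paths simply mirror the CLI defaults in
+``cli.build_parser`` and are not required by this module)::
+
+    from pathlib import Path
+
+    report, records = run_router000_exit_orders(
+        ecosystem_root=Path("G:/_codex-git"),
+        project_root=Path("G:/_codex-git/tudo-para-ia-mais-humana"),
+        central_projects_root=Path(
+            "G:/_codex-git/nucleo-gestao-operacional/central-de-ordem-de-servico/projects"
+        ),
+    )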
+"""
+
+from __future__ import annotations
+
+import csv
+import hashlib
+import io
+import json
+import re
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Iterable, Mapping, Sequence
+
+from .institutional_decisions import INSTITUTIONAL_DECISIONS, POLICY_VERSION
+from .models import GeneratedFile, as_plain_data, utc_now
+from .storage import connect, upsert_files
+
+
+ROUTER000_ORDER_RELATION = "000-ROTEADOR-PERMANENTE-DE-ORDEM_DE_SERVICO"
+ROUTER000_EXIT_ORDER_VERSION = "2026-05-04.router000-exit-orders.v1"
+
+
+@dataclass(frozen=True, slots=True)
+class Router000PlatformTarget:
+    """Central dossier that receives router output orders."""
+
+    central_folder_name: str
+    project_id: str
+    repo_name: str
+    real_repo: str
+    owner_role: str
+    primary_focus: tuple[str, ...]
+    known_limitation: str = ""
+
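+    # Illustrative mappings implied by the property below (not an exhaustive
+    # table; the legacy "-plataform" spelling stays on disk as an alias):
+    #   "tudo-para-ia-docs-plataform" -> "tudo-para-ia-docs-platform"
+    #   "tudo-para-ia-mais-humana"    -> "tudo-para-ia-mais-humana-platform"
+    #   "mcps-gateway"                -> "mcps-gateway" (already canonical)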
+    @property
+    def canonical_project_id(self) -> str:
+        if self.project_id.endswith("-plataform"):
+            return self.project_id.removesuffix("-plataform") + "-platform"
+        if self.project_id == "tudo-para-ia-mais-humana":
+            return self.project_id + "-platform"
+        return self.project_id
+
+    def to_dict(self) -> dict[str, Any]:
+        return as_plain_data(self)
+
+
+@dataclass(frozen=True, slots=True)
+class OrderFront:
+    """One front with five concrete themes."""
+
+    title: str
+    themes: tuple[str, ...]
+
+    def to_dict(self) -> dict[str, Any]:
+        return as_plain_data(self)
+
+
+@dataclass(frozen=True, slots=True)
+class Router000OrderDefinition:
+    """Template-neutral order definition before numbering."""
+
+    order_type: str
+    slug: str
+    title: str
+    purpose: str
+    object_scope: str
+    reason: str
+    expected_result: str
+    priority: str
+    affected_paths: tuple[str, ...]
+    validations: tuple[str, ...]
+    fronts: tuple[OrderFront, ...]
+
+    def to_dict(self) -> dict[str, Any]:
+        return as_plain_data(self)
+
+
+@dataclass(frozen=True, slots=True)
+class MaterializedRouter000Order:
+    """Order definition with final file path and number."""
+
+    order_id: str
+    order_type: str
+    sequence: int
+    path: str
+    target: Router000PlatformTarget
+    definition: Router000OrderDefinition
+
+    def to_dict(self) -> dict[str, Any]:
+        return as_plain_data(self)
+
+
+@dataclass(frozen=True, slots=True)
+class Router000ExitOrderReport:
+    """Result of writing router output orders."""
+
+    report_id: str
+    generated_at: str
+    version: str
+    policy_version: str
+    central_projects_root: str
+    project_root: str
+    platforms_requested: int
+    platforms_written: int
+    executive_orders: int
+    managerial_orders: int
+    generated_files: tuple[str, ...]
+    warnings: tuple[str, ...]
+    source_hash: str
+
+    def to_dict(self) -> dict[str, Any]:
+        return as_plain_data(self)
+
+
+def default_router000_platform_targets(ecosystem_root: Path, central_projects_root: Path) -> tuple[Router000PlatformTarget, ...]:
+    """Return platform dossiers affected by the current router order."""
+
+    del central_projects_root
+    return (
+        Router000PlatformTarget(
+            central_folder_name="_repo_nucleo-gestao-operacional",
+            project_id="nucleo-gestao-operacional",
+            repo_name="nucleo-gestao-operacional",
+            real_repo=str(ecosystem_root / "nucleo-gestao-operacional"),
+            owner_role="governance",
+            primary_focus=(
+                "retention",
+                "cleanup",
+                "canonical_names",
+                "development_execution",
+            ),
+        ),
+        Router000PlatformTarget(
+            central_folder_name="03_repo_tudo-para-ia-customer-ops-platform",
+            project_id="tudo-para-ia-customer-ops-platform",
+            repo_name="tudo-para-ia-customer-ops-platform",
+            real_repo=str(ecosystem_root / "tudo-para-ia-customer-ops-platform"),
+            owner_role="customer_operations",
+            primary_focus=("retention", "development_execution", "mcp_acceptance"),
+        ),
+        Router000PlatformTarget(
+            central_folder_name="04_repo_tudo-para-ia-docs-plataform",
+            project_id="tudo-para-ia-docs-plataform",
+            repo_name="tudo-para-ia-docs-plataform",
+            real_repo=str(ecosystem_root / "tudo-para-ia-docs-plataform"),
+            owner_role="docs_full_platform",
+            primary_focus=("docs_full", "semantic_search", "retention"),
+        ),
+        Router000PlatformTarget(
+            central_folder_name="08_repo_tudo-para-ia-integracoes-plataform",
+            project_id="tudo-para-ia-integracoes-plataform",
+            repo_name="tudo-para-ia-integracoes-plataform",
+            real_repo=str(ecosystem_root / "tudo-para-ia-integracoes-plataform"),
+            owner_role="integration_owner",
+            primary_focus=("mcp_acceptance", "canonical_names", "biblioteca_privada"),
+        ),
+        Router000PlatformTarget(
+            central_folder_name="10_repo_tudo-para-ia-mcps-internos-plataform",
+            project_id="tudo-para-ia-mcps-internos-plataform",
+            repo_name="tudo-para-ia-mcps-internos-plataform",
+            real_repo=str(ecosystem_root / "tudo-para-ia-mcps-internos-plataform"),
+            owner_role="mcp_control_plane",
+            primary_focus=("mcp_acceptance", "admin_ui", "development_execution"),
+        ),
+        Router000PlatformTarget(
+            central_folder_name="14_repo_tudo-para-ia-ui-platform",
+            project_id="tudo-para-ia-ui-platform",
+            repo_name="tudo-para-ia-ui-platform",
+            real_repo=str(ecosystem_root / "tudo-para-ia-ui-platform"),
+            owner_role="same_source_renderer",
+            primary_focus=("docs_full", "mcp_acceptance", "same_source_ui"),
+        ),
+        Router000PlatformTarget(
+            central_folder_name="15_repo_tudo-para-ia-mais-humana-platform",
+            project_id="tudo-para-ia-mais-humana",
+            repo_name="tudo-para-ia-mais-humana",
+            real_repo=str(ecosystem_root / "tudo-para-ia-mais-humana"),
+            owner_role="human_assurance",
+            primary_focus=tuple(decision.decision_id for decision in INSTITUTIONAL_DECISIONS),
+        ),
+        Router000PlatformTarget(
+            central_folder_name="mcps-gateway",
+            project_id="mcps-gateway",
+            repo_name="mcps-gateway",
+            real_repo=str(ecosystem_root / "mcps-gateway"),
+            owner_role="gateway_dossier_pending",
+            primary_focus=("mcp_acceptance", "gateway_readback"),
+            known_limitation="Repositorio real local nao confirmado; manter continuidade gerencial e evidencia de localizacao.",
+        ),
+    )
+
+
+def _front(title: str, themes: Iterable[str]) -> OrderFront:
+    items = tuple(str(item).strip() for item in themes if str(item).strip())
+    if len(items) != 5:
+        raise ValueError(f"front {title!r} must contain exactly five themes")
+    return OrderFront(title=title, themes=items)
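+
+
+# The five-theme contract is enforced at build time; a hypothetical miscounted
+# front (title and themes invented purely for illustration) fails fast:
+#
+#     _front("Exemplo", ("tema 1", "tema 2"))
+#     -> ValueError: front 'Exemplo' must contain exactly five themes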
+
+
+def _shared_validations(target: Router000PlatformTarget) -> tuple[str, ...]:
+    validations = [
+        "registrar evidencia no dossie central da plataforma",
+        "atualizar controle-semantico.sqlite quando arquivo for criado ou alterado",
+        "registrar EXECUTADO e PENDENCIAS-CODEX com valores sensiveis redigidos",
+        "validar que aceite externo usa rota plataforma -> MCP -> plataforma",
+        "registrar sandbox workspace-write como pendencia real de deploy/push",
+    ]
+    if target.known_limitation:
+        validations.append("confirmar ou atualizar a limitacao material registrada no README da pasta")
+    return tuple(validations)
+
+
+def _shared_paths(target: Router000PlatformTarget, central_folder: Path) -> tuple[str, ...]:
+    paths = [
+        str(central_folder / "README.md"),
+        str(central_folder / "current"),
+        str(central_folder / "orders"),
+        str(central_folder / "reports"),
+        str(central_folder / "indexes"),
+        str(central_folder / "status"),
+        str(central_folder / "audit"),
+        str(central_folder / "controle-semantico.sqlite"),
+        target.real_repo,
+    ]
+    return tuple(paths)
+
+
+def executive_order_definitions(target: Router000PlatformTarget, central_folder: Path) -> tuple[Router000OrderDefinition, ...]:
+    """Build five executive output orders for one platform target."""
+
+    paths = _shared_paths(target, central_folder)
+    validations = _shared_validations(target)
+    return (
+        Router000OrderDefinition(
+            order_type="EXECUTIVA",
+            slug="router000-materializar-docs-pleno-response-ready",
+            title="Materializar Docs pleno e resposta por suficiencia documental",
+            purpose="Transformar a decisao Docs pleno em evidencia operacional consumivel pela plataforma.",
+            object_scope=f"Plataforma {target.project_id}; foco {', '.join(target.primary_focus)}.",
+            reason="A ordem 000 define Docs como plataforma plena e exige fonte, hash, autoridade e lacuna quando a resposta nao existir.",
+            expected_result="Docs deixa de ser tratado como catalogOnly permanente e a plataforma registra como consome ou produz evidencia documental.",
+            priority="alta",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Docs como plataforma operacional",
+                    (
+                        "Confirmar se a plataforma produz documento institucional, consome Docs ou depende de resposta Docs.",
+                        "Mapear documentos responseReady relevantes para a plataforma.",
+                        "Separar documento encontrado, parcial, nao encontrado e conflitante.",
+                        "Registrar authority, sourceHash, version e data para documento usado.",
+                        "Criar pendencia automatica quando a resposta documental nao existir.",
+                    ),
+                ),
+                _front(
+                    "Busca semantica e cobertura",
+                    (
+                        "Identificar temas da plataforma que precisam de busca semantica em Docs.",
+                        "Relacionar cobertura por plataforma, equipe e funcao humana.",
+                        "Registrar lacunas de cobertura em reports e SQL semantico.",
+                        "Garantir leitura operacional ampla sem escrita desgovernada.",
+                        "Preparar consulta GPT a Docs como fonte institucional.",
+                    ),
+                ),
+                _front(
+                    "Evidencia documental",
+                    (
+                        "Gerar ou atualizar evidenceId documental.",
+                        "Validar que hashes nao exponham segredo bruto.",
+                        "Vincular documento a contrato, ordem ou decisao institucional.",
+                        "Registrar traceId e auditId quando houver consumo via MCP.",
+                        "Publicar resumo compacto para UI/GPT quando aplicavel.",
+                    ),
+                ),
+                _front(
+                    "Integracao com MCP e UI",
+                    (
+                        "Definir envelope de consulta Docs via MCP para consumo cross-platform.",
+                        "Garantir que UI renderize a mesma fonte que GPT explica.",
+                        "Registrar permissao minima para leitura administrativa.",
+                        "Validar status responseReady em smoke local ou report tecnico.",
+                        "Criar ordem tecnica se faltar tool ou endpoint material.",
+                    ),
+                ),
+                _front(
+                    "Fechamento executivo",
+                    (
+                        "Registrar EXECUTADO com arquivos e comandos usados.",
+                        "Registrar PENDENCIAS-CODEX sem valores sensiveis.",
+                        "Atualizar indice da rodada.",
+                        "Atualizar status da plataforma.",
+                        "Deixar continuidade especifica quando a evidencia for parcial.",
+                    ),
+                ),
+            ),
+        ),
+        Router000OrderDefinition(
+            order_type="EXECUTIVA",
+            slug="router000-validar-aceite-mcp-cross-platform",
+            title="Validar aceite obrigatorio via MCP em circulacao cross-platform",
+            purpose="Impedir aceite puramente bilateral quando houver dado, contrato, comando ou evidencia entre plataformas.",
+            object_scope=f"Plataforma {target.project_id}; relacao com MCPs Internos e consumidores diretos.",
+            reason="A decisao institucional exige rota plataforma -> MCP -> plataforma com contrato, hash, permissao e evidencia.",
+            expected_result="Toda circulacao administrativa externa da plataforma tem envelope MCP completo ou excecao formal registrada.",
+            priority="critica",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Origem e destino",
+                    (
+                        "Mapear originPlatformId da plataforma ou modulo.",
+                        "Mapear destinationPlatformId de cada consumidor externo.",
+                        "Separar fluxo interno de fluxo cross-platform.",
+                        "Bloquear bypass direto quando houver circulacao administrativa.",
+                        "Registrar excecao apenas com prova de gargalo material.",
+                    ),
+                ),
+                _front(
+                    "Contrato e hash",
+                    (
+                        "Exigir contractId e contractHash.",
+                        "Exigir sourcePayloadHash e sourceRecordsHash.",
+                        "Validar que acceptanceHash nao substitui contrato.",
+                        "Registrar versao do contrato e politica de deprecacao.",
+                        "Criar pendencia quando hash estiver ausente.",
+                    ),
+                ),
+                _front(
+                    "Etiquetas e rastreabilidade",
+                    (
+                        "Gerar inputLabel para entrada MCP.",
+                        "Preservar traceId durante processamento.",
+                        "Gerar outputLabel para saida ao consumidor.",
+                        "Registrar auditId e evidenceId.",
+                        "Manter lastro em report e SQL semantico.",
+                    ),
+                ),
+                _front(
+                    "Permissao do consumidor",
+                    (
+                        "Validar permissionId requerido.",
+                        "Verificar se consumidor tem escopo operacional.",
+                        "Separar leitura, acao, auditoria e explicacao.",
+                        "Garantir que segredo bruto nao atravesse o envelope.",
+                        "Registrar falha como bloqueio real quando permissao faltar.",
+                    ),
+                ),
+                _front(
+                    "Evidencia de aceite",
+                    (
+                        "Registrar accepted, rejected ou partial.",
+                        "Guardar payload compacto para UI/GPT.",
+                        "Gerar evidencia antes/depois quando houver operacao sensivel.",
+                        "Atualizar indice de aceites.",
+                        "Criar continuidade para readback remoto quando workspace-write impedir deploy.",
+                    ),
+                ),
+            ),
+        ),
+        Router000OrderDefinition(
+            order_type="EXECUTIVA",
+            slug="router000-aplicar-retencao-legalhold-rollback",
+            title="Aplicar retencao 10 anos, legal hold e rollback operacional",
+            purpose="Classificar evidencias institucionais e operacoes sensiveis com retencao e rollback.",
+            object_scope=f"Dossie central, projeto real e artefatos de evidencia de {target.project_id}.",
+            reason="A ordem 000 define retencao padrao de 10 anos para prova institucional e exige rollback em operacao sensivel.",
+            expected_result="Contratos, aceites, hashes, auditorias e decisoes ficam classificados como institutional_10y quando aplicavel.",
+            priority="alta",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Classificacao de evidencia",
+                    (
+                        "Classificar decisao, contrato, aceite, evidencia e auditoria como institutional_10y.",
+                        "Separar cache, build, temporario e log bruto sem valor unico.",
+                        "Registrar quando uma duplicata operacional pode ter prazo menor.",
+                        "Marcar evidencia unica como nao descartavel.",
+                        "Atualizar SQL semantico com funcao do arquivo alterado.",
+                    ),
+                ),
+                _front(
+                    "Legal hold",
+                    (
+                        "Definir campo legalHold para evidencia sensivel.",
+                        "Bloquear descarte quando legalHold estiver ativo.",
+                        "Registrar owner responsavel por liberar descarte futuro.",
+                        "Relacionar legalHold a contrato ou ordem quando existir.",
+                        "Criar pendencia se nao houver owner material.",
+                    ),
+                ),
+                _front(
+                    "Rollback obrigatorio",
+                    (
+                        "Exigir rollbackPlan para publicacao, migracao e mudanca de contrato.",
+                        "Registrar beforeAfterEvidence para alteracao sensivel.",
+                        "Bloquear operacao de risco sem plano de reversao.",
+                        "Separar dryRun de execucao real.",
+                        "Gerar ordem de deploy apenas quando houver permissao material.",
+                    ),
+                ),
+                _front(
+                    "Readback e prova",
+                    (
+                        "Guardar readback local quando remoto estiver bloqueado.",
+                        "Registrar pendencia workspace-write para deploy/push.",
+                        "Validar que hashes nao exponham segredo.",
+                        "Conectar evidencia ao MCP quando for cross-platform.",
+                        "Atualizar status com impacto operacional.",
+                    ),
+                ),
+                _front(
+                    "Fechamento de retencao",
+                    (
+                        "Atualizar reports com classificacao aplicada.",
+                        "Atualizar indexes com fonte da evidencia.",
+                        "Atualizar audit com risco residual.",
+                        "Registrar ordem de saida para lacuna material.",
+                        "Confirmar que limpeza operacional nao foi chamada de expurgo.",
+                    ),
+                ),
+            ),
+        ),
+        Router000OrderDefinition(
+            order_type="EXECUTIVA",
+            slug="router000-normalizar-nomes-e-biblioteca-privada",
+            title="Normalizar nomes canonicos platform e Biblioteca Privada como modulo",
+            purpose="Evitar que aliases legados criem plataformas duplicadas ou owners errados.",
+            object_scope=f"Nomes, aliases, matrizes e contratos relacionados a {target.project_id}.",
+            reason="A ordem 000 define sufixo canonico platform e reposiciona Biblioteca Privada como modulo/produto de Integracoes.",
+            expected_result="A plataforma registra nome canonico, alias legado e owner correto sem criar ownerPlatformId indevido.",
+            priority="alta",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Nome canonico",
+                    (
+                        "Identificar projectId atual e canonicalProjectId.",
+                        "Manter numero apenas na pasta central.",
+                        "Tratar sufixo plataform como alias legado.",
+                        "Evitar novo providerId baseado em alias.",
+                        "Registrar politica de deprecacao de alias quando houver rename futuro.",
+                    ),
+                ),
+                _front(
+                    "Biblioteca Privada",
+                    (
+                        "Remover Biblioteca Privada de listas como plataforma autonoma.",
+                        "Registrar ownerPlatformId correto como Integracoes.",
+                        "Tratar Biblioteca como app, produto ou modulo conforme arquitetura real.",
+                        "Revisar productId e appId quando houver evidencia material.",
+                        "Criar pendencia se documento ainda apontar owner proprio.",
+                    ),
+                ),
+                _front(
+                    "Matrizes e readiness",
+                    (
+                        "Revisar matriz de plataformas da pasta.",
+                        "Revisar readiness e relatorios existentes.",
+                        "Atualizar Docs e UI quando consumirem nomes legados.",
+                        "Garantir que SQL semantico nao duplique plataforma.",
+                        "Registrar divergencia sem alterar repositorio real sem janela institucional.",
+                    ),
+                ),
+                _front(
+                    "Contratos MCP",
+                    (
+                        "Validar originPlatformId e destinationPlatformId canonicos.",
+                        "Permitir aliases legados apenas como compatibilidade temporaria.",
+                        "Registrar aliasStatus no envelope quando aplicavel.",
+                        "Atualizar evidenceId de aceite se nome mudar.",
+                        "Criar ordem para consumidores que ainda exigem nome legado sem contrato.",
+                    ),
+                ),
+                _front(
+                    "Fechamento de nomenclatura",
+                    (
+                        "Registrar arquivos revisados.",
+                        "Atualizar indice de aliases.",
+                        "Atualizar pendencias reais.",
+                        "Registrar risco de rename se houver deploy ativo.",
+                        "Deixar proxima ordem apenas para dependencias nao resolvidas.",
+                    ),
+                ),
+            ),
+        ),
+        Router000OrderDefinition(
+            order_type="EXECUTIVA",
+            slug="router000-habilitar-execucao-dev-auditavel",
+            title="Habilitar execucao ampla GPT/Codex em desenvolvimento com auditoria",
+            purpose="Permitir simulacao operacional segura sem bloquear a rodada por leitura passiva indevida.",
+            object_scope=f"Fluxos de desenvolvimento, teste, simulacao e administracao em {target.project_id}.",
+            reason="A ordem 000 autoriza GPT/Codex em desenvolvimento, bloqueando apenas destruicao, segredo e efeito externo real sem autorizacao.",
+            expected_result="A plataforma diferencia simulacao auditavel de acao real, destrutiva, externa ou com segredo.",
+            priority="alta",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Acoes permitidas em desenvolvimento",
+                    (
+                        "Simular usuario, organizacao e tenant com truthState.",
+                        "Simular compra, credencial e sessao sem valor real exposto.",
+                        "Operar admin e suporte simulados com actorId.",
+                        "Criar massa de teste reversivel.",
+                        "Executar validacao local e dryRun quando aplicavel.",
+                    ),
+                ),
+                _front(
+                    "Controles obrigatorios",
+                    (
+                        "Exigir truthState em cada evidencia.",
+                        "Exigir actorId para GPT/Codex.",
+                        "Exigir auditId para acao administrativa.",
+                        "Registrar dryRun quando a acao nao deve afetar estado real.",
+                        "Marcar real, simulado, teste, parcial, stale, bloqueado ou derivado.",
+                    ),
+                ),
+                _front(
+                    "Bloqueios absolutos",
+                    (
+                        "Bloquear destruicao de banco ou repositorio.",
+                        "Bloquear apagamento de historico, evidencia ou dado real.",
+                        "Bloquear cobranca real e mensagem real a terceiro.",
+                        "Bloquear vazamento de segredo bruto.",
+                        "Bloquear producao sem rollback e autorizacao.",
+                    ),
+                ),
+                _front(
+                    "Evidencia e redaction",
+                    (
+                        "Usar credentialRef em vez de valor secreto.",
+                        "Redigir Authorization, token, cookie e cfat quando aparecerem em fonte.",
+                        "Registrar tentativa Cloudflare sem repetir token.",
+                        "Gerar evidencia de falha de plugin como on-request negado/cancelado.",
+                        "Atualizar pendencias sem valor sensivel.",
+                    ),
+                ),
+                _front(
+                    "Fechamento de execucao",
+                    (
+                        "Registrar comando ou teste usado.",
+                        "Atualizar reports com resultado.",
+                        "Atualizar status com bloqueio real se workspace-write impedir deploy.",
+                        "Criar ordem de saida para validacao remota.",
+                        "Evitar transformar bloqueio resolvivel localmente em ordem futura.",
+                    ),
+                ),
+            ),
+        ),
+    )
+
+
+def managerial_order_definitions(target: Router000PlatformTarget, central_folder: Path) -> tuple[Router000OrderDefinition, ...]:
+    """Build five managerial output orders for one platform target."""
+
+    paths = _shared_paths(target, central_folder)
+    validations = _shared_validations(target)
+    return (
+        Router000OrderDefinition(
+            order_type="GERENCIAL",
+            slug="router000-governar-docs-pleno-conhecimento-operacional",
+            title="Governar Docs pleno como conhecimento operacional da plataforma",
+            purpose="Avaliar a plataforma no ecossistema e pactuar sua relacao com Docs pleno.",
+            object_scope=f"Governanca documental de {target.project_id} e relacao com Docs, MCP e UI.",
+            reason="Sem governanca, Docs pode voltar a ser tratado como catalogOnly ou fonte insuficiente para GPT e equipes.",
+            expected_result="A plataforma possui direcao gerencial para cobertura documental, resposta de suficiencia e pendencias automaticas.",
+            priority="alta",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Papel de Docs",
+                    (
+                        "Definir se a plataforma e produtora, consumidora ou validadora de Docs.",
+                        "Pactuar autoridade documental por tema.",
+                        "Definir criterio de resposta found, partial, not_found e conflicting.",
+                        "Definir prioridade de lacunas documentais.",
+                        "Garantir que Docs seja fonte para GPT e painel humano.",
+                    ),
+                ),
+                _front(
+                    "Cobertura institucional",
+                    (
+                        "Cobrir plataforma, equipe e funcao humana.",
+                        "Relacionar contratos e runbooks obrigatorios.",
+                        "Definir owner de atualizacao documental.",
+                        "Criar maturidade por documento.",
+                        "Definir criterio de aceite de documentacao nova.",
+                    ),
+                ),
+                _front(
+                    "Risco gerencial",
+                    (
+                        "Identificar risco de resposta inventada.",
+                        "Identificar conflito entre documentos e codigo.",
+                        "Identificar ausencia de autoridade.",
+                        "Registrar pendencia que impacta cliente/equipe.",
+                        "Priorizar lacunas com efeito operacional real.",
+                    ),
+                ),
+                _front(
+                    "Integracao com ecossistema",
+                    (
+                        "Conectar Docs a MCPs Internos.",
+                        "Conectar Docs a UI Platform como renderizacao.",
+                        "Conectar Docs a Identity e Business quando houver entitlement ou permissao.",
+                        "Conectar Docs a Customer Ops para suporte.",
+                        "Conectar Docs a Mais Humana para traducao por perfil.",
+                    ),
+                ),
+                _front(
+                    "Direcao de continuidade",
+                    (
+                        "Definir ordens executivas derivadas.",
+                        "Definir ordem gerencial de maturidade documental.",
+                        "Registrar evidencia minima para proxima rodada.",
+                        "Separar decisao humana tomada de tarefa Codex.",
+                        "Atualizar estado e indices gerenciais.",
+                    ),
+                ),
+            ),
+        ),
+        Router000OrderDefinition(
+            order_type="GERENCIAL",
+            slug="router000-pactuar-mcp-como-ponta-de-aceite",
+            title="Pactuar MCP como ponta obrigatoria de aceite cross-platform",
+            purpose="Governar a circulacao administrativa da plataforma dentro do modelo plataforma -> MCP -> plataforma.",
+            object_scope=f"Contratos, aceites, evidencias e permissoes cross-platform de {target.project_id}.",
+            reason="Aceites bilaterais sem MCP reduzem rastreabilidade, permissao e auditoria do ecossistema.",
+            expected_result="A plataforma tem criterio gerencial claro para aceite via MCP, excecoes formais e evidencias.",
+            priority="critica",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Modelo de aceite",
+                    (
+                        "Pactuar que circulacao externa passa pelo MCP.",
+                        "Definir quais fluxos permanecem internos.",
+                        "Definir o que conta como dado, contrato, comando e evidencia.",
+                        "Definir requisito minimo de envelope.",
+                        "Definir status aceito, rejeitado, parcial e bloqueado.",
+                    ),
+                ),
+                _front(
+                    "Governanca de contrato",
+                    (
+                        "Definir owner do contrato.",
+                        "Definir versionamento e hash.",
+                        "Definir permissao por consumidor.",
+                        "Definir politica de alias/deprecacao.",
+                        "Definir evidencias de aceite e readback.",
+                    ),
+                ),
+                _front(
+                    "Excecao formal",
+                    (
+                        "Permitir cache, fila, snapshot ou read model apenas com prova.",
+                        "Exigir gargalo material mensurado.",
+                        "Exigir ownerApprovalId.",
+                        "Exigir rollbackPlan.",
+                        "Exigir alternativa com evidencia MCP.",
+                    ),
+                ),
+                _front(
+                    "Auditoria e maturidade",
+                    (
+                        "Criar matriz de fluxos cross-platform.",
+                        "Classificar fluxos por risco e criticidade.",
+                        "Definir SLO de aceite e readback.",
+                        "Definir responsavel por falhas de permissao.",
+                        "Incluir equipe e cliente no impacto final.",
+                    ),
+                ),
+                _front(
+                    "Continuidade gerencial",
+                    (
+                        "Criar ordens por fluxo sem evidencia.",
+                        "Registrar bloqueios por credencial externa.",
+                        "Separar deploy pendente de governanca concluida.",
+                        "Atualizar reports e status.",
+                        "Consolidar pendencias reais para a proxima rodada.",
+                    ),
+                ),
+            ),
+        ),
+        Router000OrderDefinition(
+            order_type="GERENCIAL",
+            slug="router000-homologar-retencao-legalhold-rollback",
+            title="Homologar retencao institucional, legal hold e rollback",
+            purpose="Definir governanca de preservacao de evidencia por 10 anos e operacao sensivel reversivel.",
+            object_scope=f"Politica de evidencia, auditoria, contrato e rollback de {target.project_id}.",
+            reason="Evidencias sem retencao e operacoes sem rollback fragilizam responsabilidade institucional.",
+            expected_result="A plataforma tem politica gerencial de retencao 10 anos, legal hold e rollback para operacoes de risco.",
+            priority="alta",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Classe de retencao",
+                    (
+                        "Definir institutional_10y para evidencia unica.",
+                        "Definir classes menores para cache e temporarios.",
+                        "Definir criterios de duplicata operacional.",
+                        "Definir retencao de auditoria e decisao institucional.",
+                        "Definir prova minima para suporte e cliente.",
+                    ),
+                ),
+                _front(
+                    "Legal hold",
+                    (
+                        "Definir gatilhos de legal hold.",
+                        "Definir responsavel por ativacao.",
+                        "Definir como bloqueia descarte.",
+                        "Definir registro no SQL semantico.",
+                        "Definir auditoria de liberacao futura.",
+                    ),
+                ),
+                _front(
+                    "Rollback e operacao sensivel",
+                    (
+                        "Definir quais operacoes exigem rollback.",
+                        "Definir evidencia antes/depois.",
+                        "Definir plano de reversao por tipo de operacao.",
+                        "Definir bloqueio gerencial sem rollback.",
+                        "Definir validacao pos-operacao.",
+                    ),
+                ),
+                _front(
+                    "Impacto no ecossistema",
+                    (
+                        "Relacionar Identity quando houver usuario/tenant.",
+                        "Relacionar Business quando houver cobranca/entitlement.",
+                        "Relacionar Docs quando houver contrato/documento.",
+                        "Relacionar MCP quando houver aceite externo.",
+                        "Relacionar Customer Ops quando houver atendimento.",
+                    ),
+                ),
+                _front(
+                    "Saida gerencial",
+                    (
+                        "Criar ordens para evidencias sem classe.",
+                        "Criar ordens para operacoes sem rollback.",
+                        "Registrar pendencia de deploy/push por workspace-write.",
+                        "Atualizar overview e released quando aplicavel.",
+                        "Fechar a rodada com risco residual explicito.",
+                    ),
+                ),
+            ),
+        ),
+        Router000OrderDefinition(
+            order_type="GERENCIAL",
+            slug="router000-governar-limpeza-operacional-sem-expurgo",
+            title="Governar limpeza operacional sem expurgo de dado vivo sensivel",
+            purpose="Separar limpeza operacional permitida de expurgo proibido de dado vivo sensivel.",
+            object_scope=f"Politica de workspace, artefatos e dados sensiveis de {target.project_id}.",
+            reason="Chamadas imprecisas de expurgo podem levar a apagamento indevido de dado vivo, evidencia ou historico.",
+            expected_result="A plataforma tem vocabulario e criterio gerencial para limpeza, higienizacao, inventario e rematerializacao.",
+            priority="alta",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Termos permitidos",
+                    (
+                        "Usar limpeza operacional para cache, build e temporarios.",
+                        "Usar higienizacao de workspace para organizacao local.",
+                        "Usar inventario de peso para projeto inchado.",
+                        "Usar rematerializacao limpa quando repositorio estiver contaminado.",
+                        "Usar segregacao de artefatos para evidencias antigas.",
+                    ),
+                ),
+                _front(
+                    "Termos proibidos",
+                    (
+                        "Nao chamar cache/build de expurgo.",
+                        "Nao expurgar dado vivo sensivel.",
+                        "Nao apagar evidencia unica.",
+                        "Nao apagar historico ou repositorio.",
+                        "Nao transformar worktree suja em descarte sem inventario.",
+                    ),
+                ),
+                _front(
+                    "Inventario de peso",
+                    (
+                        "Classificar node_modules, dist, coverage e temporarios.",
+                        "Identificar artefatos reinstalaveis.",
+                        "Identificar evidencia antiga que deve ser arquivada.",
+                        "Identificar dados sensiveis que nao podem ser apagados.",
+                        "Registrar plano de rematerializacao quando necessario.",
+                    ),
+                ),
+                _front(
+                    "Risco de cliente e equipe",
+                    (
+                        "Avaliar impacto em suporte e auditoria.",
+                        "Avaliar impacto em deploy e rollback.",
+                        "Avaliar impacto em Docs e contratos.",
+                        "Avaliar impacto em MCP e readback.",
+                        "Avaliar impacto em continuidade de ordens.",
+                    ),
+                ),
+                _front(
+                    "Fechamento e continuidade",
+                    (
+                        "Atualizar pendencias sem usar segredo.",
+                        "Criar ordem tecnica para inventario material.",
+                        "Criar ordem gerencial para politica de descarte.",
+                        "Atualizar SQL semantico.",
+                        "Registrar criterio de pronto para limpeza futura.",
+                    ),
+                ),
+            ),
+        ),
+        Router000OrderDefinition(
+            order_type="GERENCIAL",
+            slug="router000-pactuar-nomes-canonicos-e-execucao-dev",
+            title="Pactuar nomes canonicos e execucao ampla em desenvolvimento",
+            purpose="Governar identidade canonica da plataforma e permissao operacional GPT/Codex em ambiente de desenvolvimento.",
+            object_scope=f"Nomes, aliases, permissoes de desenvolvimento e simulacoes de {target.project_id}.",
+            reason="Alias legado e bloqueio abstrato de GPT/Codex geram retrabalho, duplicidade e rodada passiva.",
+            expected_result="A plataforma tem acordo gerencial sobre canonicalName, aliasStatus, ownerPlatformId e controles de execucao segura.",
+            priority="alta",
+            affected_paths=paths,
+            validations=validations,
+            fronts=(
+                _front(
+                    "Canonical names",
+                    (
+                        "Definir canonicalName com sufixo final platform.",
+                        "Definir aliasStatus para nomes plataform/plataforma.",
+                        "Definir ownerPlatformId sem numero da pasta.",
+                        "Definir providerId e productId quando aplicavel.",
+                        "Registrar Biblioteca Privada como modulo quando aparecer.",
+                    ),
+                ),
+                _front(
+                    "Execucao GPT/Codex",
+                    (
+                        "Autorizar simulacao segura em desenvolvimento.",
+                        "Diferenciar permissao institucional de ferramenta disponivel.",
+                        "Definir controles truthState, actorId e auditId.",
+                        "Definir dryRun quando aplicavel.",
+                        "Definir marcas real, simulado, teste, parcial, stale, bloqueado e derivado.",
+                    ),
+                ),
+                _front(
+                    "Bloqueios que exigem autorizacao",
+                    (
+                        "Acao destrutiva exige autorizacao e rollback.",
+                        "Efeito externo real exige autorizacao explicita.",
+                        "Segredo bruto nao pode ser vazado.",
+                        "Producao exige rollback e evidencia antes/depois.",
+                        "Cobranca ou mensagem real a terceiro ficam bloqueadas sem aprovacao.",
+                    ),
+                ),
+                _front(
+                    "Auditoria humana",
+                    (
+                        "Mostrar para equipes o que foi simulado.",
+                        "Mostrar para gestores o que ficou bloqueado.",
+                        "Mostrar para clientes apenas evidencia autorizada.",
+                        "Usar Docs como fonte de explicacao.",
+                        "Usar MCP como lastro de aceite.",
+                    ),
+                ),
+                _front(
+                    "Continuidade",
+                    (
+                        "Criar ordens para aliases divergentes.",
+                        "Criar ordens para simulacoes sem auditId.",
+                        "Criar ordens para flows que precisam de dryRun.",
+                        "Registrar pendencia workspace-write.",
+                        "Atualizar status final da plataforma.",
+                    ),
+                ),
+            ),
+        ),
+    )
+
+
+def _order_dir(central_folder: Path, order_type: str) -> Path:
+    return central_folder / "orders" / ("executivas" if order_type == "EXECUTIVA" else "gerenciais")
+
+
+def _existing_max_sequence(order_dir: Path) -> int:
+    if not order_dir.exists():
+        return 0
+    max_value = 0
+    for path in order_dir.iterdir():
+        if not path.is_file():
+            continue
+        match = re.match(r"^(\d{4})_", path.name)
+        if not match:
+            continue
+        max_value = max(max_value, int(match.group(1)))
+    return max_value
+
+
+def _prefix_for_type(order_type: str) -> str:
+    return "EXECUTIVA" if order_type == "EXECUTIVA" else "GERENCIAL"
+
+
+def _definition_filename(sequence: int, definition: Router000OrderDefinition) -> str:
+    return f"{sequence:04d}_{_prefix_for_type(definition.order_type)}__{definition.slug}.md"
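+
+
+# Numbering resumes after the highest existing ``NNNN_`` prefix per folder, so
+# reruns append instead of overwriting. Illustrative filename, assuming a
+# dossier that already holds 0007_EXECUTIVA__existing.md:
+#
+#     0008_EXECUTIVA__router000-materializar-docs-pleno-response-ready.md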
+
+
+def materialize_order_paths(
+    target: Router000PlatformTarget,
+    central_folder: Path,
+    definitions: Sequence[Router000OrderDefinition],
+) -> tuple[MaterializedRouter000Order, ...]:
+    """Assign final sequence numbers and paths for definitions."""
+
+    starts = {
+        "EXECUTIVA": _existing_max_sequence(_order_dir(central_folder, "EXECUTIVA")),
+        "GERENCIAL": _existing_max_sequence(_order_dir(central_folder, "GERENCIAL")),
+    }
+    counters = dict(starts)
+    materialized: list[MaterializedRouter000Order] = []
+    for definition in definitions:
+        counters[definition.order_type] += 1
+        sequence = counters[definition.order_type]
+        path = _order_dir(central_folder, definition.order_type) / _definition_filename(sequence, definition)
+        materialized.append(
+            MaterializedRouter000Order(
+                order_id=path.stem,
+                order_type=definition.order_type,
+                sequence=sequence,
+                path=str(path),
+                target=target,
+                definition=definition,
+            )
+        )
+    return tuple(materialized)
+
+
+def _fronts_markdown(fronts: Sequence[OrderFront]) -> list[str]:
+    lines: list[str] = []
+    for index, front in enumerate(fronts, start=1):
+        lines.append(f"## Frente {index} - {front.title}")
+        lines.append("")
+        lines.append("Temas:")
+        for theme_index, theme in enumerate(front.themes, start=1):
+            lines.append(f"{theme_index}. {theme}")
+        lines.append("")
+    return lines
+
+
+def render_order_markdown(order: MaterializedRouter000Order) -> str:
+    """Render an official-template-compatible service order."""
+
+    d = order.definition
+    target = order.target
+    central_folder = str(Path(order.path).parents[2])
+    lines = [
+        f"# ORDEM DE SERVICO: {order.order_id}",
+        "",
+        f"- order_id: `{order.order_id}`",
+        f"- tipo: `{d.order_type}`",
+        f"- project_id: `{target.project_id}`",
+        f"- repo_name: `{target.repo_name}`",
+        "- status: `planejada`",
+        f"- prioridade: `{d.priority}`",
+        f"- router_relation: `{ROUTER000_ORDER_RELATION}`",
+        f"- policy_version: `{POLICY_VERSION}`",
+        f"- generator_version: `{ROUTER000_EXIT_ORDER_VERSION}`",
+        "",
+        "## Finalidade da ordem de servico",
+        "",
+        d.purpose,
+        "",
+        "## Objeto da ordem de servico",
+        "",
+        d.object_scope,
+        "",
+        "## Motivo da criacao da ordem de servico",
+        "",
+        d.reason,
+        "",
+        "## Resultado esperado da execucao",
+        "",
+        d.expected_result,
+        "",
+        "## Projeto real e pasta da plataforma",
+        "",
+        f"Pasta da plataforma: `{central_folder}`",
+        "",
+        f"Projeto real: `{target.real_repo}`",
+        "",
+        f"Canonical project id: `{target.canonical_project_id}`",
+        "",
+        f"Owner role: `{target.owner_role}`",
+        "",
+    ]
+    if target.known_limitation:
+        lines.extend(["## Limitacao material conhecida", "", target.known_limitation, ""])
+    lines.extend(["## Arquivos e areas afetadas", ""])
+    for path in d.affected_paths:
+        lines.append(f"- `{path}`")
+    lines.append("")
+    lines.extend(_fronts_markdown(d.fronts))
+    lines.extend(["## Validacoes", ""])
+    for validation in d.validations:
+        lines.append(f"- {validation}")
+    lines.extend(
+        [
+            "",
+            "## Criterio de pronto",
+            "",
+            "- Os cinco temas de cada frente foram tratados ou receberam pendencia real.",
+            "- EXECUTADO e PENDENCIAS-CODEX foram registrados sem segredo bruto.",
+            "- O SQL semantico da pasta recebeu a funcao dos arquivos criados ou alterados.",
+            "- A plataforma manteve apenas continuidade real, sem ordem artificial.",
+            "- Deploy Cloudflare e git push foram registrados como pendencia enquanto sandbox_mode for workspace-write.",
+            "",
+            "## Nao fazer",
+            "",
+            "- Nao reler a 000-ROTEADOR externa para redirecionar esta rodada.",
+            "- Nao vazar valor de token, Authorization, cookie ou segredo bruto.",
+            "- Nao chamar limpeza operacional de expurgo.",
+            "- Nao permitir aceite cross-platform fora do MCP.",
+            "- Nao tratar Biblioteca Privada como plataforma autonoma.",
+            "",
+        ]
+    )
+    return "\n".join(lines).strip() + "\n"
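+
+
+# Illustrative head of one rendered order (values vary per target and round):
+#
+#     # ORDEM DE SERVICO: 0008_EXECUTIVA__router000-materializar-docs-pleno-response-ready
+#
+#     - order_id: `0008_EXECUTIVA__router000-materializar-docs-pleno-response-ready`
+#     - tipo: `EXECUTIVA`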
+
+
+def build_platform_order_batch(
+    target: Router000PlatformTarget,
+    central_projects_root: Path,
+    *,
+    executive_limit: int = 5,
+    managerial_limit: int = 5,
+) -> tuple[MaterializedRouter000Order, ...]:
+    central_folder = central_projects_root / target.central_folder_name
+    definitions = (
+        executive_order_definitions(target, central_folder)[:executive_limit]
+        + managerial_order_definitions(target, central_folder)[:managerial_limit]
+    )
+    return materialize_order_paths(target, central_folder, definitions)
+
+
+def build_all_order_batches(
+    central_projects_root: Path,
+    targets: Sequence[Router000PlatformTarget],
+    *,
+    executive_limit: int = 5,
+    managerial_limit: int = 5,
+) -> tuple[MaterializedRouter000Order, ...]:
+    orders: list[MaterializedRouter000Order] = []
+    for target in targets:
+        orders.extend(
+            build_platform_order_batch(
+                target,
+                central_projects_root,
+                executive_limit=executive_limit,
+                managerial_limit=managerial_limit,
+            )
+        )
+    return tuple(orders)
+
+
+def _order_record(order: MaterializedRouter000Order) -> GeneratedFile:
+    return GeneratedFile(
+        path=order.path,
+        description=f"Ordem {order.order_type.lower()} de saida Router 000 para {order.target.project_id}.",
+        function="router000 exit service order",
+        file_type="markdown",
+        changed_by="mais_humana.router000_exit_orders",
+        change_summary="Criada ordem de continuidade com cinco frentes e cinco temas por frente.",
+        relation_to_order=ROUTER000_ORDER_RELATION,
+    )
+
+
+def _summary_records(project_root: Path) -> tuple[GeneratedFile, ...]:
+    return (
+        GeneratedFile(
+            path=str(project_root / "dados" / "router000-exit-orders.json"),
+            description="Relatorio estruturado das ordens de saida geradas para a 000-ROTEADOR.",
+            function="router000 exit orders report",
+            file_type="json",
+            changed_by="mais_humana.router000_exit_orders",
+            change_summary="Registrado lote de ordens executivas e gerenciais por plataforma.",
+            relation_to_order=ROUTER000_ORDER_RELATION,
+        ),
+        GeneratedFile(
+            path=str(project_root / "matrizes" / "router000-exit-orders.csv"),
+            description="Matriz das ordens de saida geradas para a 000-ROTEADOR.",
+            function="router000 exit orders matrix",
+            file_type="csv",
+            changed_by="mais_humana.router000_exit_orders",
+            change_summary="Criada matriz com plataforma, tipo, sequencia e caminho das ordens.",
+            relation_to_order=ROUTER000_ORDER_RELATION,
+        ),
+        GeneratedFile(
+            path=str(project_root / "ecossistema" / "ROUTER000-EXIT-ORDERS.md"),
+            description="Resumo humano das ordens de saida Router 000.",
+            function="router000 exit orders human summary",
+            file_type="markdown",
+            changed_by="mais_humana.router000_exit_orders",
+            change_summary="Criado resumo de continuidade executiva e gerencial da rodada.",
+            relation_to_order=ROUTER000_ORDER_RELATION,
+        ),
+        GeneratedFile(
+            path=str(project_root / "dados" / "router000-exit-orders-semantic-status.json"),
+            description="Status de escrita do SQL semantico das ordens de saida Router 000.",
+            function="router000 exit orders semantic write status",
+            file_type="json",
+            changed_by="mais_humana.router000_exit_orders",
+            change_summary="Registrado resultado de upsert dos arquivos gerados nos SQLite centrais.",
+            relation_to_order=ROUTER000_ORDER_RELATION,
+        ),
+    )
+
+
+def exit_orders_rows(orders: Sequence[MaterializedRouter000Order]) -> list[list[str]]:
+    rows = [["platform", "canonical_project_id", "type", "sequence", "order_id", "path"]]
+    for order in orders:
+        rows.append(
+            [
+                order.target.project_id,
+                order.target.canonical_project_id,
+                order.order_type,
+                str(order.sequence),
+                order.order_id,
+                order.path,
+            ]
+        )
+    return rows
+
+
+def rows_to_csv(rows: Sequence[Sequence[str]]) -> str:
+    buffer = io.StringIO()
+    writer = csv.writer(buffer, lineterminator="\n")
+    writer.writerows(rows)
+    return buffer.getvalue()
+
+
+def exit_orders_markdown(report: Router000ExitOrderReport, orders: Sequence[MaterializedRouter000Order]) -> str:
+    lines = [
+        "# Router 000 - Ordens de saida",
+        "",
+        f"- report_id: `{report.report_id}`",
+        f"- generated_at: `{report.generated_at}`",
+        f"- version: `{report.version}`",
+        f"- source_hash: `{report.source_hash}`",
+        f"- plataformas: `{report.platforms_written}/{report.platforms_requested}`",
+        f"- executivas: `{report.executive_orders}`",
+        f"- gerenciais: `{report.managerial_orders}`",
+        "",
+        "## Ordens",
+        "",
+    ]
+    by_platform: dict[str, list[MaterializedRouter000Order]] = {}
+    for order in orders:
+        by_platform.setdefault(order.target.project_id, []).append(order)
+    for platform_id, items in sorted(by_platform.items()):
+        lines.extend([f"### {platform_id}", ""])
+        for order in items:
+            lines.append(f"- `{order.order_id}` -> `{order.path}`")
+        lines.append("")
+    lines.extend(["## Avisos", ""])
+    if report.warnings:
+        lines.extend(f"- {warning}" for warning in report.warnings)
+    else:
+        lines.append("- Nenhum aviso de materializacao.")
+    return "\n".join(lines).strip() + "\n"
+
+
+def _stable_hash(payload: Mapping[str, Any]) -> str:
+    encoded = json.dumps(as_plain_data(payload), ensure_ascii=True, sort_keys=True).encode("utf-8")
+    return "sha256:" + hashlib.sha256(encoded).hexdigest()
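+
+
+# ``sort_keys`` plus ASCII-only encoding keeps the digest independent of dict
+# ordering, so an identical order batch always yields the same report_id.
+# Hypothetical sanity check:
+#
+#     assert _stable_hash({"b": 1, "a": 2}) == _stable_hash({"a": 2, "b": 1})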
+
+
+def write_order_files(orders: Sequence[MaterializedRouter000Order]) -> tuple[GeneratedFile, ...]:
+    records: list[GeneratedFile] = []
+    for order in orders:
+        path = Path(order.path)
+        path.parent.mkdir(parents=True, exist_ok=True)
+        path.write_text(render_order_markdown(order), encoding="utf-8")
+        records.append(_order_record(order))
+    return tuple(records)
+
+
+def write_exit_order_summary(
+    report: Router000ExitOrderReport,
+    orders: Sequence[MaterializedRouter000Order],
+    project_root: Path,
+) -> tuple[GeneratedFile, ...]:
+    targets = {
+        project_root / "dados" / "router000-exit-orders.json": json.dumps(report.to_dict(), ensure_ascii=False, indent=2, sort_keys=True),
+        project_root / "matrizes" / "router000-exit-orders.csv": rows_to_csv(exit_orders_rows(orders)),
+        project_root / "ecossistema" / "ROUTER000-EXIT-ORDERS.md": exit_orders_markdown(report, orders),
+    }
+    for path, content in targets.items():
+        path.parent.mkdir(parents=True, exist_ok=True)
+        path.write_text(content, encoding="utf-8")
+    return _summary_records(project_root)[:3]
+
+
+def write_semantic_records(
+    project_root: Path,
+    central_projects_root: Path,
+    records: Sequence[GeneratedFile],
+    targets: Sequence[Router000PlatformTarget],
+) -> GeneratedFile:
+    errors: list[dict[str, str]] = []
+    try:
+        with connect(project_root / "controle-semantico.sqlite") as conn:
+            upsert_files(conn, records)
+            conn.commit()
+    except Exception as exc:
+        errors.append({"sqlite": str(project_root / "controle-semantico.sqlite"), "error": f"{type(exc).__name__}: {exc}"})
+    records_by_central: dict[str, list[GeneratedFile]] = {}
+    for record in records:
+        path = Path(record.path)
+        for target in targets:
+            central = central_projects_root / target.central_folder_name
+            try:
+                path.relative_to(central)
+            except ValueError:
+                continue
+            records_by_central.setdefault(str(central), []).append(record)
+    for central, items in records_by_central.items():
+        sqlite_path = Path(central) / "controle-semantico.sqlite"
+        try:
+            with connect(sqlite_path) as conn:
+                upsert_files(conn, items)
+                conn.commit()
+        except Exception as exc:
+            errors.append({"sqlite": str(sqlite_path), "error": f"{type(exc).__name__}: {exc}"})
+    status_record = _summary_records(project_root)[3]
+    status = {
+        "generatedAt": utc_now(),
+        "ok": not errors,
+        "records": len(records),
+        "centralSqlites": len(records_by_central),
+        "errors": errors,
+    }
+    status_path = Path(status_record.path)
+    status_path.parent.mkdir(parents=True, exist_ok=True)
+    status_path.write_text(json.dumps(status, ensure_ascii=False, indent=2, sort_keys=True), encoding="utf-8")
+    try:
+        with connect(project_root / "controle-semantico.sqlite") as conn:
+            upsert_files(conn, (status_record,))
+            conn.commit()
+    except Exception:
+        pass
+    return status_record
+
+
+def run_router000_exit_orders(
+    *,
+    ecosystem_root: Path,
+    project_root: Path,
+    central_projects_root: Path,
+    targets: Sequence[Router000PlatformTarget] | None = None,
+    executive_limit: int = 5,
+    managerial_limit: int = 5,
+) -> tuple[Router000ExitOrderReport, tuple[GeneratedFile, ...]]:
+    """Write router output orders and update semantic SQL."""
+
+    selected_targets = tuple(targets or default_router000_platform_targets(ecosystem_root, central_projects_root))
+    warnings: list[str] = []
+    existing_targets: list[Router000PlatformTarget] = []
+    for target in selected_targets:
+        central_folder = central_projects_root / target.central_folder_name
+        if not central_folder.exists():
+            warnings.append(f"{target.central_folder_name}: pasta central inexistente; criada durante a materializacao")
+            central_folder.mkdir(parents=True, exist_ok=True)
+        if target.known_limitation:
+            warnings.append(f"{target.project_id}: {target.known_limitation}")
+        existing_targets.append(target)
+    orders = build_all_order_batches(
+        central_projects_root,
+        existing_targets,
+        executive_limit=executive_limit,
+        managerial_limit=managerial_limit,
+    )
+    order_records = write_order_files(orders)
+    seed = {
+        "version": ROUTER000_EXIT_ORDER_VERSION,
+        "orders": [order.to_dict() for order in orders],
+        "warnings": warnings,
+    }
+    source_hash = _stable_hash(seed)
+    report = Router000ExitOrderReport(
+        report_id="router000-exit-orders-" + source_hash.removeprefix("sha256:")[:16],
+        generated_at=utc_now(),
+        version=ROUTER000_EXIT_ORDER_VERSION,
+        policy_version=POLICY_VERSION,
+        central_projects_root=str(central_projects_root),
+        project_root=str(project_root),
+        platforms_requested=len(selected_targets),
+        platforms_written=len(existing_targets),
+        executive_orders=sum(1 for order in orders if order.order_type == "EXECUTIVA"),
+        managerial_orders=sum(1 for order in orders if order.order_type == "GERENCIAL"),
+        generated_files=tuple(record.path for record in order_records),
+        warnings=tuple(warnings),
+        source_hash=source_hash,
+    )
+    summary_records = write_exit_order_summary(report, orders, project_root)
+    all_records = tuple(order_records) + tuple(summary_records)
+    semantic_status = write_semantic_records(project_root, central_projects_root, all_records, existing_targets)
+    final_records = all_records + (semantic_status,)
+    final_report = Router000ExitOrderReport(
+        report_id=report.report_id,
+        generated_at=report.generated_at,
+        version=report.version,
+        policy_version=report.policy_version,
+        central_projects_root=report.central_projects_root,
+        project_root=report.project_root,
+        platforms_requested=report.platforms_requested,
+        platforms_written=report.platforms_written,
+        executive_orders=report.executive_orders,
+        managerial_orders=report.managerial_orders,
+        generated_files=tuple(record.path for record in final_records),
+        warnings=report.warnings,
+        source_hash=report.source_hash,
+    )
+    report_path = project_root / "dados" / "router000-exit-orders.json"
+    report_path.write_text(json.dumps(final_report.to_dict(), ensure_ascii=False, indent=2, sort_keys=True), encoding="utf-8")
+    return final_report, final_records
+
diff --git a/tests/test_router000_exit_orders.py b/tests/test_router000_exit_orders.py
new file mode 100644
index 0000000..bd2b4db
--- /dev/null
+++ b/tests/test_router000_exit_orders.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import unittest
+from pathlib import Path
+
+from mais_humana.router000_exit_orders import (
+    Router000PlatformTarget,
+    build_platform_order_batch,
+    render_order_markdown,
+    run_router000_exit_orders,
+)
+from mais_humana.storage import table_counts
+from tests.helpers import make_tmp
+
+
+class Router000ExitOrderTests(unittest.TestCase):
+    def target(self, root: Path) -> Router000PlatformTarget:
+        return Router000PlatformTarget(
+            central_folder_name="01_repo_sample-platform",
+            project_id="sample-plataform",
+            repo_name="sample-plataform",
+            real_repo=str(root / "sample-plataform"),
+            owner_role="sample_owner",
+            primary_focus=("docs_full", "mcp_acceptance"),
+        )
+
+    def test_order_batch_has_five_executive_and_five_managerial_orders(self) -> None:
+        root = make_tmp()
+        central = root / "central"
+        target = self.target(root)
+        existing_dir = central / target.central_folder_name / "orders" / "executivas"
+        existing_dir.mkdir(parents=True)
+        (existing_dir / "0007_EXECUTIVA__existing.md").write_text("existing\n", encoding="utf-8")
+
+        orders = build_platform_order_batch(target, central)
+
+        self.assertEqual(len(orders), 10)
+        self.assertEqual(sum(1 for order in orders if order.order_type == "EXECUTIVA"), 5)
+        self.assertEqual(sum(1 for order in orders if order.order_type == "GERENCIAL"), 5)
+        self.assertEqual(orders[0].sequence, 8)
+        self.assertTrue(orders[0].order_id.startswith("0008_EXECUTIVA__router000-"))
+        managerial = [order for order in orders if order.order_type == "GERENCIAL"][0]
+        self.assertEqual(managerial.sequence, 1)
+
+    def test_rendered_order_contains_five_fronts_with_five_themes_each(self) -> None:
+        root = make_tmp()
+        central = root / "central"
+        order = build_platform_order_batch(self.target(root), central)[0]
+
+        markdown = render_order_markdown(order)
+
+        self.assertIn("# ORDEM DE SERVICO:", markdown)
+        self.assertEqual(markdown.count("## Frente "), 5)
+        self.assertIn("Temas:\n1.", markdown)
+        self.assertIn("5. Criar pendencia automatica quando a resposta documental nao existir.", markdown)
+        self.assertIn("workspace-write", markdown)
+        self.assertIn("Nao vazar valor de token", markdown)
+
+    def test_run_writes_orders_summary_and_semantic_records(self) -> None:
+        root = make_tmp()
+        ecosystem = root / "ecosystem"
+        project_root = root / "mais-humana"
+        central = root / "central"
+        target = self.target(ecosystem)
+        (central / target.central_folder_name).mkdir(parents=True)
+        project_root.mkdir(parents=True)
+
+        report, records = run_router000_exit_orders(
+            ecosystem_root=ecosystem,
+            project_root=project_root,
+            central_projects_root=central,
+            targets=(target,),
+        )
+
+        self.assertEqual(report.platforms_written, 1)
+        self.assertEqual(report.executive_orders, 5)
+        self.assertEqual(report.managerial_orders, 5)
+        self.assertTrue((project_root / "dados" / "router000-exit-orders.json").exists())
+        self.assertTrue((project_root / "matrizes" / "router000-exit-orders.csv").exists())
+        self.assertTrue((central / target.central_folder_name / "orders" / "executivas").exists())
+        self.assertEqual(len([record for record in records if record.file_type == "markdown"]), 11)
+        counts = table_counts(central / target.central_folder_name / "controle-semantico.sqlite")
+        self.assertGreaterEqual(counts.get("files", 0), 10)
+
+
+if __name__ == "__main__":
+    unittest.main()