auto-sync: tudo-para-ia-mais-humana 2026-05-02 00:10:03

This commit is contained in:
2026-05-02 00:10:03 -03:00
parent 6e230bb979
commit 24923362a7
16 changed files with 1598 additions and 28 deletions

View File

@@ -8,7 +8,7 @@
"path": "G:\\_codex-git\\nucleo-gestao-operacional\\central-de-ordem-de-servico\\projects\\15_repo_tudo-para-ia-mais-humana-platform\\reports\\MCP-GATEWAY-ACCESS-POLICY__RODADA015.md"
}
],
"generatedAt": "2026-05-02T02:38:41+00:00",
"generatedAt": "2026-05-02T03:09:23+00:00",
"ok": false,
"policy": "falha de escrita central nao aborta artefatos do projeto real"
}

View File

@@ -136,7 +136,7 @@
}
],
"endpoint": "https://mcps-gateway.ami-app.workers.dev/v1/execute",
"generated_at": "2026-05-02T02:38:41+00:00",
"generated_at": "2026-05-02T03:09:23+00:00",
"liveReady": true,
"log_retention_days": 30,
"policy_version": "mcp-gateway-access-policy.v1",
@@ -150,7 +150,7 @@
"evidence_id": "evidence-a75a27e0669c49da1db8b615",
"http_status": 200,
"method": "POST",
"observed_at": "2026-05-02T02:38:40+00:00",
"observed_at": "2026-05-02T03:08:48+00:00",
"ok": true,
"request_hash": "3e1c8f057ac439f4b9b3eb7f8f5be9ac36323f08adc23db6fc7d51633076b79a",
"response_excerpt": {
@@ -186,7 +186,7 @@
"evidence_id": "evidence-af37a8d489b0038a7a6b5575",
"http_status": 200,
"method": "POST",
"observed_at": "2026-05-02T02:38:41+00:00",
"observed_at": "2026-05-02T03:08:48+00:00",
"ok": true,
"request_hash": "17e7d8039c8c34e3f570b6de8b386edc1cfd0c079084b0c7013016d2c76b388c",
"response_excerpt": {
@@ -222,7 +222,7 @@
"evidence_id": "evidence-3f0e3b9f829c7ff912b335d0",
"http_status": 200,
"method": "POST",
"observed_at": "2026-05-02T02:38:41+00:00",
"observed_at": "2026-05-02T03:08:48+00:00",
"ok": true,
"request_hash": "dae7d91a59e37901d50c027d3a0792f697902bd4289801edb2a508f3baf177fe",
"response_excerpt": {
@@ -251,7 +251,7 @@
}
],
"rate_limit_per_minute": 30,
"report_id": "mcp-gateway-access-policy-15ac101b6174411e",
"report_id": "mcp-gateway-access-policy-4beabbcbe6c59074",
"required_content_type": "application/json",
"required_method": "POST",
"required_user_agent": "Codex-Mais-Humana-MCP-Publication-Gate/1.0",

View File

@@ -8,7 +8,7 @@
"path": "G:\\_codex-git\\nucleo-gestao-operacional\\central-de-ordem-de-servico\\projects\\15_repo_tudo-para-ia-mais-humana-platform\\reports\\executivos\\MCP-PUBLICATION-GATE-MAIS-HUMANA__RODADA015.md"
}
],
"generatedAt": "2026-05-02T02:38:41+00:00",
"generatedAt": "2026-05-02T03:08:48+00:00",
"ok": false,
"policy": "falha de escrita central nao aborta artefatos do projeto real"
}

View File

@@ -17,6 +17,7 @@
"repo_remote": "https://git.ami.app.br/admin/tudo-para-ia-mais-humana.git"
},
"blockers": [
"runner_node_esbuild_spawn_eperm",
"wrangler_auth_not_confirmed",
"canonical_name_requires_institutional_decision",
"git_sync_blocked"
@@ -52,7 +53,7 @@
],
"next_action": "corrigir credencial Git/Schannel e reconciliar ahead/behind sem reset destrutivo",
"order_id": "0033_EXECUTIVA__sincronizar-git-mais-humana-mcps-central-com-credenciais",
"reason": "fetch/push bloqueado por SEC_E_NO_CREDENTIALS; fetch remoto falhou no ciclo seguro da rodada",
"reason": "sync report 20260501_235902: fetch/probe remote failed with SEC_E_NO_CREDENTIALS on Windows and FETCH_HEAD Permission denied in codex_vm; destructive sync blocked.",
"status": "blocked"
},
{
@@ -111,7 +112,7 @@
"next_action": "homologar host que permita Node, esbuild/workerd e node --test sem spawn EPERM",
"order_id": "0046_GERENCIAL__homologar-runner-oficial-wrangler-node-esbuild",
"reason": "Wrangler autenticou quando executado diretamente, mas deploy dry-run nao ficou confirmado",
"status": "not_run"
"status": "blocked"
},
{
"evidence_refs": [
@@ -149,7 +150,7 @@
"mais_humana.mcp_transit.ledger"
]
},
"generated_at": "2026-05-02T02:38:41+00:00",
"generated_at": "2026-05-02T03:08:48+00:00",
"liveReady": true,
"live_probes": [
{
@@ -158,7 +159,7 @@
"error_code": "",
"evidence_id": "evidence-a75a27e0669c49da1db8b615",
"http_status": 200,
"observed_at": "2026-05-02T02:38:40+00:00",
"observed_at": "2026-05-02T03:08:48+00:00",
"ok": true,
"response_excerpt": {
"__truncated__": true,
@@ -191,7 +192,7 @@
"error_code": "",
"evidence_id": "evidence-af37a8d489b0038a7a6b5575",
"http_status": 200,
"observed_at": "2026-05-02T02:38:41+00:00",
"observed_at": "2026-05-02T03:08:48+00:00",
"ok": true,
"response_excerpt": {
"__truncated__": true,
@@ -224,7 +225,7 @@
"error_code": "",
"evidence_id": "evidence-3f0e3b9f829c7ff912b335d0",
"http_status": 200,
"observed_at": "2026-05-02T02:38:41+00:00",
"observed_at": "2026-05-02T03:08:48+00:00",
"ok": true,
"response_excerpt": {
"__truncated__": true,
@@ -254,7 +255,7 @@
],
"localReady": true,
"provider_id": "mais_humana",
"report_id": "mcp-publication-gate-2026-05-02t0238410000",
"report_id": "mcp-publication-gate-2026-05-02t0308480000",
"status": "partial",
"summary": [
"Provider local Mais Humana pronto: True.",
@@ -266,18 +267,19 @@
"wrangler_runner": {
"account_id": "",
"account_name": "",
"attempted": false,
"attempted": true,
"authenticated": false,
"blockers": [
"runner_node_esbuild_spawn_eperm",
"wrangler_auth_not_confirmed"
],
"command_status": {
"wrangler_deploy_dry_run": "not_confirmed",
"wrangler_version": "unknown",
"wrangler_whoami": "unknown"
"wrangler_deploy_dry_run": "blocked_spawn_eperm",
"wrangler_version": "blocked",
"wrangler_whoami": "blocked"
},
"deploy_dry_run_ok": false,
"raw_summary": "",
"raw_summary": "wrangler direct attempted in round 015 follow-up; wrangler --version, whoami and deployments list all failed before execution with node child_process spawn EPERM on Windows runner; no deploy was attempted.",
"version": ""
}
}

View File

@@ -0,0 +1,10 @@
{
"failures": [
{
"error": "PermissionError: [Errno 13] Permission denied: 'G:\\\\_codex-git\\\\nucleo-gestao-operacional\\\\central-de-ordem-de-servico\\\\projects\\\\15_repo_tudo-para-ia-mais-humana-platform\\\\reports\\\\EXECUTADO__targeted-sync-audit.md'",
"path": "G:\\_codex-git\\nucleo-gestao-operacional\\central-de-ordem-de-servico\\projects\\15_repo_tudo-para-ia-mais-humana-platform\\reports\\EXECUTADO__targeted-sync-audit.md"
}
],
"generatedAt": "2026-05-02T03:09:48+00:00",
"ok": false
}

File diff suppressed because one or more lines are too long

View File

@@ -1,7 +1,7 @@
# Politica de acesso GPT/MCP Gateway
- report_id: `mcp-gateway-access-policy-15ac101b6174411e`
- generated_at: `2026-05-02T02:38:41+00:00`
- report_id: `mcp-gateway-access-policy-4beabbcbe6c59074`
- generated_at: `2026-05-02T03:09:23+00:00`
- policy_version: `mcp-gateway-access-policy.v1`
- endpoint: `https://mcps-gateway.ami-app.workers.dev/v1/execute`
- status: `passed`

View File

@@ -1,7 +1,7 @@
# Gate de publicacao MCP Mais Humana
- report_id: `mcp-publication-gate-2026-05-02t0238410000`
- generated_at: `2026-05-02T02:38:41+00:00`
- report_id: `mcp-publication-gate-2026-05-02t0308480000`
- generated_at: `2026-05-02T03:08:48+00:00`
- provider_id: `mais_humana`
- current_project_id: `tudo-para-ia-mais-humana`
- canonical_project_id: `tudo-para-ia-mais-humana-platform`
@@ -31,13 +31,14 @@
## Wrangler
- attempted: `False`
- attempted: `True`
- version: `nao_confirmada`
- authenticated: `False`
- account_name: `nao_confirmada`
- account_id: `nao_confirmada`
- deploy_dry_run_ok: `False`
- blockers:
- `runner_node_esbuild_spawn_eperm`
- `wrangler_auth_not_confirmed`
## Probes live
@@ -87,7 +88,7 @@
### 0033_EXECUTIVA__sincronizar-git-mais-humana-mcps-central-com-credenciais
- status: `blocked`
- motivo: fetch/push bloqueado por SEC_E_NO_CREDENTIALS; fetch remoto falhou no ciclo seguro da rodada
- motivo: sync report 20260501_235902: fetch/probe remote failed with SEC_E_NO_CREDENTIALS on Windows and FETCH_HEAD Permission denied in codex_vm; destructive sync blocked.
- evidencias: `git_sync_status`
- proxima_acao: corrigir credencial Git/Schannel e reconciliar ahead/behind sem reset destrutivo
@@ -128,7 +129,7 @@
### 0046_GERENCIAL__homologar-runner-oficial-wrangler-node-esbuild
- status: `not_run`
- status: `blocked`
- motivo: Wrangler autenticou quando executado diretamente, mas deploy dry-run nao ficou confirmado
- evidencias: `wrangler_runner`
- proxima_acao: homologar host que permita Node, esbuild/workerd e node --test sem spawn EPERM
@@ -142,6 +143,7 @@
## Blockers
- `runner_node_esbuild_spawn_eperm`
- `wrangler_auth_not_confirmed`
- `canonical_name_requires_institutional_decision`
- `git_sync_blocked`

View File

@@ -0,0 +1,82 @@
# Targeted Git Sync Audit
- report_id: `targeted-sync-audit-322089789211`
- generated_at: `2026-05-02T03:09:48+00:00`
- status: `diverged`
- fetch: `True`
## Summary
- Repositories observed: 3.
- Fetch attempted: True.
- Aligned: 0.
- Blocked or divergent: 3.
- No reset, restore, rebase, pull, merge, or push was executed by this audit.
## Repositories
### mais-humana
- role: projeto real da plataforma 15
- path: `G:\_codex-git\tudo-para-ia-mais-humana`
- exists: `True`
- git: `True`
- branch: `main`
- head: `6e230bb97983`
- ahead: `0`
- behind: `0`
- dirty: `True`
- origin: `https://git.ami.app.br/admin/tudo-para-ia-mais-humana.git`
- expected_origin: `https://git.ami.app.br/admin/tudo-para-ia-mais-humana.git`
- status: `acl_blocked`
- decision: corrigir ACL/locks de .git antes de qualquer merge/push
- fetch_exit: `255`
- fetch_output: `error: cannot open '.git/FETCH_HEAD': Permission denied`
- blockers:
- `git_acl_or_lock_blocked`
### mcps-internos
- role: control-plane MCP que publica as tools Mais Humana
- path: `G:\_codex-git\tudo-para-ia-mcps-internos-plataform`
- exists: `True`
- git: `True`
- branch: `main`
- head: `02a0d7b16f52`
- ahead: `0`
- behind: `0`
- dirty: `True`
- origin: `https://git.ami.app.br/admin/tudo-para-ia-mcps-internos-plataform.git`
- expected_origin: `https://git.ami.app.br/admin/tudo-para-ia-mcps-internos-plataform.git`
- status: `acl_blocked`
- decision: corrigir ACL/locks de .git antes de qualquer merge/push
- fetch_exit: `255`
- fetch_output: `error: cannot open '.git/FETCH_HEAD': Permission denied`
- blockers:
- `git_acl_or_lock_blocked`
### nucleo-central
- role: nucleo e central de ordem de servico
- path: `G:\_codex-git\nucleo-gestao-operacional`
- exists: `True`
- git: `True`
- branch: `main`
- head: `e9b83aff598b`
- ahead: `5`
- behind: `5`
- dirty: `True`
- origin: `https://git.ami.app.br/admin/nucleo-gestao-operacional.git`
- expected_origin: `https://git.ami.app.br/admin/nucleo-gestao-operacional.git`
- status: `acl_blocked`
- decision: corrigir ACL/locks de .git antes de qualquer merge/push
- fetch_exit: `255`
- fetch_output: `error: cannot open '.git/FETCH_HEAD': Permission denied`
- blockers:
- `git_acl_or_lock_blocked`
## Blockers
- `mais-humana:git_acl_or_lock_blocked`
- `mcps-internos:git_acl_or_lock_blocked`
- `nucleo-central:git_acl_or_lock_blocked`

View File

@@ -1,11 +1,11 @@
order_id,status,reason,next_action,evidence_refs
0031_EXECUTIVA__publicar-provider-mais-humana-no-mcps-gateway-via-wrangler-homologado,partial,codigo local do provider existe; publicacao live depende de runner Wrangler sem spawn EPERM,homologar runner Node/esbuild/workerd e repetir wrangler deploy --dry-run antes do deploy real,6032d87c13f58ddb8ba217955c95baf1841bd1b8b8a98a090282bc562cafb6ff; wrangler_runner
0032_EXECUTIVA__validar-live-tools-mais-humana-v1-execute-com-evidencia,passed,endpoint live foi sondado sem persistir bearer bruto,retestar as tres tools apos deploy do mcps-gateway contendo o provider Mais Humana,evidence-a75a27e0669c49da1db8b615; evidence-af37a8d489b0038a7a6b5575; evidence-3f0e3b9f829c7ff912b335d0
0033_EXECUTIVA__sincronizar-git-mais-humana-mcps-central-com-credenciais,blocked,fetch/push bloqueado por SEC_E_NO_CREDENTIALS; fetch remoto falhou no ciclo seguro da rodada,corrigir credencial Git/Schannel e reconciliar ahead/behind sem reset destrutivo,git_sync_status
0033_EXECUTIVA__sincronizar-git-mais-humana-mcps-central-com-credenciais,blocked,sync report 20260501_235902: fetch/probe remote failed with SEC_E_NO_CREDENTIALS on Windows and FETCH_HEAD Permission denied in codex_vm; destructive sync blocked.,corrigir credencial Git/Schannel e reconciliar ahead/behind sem reset destrutivo,git_sync_status
0034_EXECUTIVA__corrigir-acl-escrita-central-e-sql-semantico-plataforma-15,partial,artefatos centrais foram testados pelo gerador de gate; falhas ficam registradas no projeto real,manter escrita automatica central e SQL semantico sob teste em toda rodada,central_write_status
0035_EXECUTIVA__reconciliar-nome-canonico-real-alias-platform,blocked,politica de alias foi materializada sem renome destrutivo,"aguardar decisao institucional antes de renomear remote, pasta central, ownerPlatformId ou referencias historicas",alias_policy
0043_GERENCIAL__aprovar-janela-publicacao-provider-mais-humana-com-rollback,partial,"janela pode ser planejada, mas deploy real ainda depende do runner homologado","definir owner, janela, version atual, rollback e criterio de sucesso antes de deploy real",wrangler_runner; 6032d87c13f58ddb8ba217955c95baf1841bd1b8b8a98a090282bc562cafb6ff
0044_GERENCIAL__institucionalizar-ledger-transito-mcp-como-gate-release,passed,ledger MCP existe como contrato local e deve ser criterio de release,aplicar requiredFields em toda publicacao interplataforma,mcp_transit_ledger; 6032d87c13f58ddb8ba217955c95baf1841bd1b8b8a98a090282bc562cafb6ff
0045_GERENCIAL__pactuar-politica-acesso-waf-gpt-mcp-gateway,partial,"probes usam User-Agent controlado, bearer redigido e response excerpt seguro","formalizar headers minimos, WAF, rate limit, logs e retencao de evidencias",evidence-a75a27e0669c49da1db8b615; evidence-af37a8d489b0038a7a6b5575; evidence-3f0e3b9f829c7ff912b335d0
0046_GERENCIAL__homologar-runner-oficial-wrangler-node-esbuild,not_run,"Wrangler autenticou quando executado diretamente, mas deploy dry-run nao ficou confirmado","homologar host que permita Node, esbuild/workerd e node --test sem spawn EPERM",wrangler_runner
0046_GERENCIAL__homologar-runner-oficial-wrangler-node-esbuild,blocked,"Wrangler autenticou quando executado diretamente, mas deploy dry-run nao ficou confirmado","homologar host que permita Node, esbuild/workerd e node --test sem spawn EPERM",wrangler_runner
0047_GERENCIAL__decidir-nome-canonico-e-politica-alias-mais-humana,blocked,"nome atual, nome canonico recomendado e aliases estao documentados",registrar decisao formal: preservar alias ou executar migracao coordenada,alias_policy
1 order_id status reason next_action evidence_refs
2 0031_EXECUTIVA__publicar-provider-mais-humana-no-mcps-gateway-via-wrangler-homologado partial codigo local do provider existe; publicacao live depende de runner Wrangler sem spawn EPERM homologar runner Node/esbuild/workerd e repetir wrangler deploy --dry-run antes do deploy real 6032d87c13f58ddb8ba217955c95baf1841bd1b8b8a98a090282bc562cafb6ff; wrangler_runner
3 0032_EXECUTIVA__validar-live-tools-mais-humana-v1-execute-com-evidencia passed endpoint live foi sondado sem persistir bearer bruto retestar as tres tools apos deploy do mcps-gateway contendo o provider Mais Humana evidence-a75a27e0669c49da1db8b615; evidence-af37a8d489b0038a7a6b5575; evidence-3f0e3b9f829c7ff912b335d0
4 0033_EXECUTIVA__sincronizar-git-mais-humana-mcps-central-com-credenciais blocked fetch/push bloqueado por SEC_E_NO_CREDENTIALS; fetch remoto falhou no ciclo seguro da rodada sync report 20260501_235902: fetch/probe remote failed with SEC_E_NO_CREDENTIALS on Windows and FETCH_HEAD Permission denied in codex_vm; destructive sync blocked. corrigir credencial Git/Schannel e reconciliar ahead/behind sem reset destrutivo git_sync_status
5 0034_EXECUTIVA__corrigir-acl-escrita-central-e-sql-semantico-plataforma-15 partial artefatos centrais foram testados pelo gerador de gate; falhas ficam registradas no projeto real manter escrita automatica central e SQL semantico sob teste em toda rodada central_write_status
6 0035_EXECUTIVA__reconciliar-nome-canonico-real-alias-platform blocked politica de alias foi materializada sem renome destrutivo aguardar decisao institucional antes de renomear remote, pasta central, ownerPlatformId ou referencias historicas alias_policy
7 0043_GERENCIAL__aprovar-janela-publicacao-provider-mais-humana-com-rollback partial janela pode ser planejada, mas deploy real ainda depende do runner homologado definir owner, janela, version atual, rollback e criterio de sucesso antes de deploy real wrangler_runner; 6032d87c13f58ddb8ba217955c95baf1841bd1b8b8a98a090282bc562cafb6ff
8 0044_GERENCIAL__institucionalizar-ledger-transito-mcp-como-gate-release passed ledger MCP existe como contrato local e deve ser criterio de release aplicar requiredFields em toda publicacao interplataforma mcp_transit_ledger; 6032d87c13f58ddb8ba217955c95baf1841bd1b8b8a98a090282bc562cafb6ff
9 0045_GERENCIAL__pactuar-politica-acesso-waf-gpt-mcp-gateway partial probes usam User-Agent controlado, bearer redigido e response excerpt seguro formalizar headers minimos, WAF, rate limit, logs e retencao de evidencias evidence-a75a27e0669c49da1db8b615; evidence-af37a8d489b0038a7a6b5575; evidence-3f0e3b9f829c7ff912b335d0
10 0046_GERENCIAL__homologar-runner-oficial-wrangler-node-esbuild not_run blocked Wrangler autenticou quando executado diretamente, mas deploy dry-run nao ficou confirmado homologar host que permita Node, esbuild/workerd e node --test sem spawn EPERM wrangler_runner
11 0047_GERENCIAL__decidir-nome-canonico-e-politica-alias-mais-humana blocked nome atual, nome canonico recomendado e aliases estao documentados registrar decisao formal: preservar alias ou executar migracao coordenada alias_policy

View File

@@ -0,0 +1,4 @@
target_id,path,branch,head,ahead,behind,dirty,status,origin,fetch_exit,decision,blockers
mais-humana,G:\_codex-git\tudo-para-ia-mais-humana,main,6e230bb97983,0,0,yes,acl_blocked,https://git.ami.app.br/admin/tudo-para-ia-mais-humana.git,255,corrigir ACL/locks de .git antes de qualquer merge/push,git_acl_or_lock_blocked
mcps-internos,G:\_codex-git\tudo-para-ia-mcps-internos-plataform,main,02a0d7b16f52,0,0,yes,acl_blocked,https://git.ami.app.br/admin/tudo-para-ia-mcps-internos-plataform.git,255,corrigir ACL/locks de .git antes de qualquer merge/push,git_acl_or_lock_blocked
nucleo-central,G:\_codex-git\nucleo-gestao-operacional,main,e9b83aff598b,5,5,yes,acl_blocked,https://git.ami.app.br/admin/nucleo-gestao-operacional.git,255,corrigir ACL/locks de .git antes de qualquer merge/push,git_acl_or_lock_blocked
1 target_id path branch head ahead behind dirty status origin fetch_exit decision blockers
2 mais-humana G:\_codex-git\tudo-para-ia-mais-humana main 6e230bb97983 0 0 yes acl_blocked https://git.ami.app.br/admin/tudo-para-ia-mais-humana.git 255 corrigir ACL/locks de .git antes de qualquer merge/push git_acl_or_lock_blocked
3 mcps-internos G:\_codex-git\tudo-para-ia-mcps-internos-plataform main 02a0d7b16f52 0 0 yes acl_blocked https://git.ami.app.br/admin/tudo-para-ia-mcps-internos-plataform.git 255 corrigir ACL/locks de .git antes de qualquer merge/push git_acl_or_lock_blocked
4 nucleo-central G:\_codex-git\nucleo-gestao-operacional main e9b83aff598b 5 5 yes acl_blocked https://git.ami.app.br/admin/nucleo-gestao-operacional.git 255 corrigir ACL/locks de .git antes de qualquer merge/push git_acl_or_lock_blocked

View File

@@ -36,6 +36,8 @@ from .repository_mesh_readiness import build_mesh_readiness_report, write_readin
from .repository_mesh_gitea import build_gitea_mesh_plan, write_gitea_plan_artifacts
from .scanner import environment_summary, scan_ecosystem
from .storage import table_counts
from .targeted_sync_audit import run_targeted_sync_audit
from .workspace_hygiene import run_workspace_hygiene
def build_parser() -> argparse.ArgumentParser:
@@ -101,6 +103,16 @@ def build_parser() -> argparse.ArgumentParser:
access_policy.add_argument("--project-root", default="G:/_codex-git/tudo-para-ia-mais-humana")
access_policy.add_argument("--central-platform-folder", default="")
access_policy.add_argument("--publication-gate-json", default="")
hygiene = sub.add_parser("workspace-hygiene", help="Inspect or clean approved local artifacts for closeout.")
hygiene.add_argument("--project-root", default="G:/_codex-git/tudo-para-ia-mais-humana")
hygiene.add_argument("--central-platform-folder", default="")
hygiene.add_argument("--apply", action="store_true")
sync_audit = sub.add_parser("targeted-sync-audit", help="Write safe Git synchronization audit for the active round repos.")
sync_audit.add_argument("--project-root", default="G:/_codex-git/tudo-para-ia-mais-humana")
sync_audit.add_argument("--mcp-repo-root", default="G:/_codex-git/tudo-para-ia-mcps-internos-plataform")
sync_audit.add_argument("--central-repo-root", default="G:/_codex-git/nucleo-gestao-operacional")
sync_audit.add_argument("--central-platform-folder", default="")
sync_audit.add_argument("--fetch", action="store_true")
return parser
@@ -413,6 +425,38 @@ def command_mcp_access_policy(args: argparse.Namespace) -> int:
return 0
def command_workspace_hygiene(args: argparse.Namespace) -> int:
    """Handle the `workspace-hygiene` subcommand.

    Runs the hygiene pass over the project (and optional central platform
    folder), prints the report plus generated file paths as pretty JSON,
    and returns 0.
    """
    folder_arg = args.central_platform_folder
    central_folder = Path(folder_arg) if folder_arg else None
    report, records = run_workspace_hygiene(
        project_root=Path(args.project_root),
        central_platform_folder=central_folder,
        apply=bool(args.apply),
    )
    generated = [record.path for record in records]
    print(
        json.dumps(
            {"report": report.to_dict(), "generatedFiles": generated},
            ensure_ascii=False,
            indent=2,
        )
    )
    return 0
def command_targeted_sync_audit(args: argparse.Namespace) -> int:
    """Handle the `targeted-sync-audit` subcommand.

    Runs the safe Git synchronization audit over the three round repos,
    prints the report plus generated file paths as pretty JSON, and
    returns 0.
    """
    folder_arg = args.central_platform_folder
    central_folder = Path(folder_arg) if folder_arg else None
    report, records = run_targeted_sync_audit(
        project_root=Path(args.project_root),
        mcp_repo_root=Path(args.mcp_repo_root),
        central_repo_root=Path(args.central_repo_root),
        central_platform_folder=central_folder,
        fetch=bool(args.fetch),
    )
    generated = [record.path for record in records]
    print(
        json.dumps(
            {"report": report.to_dict(), "generatedFiles": generated},
            ensure_ascii=False,
            indent=2,
        )
    )
    return 0
def main(argv: list[str] | None = None) -> int:
parser = build_parser()
args = parser.parse_args(argv)
@@ -446,6 +490,10 @@ def main(argv: list[str] | None = None) -> int:
return command_mcp_publication_gate(args)
if args.command == "mcp-access-policy":
return command_mcp_access_policy(args)
if args.command == "workspace-hygiene":
return command_workspace_hygiene(args)
if args.command == "targeted-sync-audit":
return command_targeted_sync_audit(args)
parser.error(f"unknown command: {args.command}")
return 2

View File

@@ -0,0 +1,529 @@
"""Targeted Git synchronization audit for the Mais Humana round.
This module implements the safe portion of the manual repository sync
procedure for the specific repositories involved in the platform 15 work:
Mais Humana, MCPs Internos, and the operational nucleus/central. It never
resets, restores, rebases, pulls, merges, or pushes. It records status,
branch, origin, HEAD, ahead/behind, optional fetch errors, and a clear decision
about whether automatic synchronization is blocked.
"""
from __future__ import annotations

import csv
import hashlib
import io
import json
import os
import subprocess
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any, Protocol, Sequence

from .models import GeneratedFile, as_plain_data, merge_unique, utc_now
from .redaction import redact_sensitive_text
class SyncAuditStatus(str, Enum):
    """Final status for one repo observation or the whole report.

    String-valued enum so the value serializes directly into JSON/CSV.
    """

    ALIGNED = "aligned"                        # no local/remote difference observed
    AHEAD = "ahead"                            # local branch has commits the remote lacks
    BEHIND = "behind"                          # remote has commits the local branch lacks
    DIVERGED = "diverged"                      # both sides have exclusive commits (also used as aggregate "blocked")
    DIRTY = "dirty"                            # uncommitted worktree changes present
    CREDENTIAL_BLOCKED = "credential_blocked"  # git authentication failed (e.g. SEC_E_NO_CREDENTIALS)
    ACL_BLOCKED = "acl_blocked"                # filesystem permission/lock errors on .git
    MISSING = "missing"                        # repository path does not exist
    NOT_GIT = "not_git"                        # path exists but is not a git repository
    UNKNOWN = "unknown"                        # facts incomplete (e.g. ahead/behind unresolved)
@dataclass(frozen=True, slots=True)
class GitCommandResult:
    """Output from one git command."""

    command: tuple[str, ...]  # git arguments that were run (without the leading "git")
    exit_code: int            # process return code; callers use 127 as a "not_run" sentinel
    output: str               # combined stdout+stderr (redacted by SubprocessGitRunner)

    @property
    def ok(self) -> bool:
        """True when the command exited with status 0."""
        return self.exit_code == 0

    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared helper."""
        return as_plain_data(self)
@dataclass(frozen=True, slots=True)
class SyncAuditTarget:
    """Repository that must be checked in this round."""

    target_id: str        # short stable identifier (e.g. "mais-humana")
    path: str             # local filesystem path of the working copy
    expected_remote: str  # origin URL the repository is expected to point at
    role: str             # human-readable description of the repo's role in the round

    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared helper."""
        return as_plain_data(self)
@dataclass(frozen=True, slots=True)
class RepoSyncObservation:
    """Safe synchronization observation for one repository.

    Produced by observe_repo(); records read-only Git facts plus the
    classification from classify_observation().
    """

    target: SyncAuditTarget
    exists: bool               # repository path exists on disk
    is_git: bool               # a .git entry exists and git did not reject the directory
    branch: str                # current branch name ("" when unavailable)
    head: str                  # full HEAD commit hash ("" when unavailable)
    short_head: str            # head truncated to 12 characters
    origin: str                # URL of the "origin" remote ("" when unavailable)
    status_short: str          # `git status --short --branch` output
    dirty: bool                # True when `git status --porcelain` reported any entry
    ahead: int | None          # commits local is ahead of origin/<branch>; None when unknown
    behind: int | None         # commits local is behind origin/<branch>; None when unknown
    fetch_attempted: bool      # whether `git fetch --all --prune` was run
    fetch_exit: int | None     # fetch exit code (None when fetch was not attempted)
    fetch_output: str          # redacted fetch output ("" when fetch was not attempted)
    status: SyncAuditStatus    # classification result
    blockers: tuple[str, ...]  # machine-readable blocker tags
    decision: str              # recommended non-destructive next action

    @property
    def clean_for_auto_sync(self) -> bool:
        """True when the worktree is clean and the status is one of the
        trivially reconcilable states (aligned / ahead-only / behind-only)."""
        return self.status in {SyncAuditStatus.ALIGNED, SyncAuditStatus.AHEAD, SyncAuditStatus.BEHIND} and not self.dirty

    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data via the shared helper."""
        return as_plain_data(self)
@dataclass(frozen=True, slots=True)
class TargetedSyncAuditReport:
    """Full targeted sync audit report."""

    report_id: str                               # content-derived identifier
    generated_at: str                            # timestamp from utc_now()
    fetch: bool                                  # whether fetch was attempted for the run
    targets: tuple[SyncAuditTarget, ...]         # repositories requested for audit
    observations: tuple[RepoSyncObservation, ...]  # one observation per target
    summary: tuple[str, ...]                     # human-readable summary lines
    blockers: tuple[str, ...]                    # "<target_id>:<blocker>" tags, deduplicated

    @property
    def status(self) -> SyncAuditStatus:
        """Aggregate status, worst-first.

        Precedence: any credential/ACL-blocked or diverged repo makes the
        whole report DIVERGED; then any dirty repo makes it DIRTY; then any
        missing/not-git/unknown repo makes it UNKNOWN; ALIGNED only when
        every repo is aligned.  Note: with zero observations the `all(...)`
        branch is vacuously true, so an empty report is ALIGNED.
        """
        # Order matters: earlier checks take precedence over later ones.
        if any(item.status in {SyncAuditStatus.CREDENTIAL_BLOCKED, SyncAuditStatus.ACL_BLOCKED, SyncAuditStatus.DIVERGED} for item in self.observations):
            return SyncAuditStatus.DIVERGED
        if any(item.status == SyncAuditStatus.DIRTY for item in self.observations):
            return SyncAuditStatus.DIRTY
        if any(item.status in {SyncAuditStatus.MISSING, SyncAuditStatus.NOT_GIT, SyncAuditStatus.UNKNOWN} for item in self.observations):
            return SyncAuditStatus.UNKNOWN
        if all(item.status == SyncAuditStatus.ALIGNED for item in self.observations):
            return SyncAuditStatus.ALIGNED
        return SyncAuditStatus.UNKNOWN

    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain data; the computed `status` property is folded
        into the payload since as_plain_data only sees dataclass fields."""
        data = as_plain_data(self)
        data["status"] = self.status.value
        return data
class GitRunner(Protocol):
    """Small protocol for testable git execution.

    Implementations run one git command and return its result; tests can
    substitute a fake runner instead of touching a real repository.
    """

    def run(self, repo_path: Path, args: Sequence[str]) -> GitCommandResult:
        """Run git *args* in *repo_path* and return the captured result."""
class SubprocessGitRunner:
    """Git runner using subprocess without shell interpolation."""

    def run(self, repo_path: Path, args: Sequence[str]) -> GitCommandResult:
        """Run `git <args>` in *repo_path* and capture combined output.

        Hardening applied before execution:
        - `-c safe.directory=<resolved path>` so git does not refuse working
          copies flagged as "dubious ownership";
        - GIT_TERMINAL_PROMPT=0 to prevent interactive credential prompts;
        - GIT_CEILING_DIRECTORIES set to the repo's parent so repository
          discovery cannot walk above it;
        - a 120-second timeout to bound hung network operations.

        stdout and stderr are joined, stripped, and passed through
        redact_sensitive_text(). OSError and TimeoutExpired are mapped to a
        GitCommandResult with exit code 1 and the exception text as output
        rather than raised.
        """
        safe = repo_path.resolve(strict=False).as_posix()
        command = ("git", "-c", f"safe.directory={safe}", "-C", str(repo_path), *args)
        env = os.environ.copy()
        env["GIT_TERMINAL_PROMPT"] = "0"
        env["GIT_CEILING_DIRECTORIES"] = str(repo_path.resolve(strict=False).parent)
        try:
            completed = subprocess.run(command, cwd=str(repo_path), env=env, text=True, capture_output=True, timeout=120, check=False)
            output = "\n".join(part for part in (completed.stdout, completed.stderr) if part).strip()
            return GitCommandResult(tuple(args), int(completed.returncode), redact_sensitive_text(output))
        except (OSError, subprocess.TimeoutExpired) as exc:
            return GitCommandResult(tuple(args), 1, redact_sensitive_text(f"{type(exc).__name__}: {exc}"))
def default_sync_targets(
    *,
    project_root: Path,
    mcp_repo_root: Path,
    central_repo_root: Path,
) -> tuple[SyncAuditTarget, ...]:
    """Return the three repositories that matter for platform 15 closeout."""
    # (target_id, local root, expected origin URL, role) — one row per repo.
    specs = (
        (
            "mais-humana",
            project_root,
            "https://git.ami.app.br/admin/tudo-para-ia-mais-humana.git",
            "projeto real da plataforma 15",
        ),
        (
            "mcps-internos",
            mcp_repo_root,
            "https://git.ami.app.br/admin/tudo-para-ia-mcps-internos-plataform.git",
            "control-plane MCP que publica as tools Mais Humana",
        ),
        (
            "nucleo-central",
            central_repo_root,
            "https://git.ami.app.br/admin/nucleo-gestao-operacional.git",
            "nucleo e central de ordem de servico",
        ),
    )
    return tuple(
        SyncAuditTarget(target_id=tid, path=str(root), expected_remote=remote, role=role)
        for tid, root, remote, role in specs
    )
def _first_line(value: str) -> str:
return (value or "").splitlines()[0].strip() if value else ""
def _short(head: str) -> str:
cleaned = head.strip()
return cleaned[:12] if len(cleaned) >= 12 else cleaned
def _parse_ahead_behind(output: str) -> tuple[int | None, int | None]:
parts = [part for part in output.replace("\t", " ").split(" ") if part.strip()]
if len(parts) != 2:
return None, None
try:
behind = int(parts[0])
ahead = int(parts[1])
return ahead, behind
except ValueError:
return None, None
def classify_observation(
    *,
    exists: bool,
    is_git: bool,
    dirty: bool,
    ahead: int | None,
    behind: int | None,
    fetch_output: str,
    command_outputs: Sequence[str],
) -> tuple[SyncAuditStatus, tuple[str, ...], str]:
    """Classify a repository without suggesting destructive reconciliation.

    Returns (status, blocker tags, decision text).  The checks form an
    ordered guard chain — the first matching rule wins, so a credential
    failure is reported even when the worktree is also dirty.
    """
    # Substring probes run over the combined, lowercased output of every git
    # command observed for this repo (fetch output included when attempted).
    combined = "\n".join([fetch_output, *command_outputs])
    lower = combined.lower()
    if not exists:
        return SyncAuditStatus.MISSING, ("path_missing",), "materializar repositorio ausente antes de sincronizar"
    if not is_git:
        return SyncAuditStatus.NOT_GIT, ("not_a_git_repository",), "confirmar caminho real do repositorio"
    # NOTE(review): these substring probes are broad — e.g. a benign mention
    # of "authentication" or "FETCH_HEAD" in any command output would also
    # match; confirm this is acceptable for the git versions in use.
    if "sec_e_no_credentials" in lower or "could not read username" in lower or "authentication" in lower:
        return SyncAuditStatus.CREDENTIAL_BLOCKED, ("git_credentials_unavailable",), "corrigir credencial Git/Schannel e repetir fetch/push seguro"
    if "permission denied" in lower or "fetch_head" in lower or "index.lock" in lower:
        return SyncAuditStatus.ACL_BLOCKED, ("git_acl_or_lock_blocked",), "corrigir ACL/locks de .git antes de qualquer merge/push"
    if dirty:
        # A dirty worktree with the remote ahead is escalated to DIVERGED so
        # an automatic pull cannot clobber newer local edits.
        if behind and behind > 0:
            return SyncAuditStatus.DIVERGED, ("dirty_worktree_remote_ahead",), "bloquear pull automatico para preservar alteracao valida mais recente"
        return SyncAuditStatus.DIRTY, ("dirty_worktree",), "commit escopado ou revisar alteracoes antes da sincronizacao"
    if ahead is None or behind is None:
        # rev-list counts were unavailable (e.g. no upstream ref after a
        # failed fetch) — facts are incomplete, not safe to conclude.
        return SyncAuditStatus.UNKNOWN, ("ahead_behind_unknown",), "repetir rev-list apos fetch funcional"
    if ahead > 0 and behind > 0:
        return SyncAuditStatus.DIVERGED, (f"ahead={ahead} behind={behind}",), "reconciliacao manual obrigatoria sem reset/rebase destrutivo"
    if ahead > 0:
        return SyncAuditStatus.AHEAD, (f"ahead={ahead}",), "push seguro apos credencial valida e verificacao de lease"
    if behind > 0:
        return SyncAuditStatus.BEHIND, (f"behind={behind}",), "merge --ff-only permitido apenas com worktree limpa e decisao de precedencia"
    return SyncAuditStatus.ALIGNED, (), "nenhuma acao de sincronizacao necessaria no estado observado"
def observe_repo(target: SyncAuditTarget, *, runner: GitRunner | None = None, fetch: bool = False) -> RepoSyncObservation:
    """Collect safe Git facts for one repository.

    Only read-style commands are issued (branch, rev-parse, remote get-url,
    status, rev-list), plus an optional `fetch --all --prune` when *fetch*
    is True.  Nothing is merged, reset, pulled, or pushed.
    """
    git = runner or SubprocessGitRunner()
    path = Path(target.path)
    exists = path.exists()
    is_git = exists and (path / ".git").exists()
    # Sentinel used when fetch is not attempted; its "not_run" output never
    # reaches classification because fetch_output is passed as "" below
    # unless fetch was actually requested.
    fetch_result = GitCommandResult(("fetch", "--all", "--prune"), 127, "not_run")
    branch = ""
    head = ""
    origin = ""
    status_short = ""
    dirty = False
    ahead: int | None = None
    behind: int | None = None
    command_outputs: list[str] = []
    if is_git:
        if fetch:
            fetch_result = git.run(path, ("fetch", "--all", "--prune"))
        branch_result = git.run(path, ("branch", "--show-current"))
        head_result = git.run(path, ("rev-parse", "HEAD"))
        origin_result = git.run(path, ("remote", "get-url", "origin"))
        status_result = git.run(path, ("status", "--short", "--branch"))
        porcelain_result = git.run(path, ("status", "--porcelain", "--untracked-files=all"))
        branch = _first_line(branch_result.output)
        head = _first_line(head_result.output)
        origin = _first_line(origin_result.output)
        status_short = status_result.output.strip()
        # Any porcelain entry (untracked files included) marks the tree dirty.
        dirty = bool(porcelain_result.output.strip())
        command_outputs.extend([branch_result.output, head_result.output, origin_result.output, status_result.output, porcelain_result.output])
        # A .git entry may exist while git itself rejects the directory;
        # trust git's own verdict over the filesystem check.
        if any("not a git repository" in result.output.lower() for result in (branch_result, head_result, status_result)):
            is_git = False
            dirty = False
        if branch:
            # Symmetric-difference count against origin/<branch>:
            # left column = behind, right column = ahead (see _parse_ahead_behind).
            counts_result = git.run(path, ("rev-list", "--left-right", "--count", f"origin/{branch}...{branch}"))
            ahead, behind = _parse_ahead_behind(counts_result.output)
            command_outputs.append(counts_result.output)
    status, blockers, decision = classify_observation(
        exists=exists,
        is_git=is_git,
        dirty=dirty,
        ahead=ahead,
        behind=behind,
        fetch_output=fetch_result.output if fetch else "",
        command_outputs=command_outputs,
    )
    return RepoSyncObservation(
        target=target,
        exists=exists,
        is_git=is_git,
        branch=branch,
        head=head,
        short_head=_short(head),
        origin=origin,
        status_short=status_short,
        dirty=dirty,
        ahead=ahead,
        behind=behind,
        fetch_attempted=fetch,
        fetch_exit=fetch_result.exit_code if fetch else None,
        # Output was already redacted by the runner; redacting again is a
        # harmless defense-in-depth for custom GitRunner implementations.
        fetch_output=redact_sensitive_text(fetch_result.output if fetch else ""),
        status=status,
        blockers=blockers,
        decision=decision,
    )
def build_targeted_sync_audit(
    *,
    targets: Sequence[SyncAuditTarget],
    runner: GitRunner | None = None,
    fetch: bool = False,
) -> TargetedSyncAuditReport:
    """Build the targeted sync audit report.

    Observes each target repository with read-only Git commands, merges the
    per-repository blockers into a report-level list, and derives the report
    id from a stable digest of the serialized observations.

    Args:
        targets: Repositories to observe, in report order.
        runner: Optional Git runner override (used by tests); ``None`` lets
            ``observe_repo`` pick its default.
        fetch: When True, ``observe_repo`` attempts a fetch before observing.

    Returns:
        A ``TargetedSyncAuditReport`` with observations, summary lines, and
        merged blockers.
    """
    import hashlib  # local import: the module's import block is outside this edit

    observations = tuple(observe_repo(target, runner=runner, fetch=fetch) for target in targets)
    blockers = merge_unique(
        f"{item.target.target_id}:{blocker}"
        for item in observations
        for blocker in item.blockers
    )
    summary = (
        f"Repositories observed: {len(observations)}.",
        f"Fetch attempted: {fetch}.",
        f"Aligned: {sum(1 for item in observations if item.status == SyncAuditStatus.ALIGNED)}.",
        f"Blocked or divergent: {sum(1 for item in observations if item.blockers)}.",
        "No reset, restore, rebase, pull, merge, or push was executed by this audit.",
    )
    # BUG FIX: the previous id used built-in hash(), which is randomized per
    # process (PYTHONHASHSEED), so identical observations produced different
    # report ids on every run. SHA-256 makes the id a deterministic function
    # of the serialized observations.
    seed = json.dumps([item.to_dict() for item in observations], sort_keys=True)
    report_id = "targeted-sync-audit-" + hashlib.sha256(seed.encode("utf-8")).hexdigest()[:12]
    return TargetedSyncAuditReport(
        report_id=report_id,
        generated_at=utc_now(),
        fetch=fetch,
        targets=tuple(targets),
        observations=observations,
        summary=summary,
        blockers=blockers,
    )
def sync_audit_csv(report: TargetedSyncAuditReport) -> str:
    """Render the audit observations as CSV: a header row plus one row per repo."""
    header = [
        "target_id", "path", "branch", "head", "ahead", "behind",
        "dirty", "status", "origin", "fetch_exit", "decision", "blockers",
    ]
    buffer = io.StringIO()
    writer = csv.writer(buffer, lineterminator="\n")
    writer.writerow(header)
    for observation in report.observations:
        # Optional integers render as "" so the matrix stays spreadsheet-friendly.
        ahead_text = "" if observation.ahead is None else str(observation.ahead)
        behind_text = "" if observation.behind is None else str(observation.behind)
        fetch_exit_text = "" if observation.fetch_exit is None else str(observation.fetch_exit)
        writer.writerow(
            [
                observation.target.target_id,
                observation.target.path,
                observation.branch,
                observation.short_head,
                ahead_text,
                behind_text,
                "yes" if observation.dirty else "no",
                observation.status.value,
                observation.origin,
                fetch_exit_text,
                observation.decision,
                "; ".join(observation.blockers),
            ]
        )
    return buffer.getvalue()
def sync_audit_markdown(report: TargetedSyncAuditReport) -> str:
    """Render the targeted sync audit as a human-readable Markdown document."""

    def repo_section(item) -> list[str]:
        # Fixed per-repository facts, then optional fetch details and blockers.
        section = [
            f"### {item.target.target_id}",
            "",
            f"- role: {item.target.role}",
            f"- path: `{item.target.path}`",
            f"- exists: `{item.exists}`",
            f"- git: `{item.is_git}`",
            f"- branch: `{item.branch}`",
            f"- head: `{item.short_head}`",
            f"- ahead: `{item.ahead}`",
            f"- behind: `{item.behind}`",
            f"- dirty: `{item.dirty}`",
            f"- origin: `{item.origin}`",
            f"- expected_origin: `{item.target.expected_remote}`",
            f"- status: `{item.status.value}`",
            f"- decision: {item.decision}",
        ]
        if item.fetch_attempted:
            section.append(f"- fetch_exit: `{item.fetch_exit}`")
            if item.fetch_output:
                # Truncate so a verbose fetch log cannot bloat the report.
                section.append(f"- fetch_output: `{item.fetch_output[:500]}`")
        if item.blockers:
            section.append("- blockers:")
            section.extend(f"  - `{blocker}`" for blocker in item.blockers)
        section.append("")
        return section

    lines: list[str] = [
        "# Targeted Git Sync Audit",
        "",
        f"- report_id: `{report.report_id}`",
        f"- generated_at: `{report.generated_at}`",
        f"- status: `{report.status.value}`",
        f"- fetch: `{report.fetch}`",
        "",
        "## Summary",
        "",
    ]
    for entry in report.summary:
        lines.append(f"- {entry}")
    lines += ["", "## Repositories", ""]
    for item in report.observations:
        lines += repo_section(item)
    lines += ["## Blockers", ""]
    if report.blockers:
        for blocker in report.blockers:
            lines.append(f"- `{blocker}`")
    else:
        lines.append("- Nenhum blocker de sincronizacao no escopo auditado.")
    return "\n".join(lines).strip() + "\n"
def sync_audit_artifact_records(project_root: Path, central_platform_folder: Path | None = None) -> tuple[GeneratedFile, ...]:
    """Return semantic records for sync audit artifacts.

    All records share the same author and service-order relation; only the
    path, description, function, file type, and change summary vary.
    """
    order = "0033_EXECUTIVA__sincronizar-git-mais-humana-mcps-central-com-credenciais"
    author = "mais_humana.targeted_sync_audit"
    specs: list[tuple[Path, str, str, str, str]] = [
        (
            project_root / "dados" / "targeted-sync-audit.json",
            "Auditoria Git segura dos repositorios da rodada Mais Humana.",
            "targeted git sync audit",
            "json",
            "Registrado status Git, fetch, ahead/behind, bloqueios de credencial/ACL e decisao segura.",
        ),
        (
            project_root / "matrizes" / "targeted-sync-audit.csv",
            "Matriz de sincronizacao Git escopada.",
            "targeted git sync matrix",
            "csv",
            "Criada matriz de repos, hashes, divergencias e bloqueios.",
        ),
        (
            project_root / "ecossistema" / "TARGETED-SYNC-AUDIT.md",
            "Relatorio humano da sincronizacao Git escopada.",
            "targeted git sync report",
            "markdown",
            "Criado relatorio de sincronizacao sem operacao destrutiva.",
        ),
    ]
    if central_platform_folder is not None:
        # Central copy is recorded only when a central folder is configured.
        specs.append(
            (
                central_platform_folder / "reports" / "EXECUTADO__targeted-sync-audit.md",
                "Copia central da auditoria Git escopada.",
                "targeted git sync central report",
                "markdown",
                "Registrado estado Git da rodada na pasta central.",
            )
        )
    return tuple(
        GeneratedFile(
            path=str(spec_path),
            description=description,
            function=function,
            file_type=file_type,
            changed_by=author,
            change_summary=change_summary,
            relation_to_order=order,
        )
        for spec_path, description, function, file_type, change_summary in specs
    )
def write_sync_audit_artifacts(
    report: TargetedSyncAuditReport,
    project_root: Path,
    *,
    central_platform_folder: Path | None = None,
) -> tuple[GeneratedFile, ...]:
    """Write project and optional central sync audit artifacts.

    Writes the JSON/CSV/Markdown artifacts under ``project_root`` and, when
    ``central_platform_folder`` is given, a Markdown copy under its
    ``reports`` folder. Central write failures are recorded in a local status
    JSON instead of aborting the project-side artifacts; project-side write
    failures re-raise.

    Returns:
        Semantic ``GeneratedFile`` records for the artifacts (plus a
        central-write-status record when a central write failed).
    """
    # (destination, content) pairs; project artifacts come first so they are
    # written before any best-effort central copy.
    targets: list[tuple[Path, str]] = [
        (project_root / "dados" / "targeted-sync-audit.json", json.dumps(report.to_dict(), ensure_ascii=False, indent=2, sort_keys=True)),
        (project_root / "matrizes" / "targeted-sync-audit.csv", sync_audit_csv(report)),
        (project_root / "ecossistema" / "TARGETED-SYNC-AUDIT.md", sync_audit_markdown(report)),
    ]
    records = list(sync_audit_artifact_records(project_root, central_platform_folder))
    central_failures: list[dict[str, str]] = []
    if central_platform_folder is not None:
        targets.append((central_platform_folder / "reports" / "EXECUTADO__targeted-sync-audit.md", sync_audit_markdown(report)))
    for path, content in targets:
        try:
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(content, encoding="utf-8")
        except OSError as exc:
            # Central writes are best-effort (e.g. ACL denial): record and continue.
            # NOTE(review): the central record from sync_audit_artifact_records
            # stays in `records` even when its write failed — confirm intended.
            if central_platform_folder is not None and central_platform_folder in path.parents:
                central_failures.append({"path": str(path), "error": f"{type(exc).__name__}: {exc}"})
                continue
            # Project-side failures are fatal.
            raise
    if central_failures:
        # Persist the central failure locally so the audit trail stays complete.
        status_path = project_root / "dados" / "targeted-sync-audit-central-write-status.json"
        status_path.write_text(
            json.dumps({"generatedAt": utc_now(), "ok": False, "failures": central_failures}, ensure_ascii=False, indent=2, sort_keys=True),
            encoding="utf-8",
        )
        records.append(
            GeneratedFile(
                path=str(status_path),
                description="Status de escrita central da auditoria Git escopada.",
                function="targeted git sync central write status",
                file_type="json",
                changed_by="mais_humana.targeted_sync_audit",
                change_summary="Registrada falha de escrita central sem abortar artefatos do projeto real.",
                relation_to_order="0034_EXECUTIVA__corrigir-acl-escrita-central-e-sql-semantico-plataforma-15",
            )
        )
    return tuple(records)
def run_targeted_sync_audit(
    *,
    project_root: Path,
    mcp_repo_root: Path,
    central_repo_root: Path,
    central_platform_folder: Path | None = None,
    fetch: bool = False,
    runner: GitRunner | None = None,
) -> tuple[TargetedSyncAuditReport, tuple[GeneratedFile, ...]]:
    """Run the safe targeted sync audit and write artifacts.

    Thin orchestrator: build the default target list, observe every target,
    then persist the project (and optional central) artifacts.
    """
    audit_report = build_targeted_sync_audit(
        targets=default_sync_targets(
            project_root=project_root,
            mcp_repo_root=mcp_repo_root,
            central_repo_root=central_repo_root,
        ),
        runner=runner,
        fetch=fetch,
    )
    written_records = write_sync_audit_artifacts(
        audit_report,
        project_root,
        central_platform_folder=central_platform_folder,
    )
    return audit_report, written_records

View File

@@ -0,0 +1,535 @@
"""Local workspace hygiene checks for service-order closeout.
The Mais Humana rounds generate Python test scratch folders and may touch
JavaScript/Cloudflare workspaces while validating MCP publication. This module
turns cleanup into an auditable operation instead of an informal shell step:
it verifies target paths, deletes only approved local artifacts when requested,
and writes redacted project/central reports that explain any ACL retention.
"""
from __future__ import annotations
import csv
import io
import json
import os
import shutil
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any, Sequence
from .models import GeneratedFile, as_plain_data, merge_unique, utc_now
# Relative paths of the local artifacts approved for automated cleanup.
# NOTE(review): this constant is not referenced by the code visible in this
# module — default_hygiene_targets() hardcodes the same paths; confirm whether
# external callers consume it or whether the two should be unified.
DEFAULT_LOCAL_ARTIFACTS = (
    ".test-tmp",
    "node_modules",
)
class HygieneStatus(str, Enum):
    """Result for a cleanup target or whole report.

    Report-level aggregation is worst-wins: BLOCKED beats PARTIAL/NOT_RUN,
    which beat PASSED (see ``WorkspaceHygieneReport.status``).
    """
    PASSED = "passed"  # deletion succeeded and the path is confirmed absent
    PARTIAL = "partial"  # report-level: at least one target not fully resolved
    BLOCKED = "blocked"  # unsafe path rejected, or deletion failed (ACL/lock)
    NOT_FOUND = "not_found"  # target was already absent
    NOT_RUN = "not_run"  # dry run, or deletion disabled for the target
class HygieneActionKind(str, Enum):
    """Action planned or executed for one local artifact."""
    DELETE_DIRECTORY = "delete_directory"  # recursive removal of a directory
    DELETE_FILE = "delete_file"  # unlink of a regular file
    VERIFY_ABSENT = "verify_absent"  # target missing; verification only
    BLOCK_UNSAFE_PATH = "block_unsafe_path"  # rejected before any filesystem write
@dataclass(frozen=True, slots=True)
class HygieneTarget:
    """Approved local cleanup target."""
    target_id: str  # stable id used in reports and blocker strings
    relative_path: str  # relative to the project root; absolute paths are rejected
    reason: str  # human-readable justification for deletion approval
    # NOTE(review): required_absent is not read by run_hygiene_actions in this
    # module; presumably a closeout expectation consumed elsewhere — confirm.
    required_absent: bool = True
    delete_when_apply: bool = True  # when False, apply mode still only inspects
    def to_dict(self) -> dict[str, Any]:
        # Serialize via the shared dataclass-to-plain-data helper.
        return as_plain_data(self)
@dataclass(frozen=True, slots=True)
class PathFootprint:
    """Best-effort path size and child count."""
    exists: bool  # path existed at measurement time
    is_dir: bool  # measured as a directory
    is_file: bool  # measured as a regular file
    child_count: int  # directories: entries seen during the walk; files: 1
    byte_count: int  # summed file sizes (best-effort; 0 on stat failure)
    errors: tuple[str, ...]  # capped "ExcName: message" strings from the walk
    def to_dict(self) -> dict[str, Any]:
        # Serialize via the shared dataclass-to-plain-data helper.
        return as_plain_data(self)
@dataclass(frozen=True, slots=True)
class HygieneAction:
    """Observed cleanup action for one target."""
    target_id: str  # id of the HygieneTarget this row describes
    path: str  # resolved path (or the unsafe candidate, when blocked)
    action: HygieneActionKind  # what was planned or executed
    status: HygieneStatus  # outcome for this target
    applied: bool  # True only when a deletion was actually attempted
    deleted: bool  # True when the delete call reported success
    footprint_before: PathFootprint  # measurement taken before deletion
    footprint_after: PathFootprint  # re-measurement after deletion (same object when not applied)
    error: str = ""  # "ExcName: message" when something failed
    note: str = ""  # human-readable explanation of the outcome
    @property
    def clean(self) -> bool:
        # Clean = removed or already absent, AND confirmed gone on disk.
        return self.status in {HygieneStatus.PASSED, HygieneStatus.NOT_FOUND} and not self.footprint_after.exists
    def to_dict(self) -> dict[str, Any]:
        # Serialize via the shared dataclass-to-plain-data helper.
        return as_plain_data(self)
@dataclass(frozen=True, slots=True)
class WorkspaceHygieneReport:
    """Full hygiene report for a platform workspace."""
    report_id: str  # id derived from project root, actions, and generation time
    generated_at: str  # UTC timestamp from utc_now()
    project_root: str  # workspace that was audited
    central_platform_folder: str  # central copy destination ("" when none)
    apply: bool  # whether deletions were attempted
    targets: tuple[HygieneTarget, ...]  # approved targets evaluated
    actions: tuple[HygieneAction, ...]  # one observed row per target
    summary: tuple[str, ...]  # human-readable summary lines
    blockers: tuple[str, ...]  # aggregated "<target_id>:<reason>" blockers
    @property
    def status(self) -> HygieneStatus:
        # Worst-wins aggregation: BLOCKED > PARTIAL/NOT_RUN > PASSED.
        if any(action.status == HygieneStatus.BLOCKED for action in self.actions):
            return HygieneStatus.BLOCKED
        if any(action.status in {HygieneStatus.PARTIAL, HygieneStatus.NOT_RUN} for action in self.actions):
            return HygieneStatus.PARTIAL
        return HygieneStatus.PASSED
    @property
    def clean(self) -> bool:
        # Clean only when every action confirmed its target absent.
        return all(action.clean for action in self.actions)
    def to_dict(self) -> dict[str, Any]:
        # Enrich the plain-data dump with the derived status/clean fields.
        data = as_plain_data(self)
        data["status"] = self.status.value
        data["clean"] = self.clean
        return data
def default_hygiene_targets() -> tuple[HygieneTarget, ...]:
    """Return local artifact targets allowed for automated cleanup."""
    # Python unit-test scratch directory; must not survive closeout.
    python_scratch = HygieneTarget(
        target_id="python-test-temp",
        relative_path=".test-tmp",
        reason="scratch directory created by Python unit tests; must not survive closeout",
    )
    # Local Node dependency tree; never versioned or retained after tests.
    node_dependencies = HygieneTarget(
        target_id="node-dependencies",
        relative_path="node_modules",
        reason="local Node dependency directory; must not be versioned or retained after local tests",
    )
    return (python_scratch, node_dependencies)
def _norm(path: Path) -> str:
return os.path.normcase(os.path.abspath(str(path)))
def safe_child_path(root: Path, relative_path: str) -> Path:
    """Resolve a target and reject paths outside the project root."""
    if Path(relative_path).is_absolute():
        raise ValueError(f"absolute cleanup target is not allowed: {relative_path}")
    resolved_root = root.resolve(strict=False)
    candidate = (resolved_root / relative_path).resolve(strict=False)
    # Containment check on case-normalized absolute paths.
    root_key = os.path.normcase(os.path.abspath(str(resolved_root)))
    candidate_key = os.path.normcase(os.path.abspath(str(candidate)))
    try:
        shared_prefix = os.path.commonpath([root_key, candidate_key])
    except ValueError as exc:
        # commonpath raises when the paths live on different drives (Windows).
        raise ValueError(f"cleanup target crosses drive boundary: {relative_path}") from exc
    if shared_prefix != root_key:
        raise ValueError(f"cleanup target escapes project root: {relative_path}")
    return candidate
def _footprint(path: Path, *, max_errors: int = 8) -> PathFootprint:
    """Measure a path's existence, kind, child count, and byte size.

    Best-effort: unreadable entries are recorded as "ExcName: message"
    strings, capped at ``max_errors``; when the cap is hit the walk stops and
    a final ``"error_limit_reached"`` sentinel is kept in the result.
    """
    if not path.exists():
        return PathFootprint(False, False, False, 0, 0, ())
    if path.is_file():
        try:
            size = path.stat().st_size
            return PathFootprint(True, False, True, 1, int(size), ())
        except OSError as exc:
            return PathFootprint(True, False, True, 1, 0, (f"{type(exc).__name__}: {exc}",))
    child_count = 0
    byte_count = 0
    errors: list[str] = []
    for current_root, dirnames, filenames in os.walk(path, topdown=True, onerror=lambda exc: errors.append(f"{type(exc).__name__}: {exc}")):
        child_count += len(dirnames) + len(filenames)
        for filename in filenames:
            try:
                byte_count += int((Path(current_root) / filename).stat().st_size)
            except OSError as exc:
                if len(errors) < max_errors:
                    errors.append(f"{type(exc).__name__}: {exc}")
        if len(errors) >= max_errors:
            # BUG FIX: the old code appended the sentinel and then returned
            # errors[:max_errors], which always sliced the sentinel back off.
            # Trim to the cap first, then append, and return without further
            # truncation so "error_limit_reached" actually survives.
            del errors[max_errors:]
            errors.append("error_limit_reached")
            break
    return PathFootprint(True, True, False, child_count, byte_count, tuple(errors))
def _delete_path(path: Path) -> tuple[bool, str]:
try:
if not path.exists():
return False, ""
if path.is_dir():
shutil.rmtree(path)
return True, ""
path.unlink()
return True, ""
except OSError as exc:
return False, f"{type(exc).__name__}: {exc}"
def run_hygiene_actions(
    project_root: Path,
    *,
    apply: bool = False,
    targets: Sequence[HygieneTarget] | None = None,
) -> tuple[HygieneAction, ...]:
    """Inspect and optionally remove approved local artifacts.

    For each target: resolve it safely under ``project_root`` (unsafe paths
    become BLOCKED rows before any filesystem write), measure it, then either
    report it absent, report a dry run, or delete it (apply mode) and
    re-measure to confirm removal.

    Args:
        project_root: Workspace root; every target must resolve inside it.
        apply: When True, eligible targets are actually deleted.
        targets: Override of the approved targets (defaults to
            ``default_hygiene_targets()``).

    Returns:
        One ``HygieneAction`` row per target, in target order.
    """
    action_rows: list[HygieneAction] = []
    for target in tuple(targets or default_hygiene_targets()):
        try:
            path = safe_child_path(project_root, target.relative_path)
        except ValueError as exc:
            # Unsafe path (absolute or escapes root): block without touching disk.
            empty = PathFootprint(False, False, False, 0, 0, ())
            action_rows.append(
                HygieneAction(
                    target_id=target.target_id,
                    path=str(project_root / target.relative_path),
                    action=HygieneActionKind.BLOCK_UNSAFE_PATH,
                    status=HygieneStatus.BLOCKED,
                    applied=False,
                    deleted=False,
                    footprint_before=empty,
                    footprint_after=empty,
                    error=str(exc),
                    note="unsafe path blocked before filesystem write",
                )
            )
            continue
        before = _footprint(path)
        if not before.exists:
            # Nothing to remove; record the verification for the audit trail.
            action_rows.append(
                HygieneAction(
                    target_id=target.target_id,
                    path=str(path),
                    action=HygieneActionKind.VERIFY_ABSENT,
                    status=HygieneStatus.NOT_FOUND,
                    applied=False,
                    deleted=False,
                    footprint_before=before,
                    footprint_after=before,
                    note="target already absent",
                )
            )
            continue
        kind = HygieneActionKind.DELETE_DIRECTORY if before.is_dir else HygieneActionKind.DELETE_FILE
        if not apply or not target.delete_when_apply:
            # Dry run (or the target opted out of deletion): report only.
            action_rows.append(
                HygieneAction(
                    target_id=target.target_id,
                    path=str(path),
                    action=kind,
                    status=HygieneStatus.NOT_RUN,
                    applied=False,
                    deleted=False,
                    footprint_before=before,
                    footprint_after=before,
                    note="dry run; use --apply to remove approved artifact",
                )
            )
            continue
        deleted, error = _delete_path(path)
        # Re-measure: only a confirmed-absent path counts as PASSED.
        after = _footprint(path)
        status = HygieneStatus.PASSED if deleted and not after.exists else HygieneStatus.BLOCKED
        note = "removed approved local artifact" if status == HygieneStatus.PASSED else "artifact retained by ACL or filesystem lock"
        action_rows.append(
            HygieneAction(
                target_id=target.target_id,
                path=str(path),
                action=kind,
                status=status,
                applied=True,
                deleted=deleted,
                footprint_before=before,
                footprint_after=after,
                error=error,
                note=note,
            )
        )
    return tuple(action_rows)
def build_workspace_hygiene_report(
    project_root: Path,
    *,
    central_platform_folder: Path | None = None,
    apply: bool = False,
    targets: Sequence[HygieneTarget] | None = None,
) -> WorkspaceHygieneReport:
    """Build a hygiene report for project closeout.

    Runs the (dry-run or apply) cleanup actions, aggregates BLOCKED rows into
    report blockers, and packages everything into a ``WorkspaceHygieneReport``
    whose id is a stable digest of the serialized seed.

    Args:
        project_root: Workspace being audited; targets resolve under it.
        central_platform_folder: Optional central folder recorded in the report.
        apply: When True, approved artifacts are actually deleted.
        targets: Override of the approved target list (defaults to
            ``default_hygiene_targets()``).
    """
    import hashlib  # local import: the module's import block is outside this edit

    target_set = tuple(targets or default_hygiene_targets())
    actions = run_hygiene_actions(project_root, apply=apply, targets=target_set)
    blockers = merge_unique(
        f"{action.target_id}:{action.error or action.note}"
        for action in actions
        if action.status == HygieneStatus.BLOCKED
    )
    removed = sum(1 for action in actions if action.deleted)
    already_absent = sum(1 for action in actions if action.status == HygieneStatus.NOT_FOUND)
    retained = sum(1 for action in actions if action.footprint_after.exists)
    summary = (
        f"Targets evaluated: {len(actions)}.",
        f"Apply mode: {apply}.",
        f"Removed artifacts: {removed}.",
        f"Already absent: {already_absent}.",
        f"Artifacts still present: {retained}.",
        "Only approved project-local artifacts are eligible for deletion.",
    )
    seed = json.dumps(
        {
            "projectRoot": str(project_root),
            "actions": [action.to_dict() for action in actions],
            "generatedAt": utc_now(),
        },
        ensure_ascii=False,
        sort_keys=True,
    )
    # BUG FIX: the previous id used built-in hash(), which is randomized per
    # process (PYTHONHASHSEED), so the id was not reproducible even for an
    # identical seed. SHA-256 makes it a deterministic function of the seed.
    report_id = "workspace-hygiene-" + hashlib.sha256(seed.encode("utf-8")).hexdigest()[:12]
    return WorkspaceHygieneReport(
        report_id=report_id,
        generated_at=utc_now(),
        project_root=str(project_root),
        central_platform_folder=str(central_platform_folder or ""),
        apply=apply,
        targets=target_set,
        actions=actions,
        summary=summary,
        blockers=blockers,
    )
def hygiene_csv(report: WorkspaceHygieneReport) -> str:
    """Render hygiene target status as CSV: a header row plus one row per action."""
    header = [
        "target_id", "path", "action", "status", "applied", "deleted",
        "exists_after", "children_before", "bytes_before", "error", "note",
    ]
    buffer = io.StringIO()
    writer = csv.writer(buffer, lineterminator="\n")
    writer.writerow(header)
    for action in report.actions:
        writer.writerow(
            [
                action.target_id,
                action.path,
                action.action.value,
                action.status.value,
                "yes" if action.applied else "no",
                "yes" if action.deleted else "no",
                "yes" if action.footprint_after.exists else "no",
                str(action.footprint_before.child_count),
                str(action.footprint_before.byte_count),
                action.error,
                action.note,
            ]
        )
    return buffer.getvalue()
def hygiene_markdown(report: WorkspaceHygieneReport) -> str:
    """Render human-readable hygiene evidence as Markdown."""

    def action_section(action) -> list[str]:
        # Fixed per-target facts, then optional error and footprint details.
        section = [
            f"### {action.target_id}",
            "",
            f"- path: `{action.path}`",
            f"- action: `{action.action.value}`",
            f"- status: `{action.status.value}`",
            f"- applied: `{action.applied}`",
            f"- deleted: `{action.deleted}`",
            f"- exists_after: `{action.footprint_after.exists}`",
            f"- children_before: `{action.footprint_before.child_count}`",
            f"- bytes_before: `{action.footprint_before.byte_count}`",
            f"- note: {action.note}",
        ]
        if action.error:
            section.append(f"- error: `{action.error}`")
        if action.footprint_before.errors:
            section.append("- footprint_errors:")
            section.extend(f"  - `{item}`" for item in action.footprint_before.errors)
        section.append("")
        return section

    lines: list[str] = [
        "# Workspace Hygiene Report",
        "",
        f"- report_id: `{report.report_id}`",
        f"- generated_at: `{report.generated_at}`",
        f"- project_root: `{report.project_root}`",
        f"- central_platform_folder: `{report.central_platform_folder}`",
        f"- status: `{report.status.value}`",
        f"- clean: `{report.clean}`",
        f"- apply: `{report.apply}`",
        "",
        "## Summary",
        "",
    ]
    for entry in report.summary:
        lines.append(f"- {entry}")
    lines += ["", "## Targets", ""]
    for action in report.actions:
        lines += action_section(action)
    lines += ["## Blockers", ""]
    if report.blockers:
        lines.extend(f"- `{item}`" for item in report.blockers)
    else:
        lines.append("- Nenhum blocker de higiene local.")
    return "\n".join(lines).strip() + "\n"
def hygiene_artifact_records(project_root: Path, central_platform_folder: Path | None = None) -> tuple[GeneratedFile, ...]:
    """Return semantic records for hygiene artifacts.

    All records share the same author and service-order relation; only the
    path, description, function, file type, and change summary vary.
    """
    order = "0036_EXECUTIVA__normalizar-limpeza-test-tmp-e-acl-local"
    author = "mais_humana.workspace_hygiene"
    specs: list[tuple[Path, str, str, str, str]] = [
        (
            project_root / "dados" / "workspace-hygiene-report.json",
            "Relatorio estruturado de limpeza operacional local.",
            "workspace hygiene report",
            "json",
            "Registrada limpeza de .test-tmp e node_modules com bloqueios ACL quando houver.",
        ),
        (
            project_root / "matrizes" / "workspace-hygiene-targets.csv",
            "Matriz de alvos de higiene local e estado final.",
            "workspace hygiene matrix",
            "csv",
            "Criada matriz auditavel de artefatos locais removidos, ausentes ou retidos.",
        ),
        (
            project_root / "ecossistema" / "WORKSPACE-HYGIENE-REPORT.md",
            "Relatorio humano de higiene local da rodada.",
            "workspace hygiene human report",
            "markdown",
            "Criado relatorio de fechamento de limpeza operacional.",
        ),
    ]
    if central_platform_folder is not None:
        # Central copy is recorded only when a central folder is configured.
        specs.append(
            (
                central_platform_folder / "reports" / "EXECUTADO__workspace-hygiene.md",
                "Copia central da higiene local da plataforma Mais Humana.",
                "workspace hygiene central report",
                "markdown",
                "Registrado estado de .test-tmp e node_modules na pasta central.",
            )
        )
    return tuple(
        GeneratedFile(
            path=str(spec_path),
            description=description,
            function=function,
            file_type=file_type,
            changed_by=author,
            change_summary=change_summary,
            relation_to_order=order,
        )
        for spec_path, description, function, file_type, change_summary in specs
    )
def write_hygiene_artifacts(
    report: WorkspaceHygieneReport,
    project_root: Path,
    *,
    central_platform_folder: Path | None = None,
) -> tuple[GeneratedFile, ...]:
    """Write project and optional central hygiene artifacts.

    Writes the JSON/CSV/Markdown artifacts under ``project_root`` and, when
    ``central_platform_folder`` is given, a Markdown copy under its
    ``reports`` folder. Central write failures are recorded in a local status
    JSON instead of aborting the project-side artifacts; project-side write
    failures re-raise.

    Returns:
        Semantic ``GeneratedFile`` records for the artifacts (plus a
        central-write-status record when a central write failed).
    """
    # (destination, content) pairs; project artifacts come first so they are
    # written before any best-effort central copy.
    targets: list[tuple[Path, str]] = [
        (project_root / "dados" / "workspace-hygiene-report.json", json.dumps(report.to_dict(), ensure_ascii=False, indent=2, sort_keys=True)),
        (project_root / "matrizes" / "workspace-hygiene-targets.csv", hygiene_csv(report)),
        (project_root / "ecossistema" / "WORKSPACE-HYGIENE-REPORT.md", hygiene_markdown(report)),
    ]
    records = list(hygiene_artifact_records(project_root, central_platform_folder))
    central_failures: list[dict[str, str]] = []
    if central_platform_folder is not None:
        targets.append((central_platform_folder / "reports" / "EXECUTADO__workspace-hygiene.md", hygiene_markdown(report)))
    for path, content in targets:
        try:
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(content, encoding="utf-8")
        except OSError as exc:
            # Central writes are best-effort (e.g. ACL denial): record and continue.
            if central_platform_folder is not None and central_platform_folder in path.parents:
                central_failures.append({"path": str(path), "error": f"{type(exc).__name__}: {exc}"})
                continue
            # Project-side failures are fatal.
            raise
    if central_failures:
        # Persist the central failure locally so the audit trail stays complete.
        status_path = project_root / "dados" / "workspace-hygiene-central-write-status.json"
        status_path.write_text(
            json.dumps(
                {
                    "generatedAt": utc_now(),
                    "centralPlatformFolder": str(central_platform_folder),
                    "ok": False,
                    "failures": central_failures,
                },
                ensure_ascii=False,
                indent=2,
                sort_keys=True,
            ),
            encoding="utf-8",
        )
        records.append(
            GeneratedFile(
                path=str(status_path),
                description="Status de escrita central do relatorio de higiene local.",
                function="workspace hygiene central write status",
                file_type="json",
                changed_by="mais_humana.workspace_hygiene",
                change_summary="Registrada falha de escrita central sem abortar artefatos do projeto real.",
                relation_to_order="0034_EXECUTIVA__corrigir-acl-escrita-central-e-sql-semantico-plataforma-15",
            )
        )
    return tuple(records)
def run_workspace_hygiene(
    *,
    project_root: Path,
    central_platform_folder: Path | None = None,
    apply: bool = False,
) -> tuple[WorkspaceHygieneReport, tuple[GeneratedFile, ...]]:
    """Execute the hygiene gate and write artifacts.

    Thin orchestrator: build the report, then persist the project (and
    optional central) artifacts, returning both.
    """
    hygiene_report = build_workspace_hygiene_report(
        project_root,
        central_platform_folder=central_platform_folder,
        apply=apply,
    )
    written_records = write_hygiene_artifacts(
        hygiene_report,
        project_root,
        central_platform_folder=central_platform_folder,
    )
    return hygiene_report, written_records

View File

@@ -0,0 +1,156 @@
from __future__ import annotations
import json
import unittest
from pathlib import Path
from typing import Sequence
from mais_humana.cli import main
from mais_humana.targeted_sync_audit import (
GitCommandResult,
SyncAuditStatus,
SyncAuditTarget,
TargetedSyncAuditReport,
build_targeted_sync_audit,
classify_observation,
observe_repo,
run_targeted_sync_audit,
sync_audit_csv,
sync_audit_markdown,
)
from tests.helpers import make_tmp
class FakeGitRunner:
    """Scriptable stand-in for the real Git runner used by these tests."""

    def __init__(self) -> None:
        # Scripted results keyed by (repo path, joined command args).
        self.results: dict[tuple[str, str], GitCommandResult] = {}

    @staticmethod
    def _key(repo: Path, args: Sequence[str]) -> tuple[str, str]:
        return str(repo), " ".join(args)

    def set_result(self, repo: Path, args: Sequence[str], output: str, exit_code: int = 0) -> None:
        self.results[self._key(repo, args)] = GitCommandResult(tuple(args), exit_code, output)

    def run(self, repo_path: Path, args: Sequence[str]) -> GitCommandResult:
        # Unscripted commands succeed with empty output.
        fallback = GitCommandResult(tuple(args), 0, "")
        return self.results.get(self._key(repo_path, args), fallback)
def make_git_repo(root: Path, name: str) -> Path:
    # A directory counts as a Git repo for the audit if it contains ".git".
    repo_dir = root / name
    git_marker = repo_dir / ".git"
    git_marker.mkdir(parents=True)
    return repo_dir
def target(path: Path, target_id: str = "repo") -> SyncAuditTarget:
    # Convenience factory: an audit target at ``path`` with a fixed expected
    # remote URL and a unit-test description.
    return SyncAuditTarget(target_id, str(path), "https://git.ami.app.br/admin/repo.git", "unit test repo")
def configure_clean_runner(runner: FakeGitRunner, repo: Path, *, ahead_behind: str = "0 0") -> None:
    """Script ``runner`` so ``repo`` reads as a clean checkout of ``main``."""
    head_hash = "a" * 40
    scripted = (
        (("branch", "--show-current"), "main\n"),
        (("rev-parse", "HEAD"), head_hash + "\n"),
        (("remote", "get-url", "origin"), "https://git.ami.app.br/admin/repo.git\n"),
        (("status", "--short", "--branch"), "## main...origin/main\n"),
        (("status", "--porcelain", "--untracked-files=all"), ""),
        (("rev-list", "--left-right", "--count", "origin/main...main"), ahead_behind),
    )
    for args, output in scripted:
        runner.set_result(repo, args, output)
class TargetedSyncAuditTests(unittest.TestCase):
    """Unit tests for the read-only targeted Git sync audit and its CLI."""

    def test_classify_missing_and_not_git(self) -> None:
        # A nonexistent path classifies as MISSING with a "path_missing"
        # blocker and a decision that asks to materialize the repository.
        status, blockers, decision = classify_observation(
            exists=False,
            is_git=False,
            dirty=False,
            ahead=None,
            behind=None,
            fetch_output="",
            command_outputs=(),
        )
        self.assertEqual(status, SyncAuditStatus.MISSING)
        self.assertIn("path_missing", blockers)
        self.assertIn("materializar", decision)

    def test_observe_clean_repo_aligned(self) -> None:
        # A clean checkout with 0/0 ahead/behind is ALIGNED and auto-syncable.
        root = make_tmp()
        repo = make_git_repo(root, "repo")
        runner = FakeGitRunner()
        configure_clean_runner(runner, repo)
        observation = observe_repo(target(repo), runner=runner)
        self.assertEqual(observation.status, SyncAuditStatus.ALIGNED)
        self.assertEqual(observation.branch, "main")
        self.assertEqual(observation.short_head, "a" * 12)
        self.assertTrue(observation.clean_for_auto_sync)

    def test_fetch_credential_error_blocks_sync(self) -> None:
        # A fetch failing with a Windows schannel credential error must yield
        # CREDENTIAL_BLOCKED with the git_credentials_unavailable blocker.
        root = make_tmp()
        repo = make_git_repo(root, "repo")
        runner = FakeGitRunner()
        configure_clean_runner(runner, repo)
        runner.set_result(repo, ("fetch", "--all", "--prune"), "fatal: schannel SEC_E_NO_CREDENTIALS", 128)
        observation = observe_repo(target(repo), runner=runner, fetch=True)
        self.assertEqual(observation.status, SyncAuditStatus.CREDENTIAL_BLOCKED)
        self.assertIn("git_credentials_unavailable", observation.blockers)

    def test_dirty_and_remote_ahead_is_diverged(self) -> None:
        # Dirty worktree plus a remote that is ahead classifies as DIVERGED.
        root = make_tmp()
        repo = make_git_repo(root, "repo")
        runner = FakeGitRunner()
        configure_clean_runner(runner, repo, ahead_behind="2 0")
        runner.set_result(repo, ("status", "--porcelain", "--untracked-files=all"), " M README.md\n")
        observation = observe_repo(target(repo), runner=runner)
        self.assertEqual(observation.status, SyncAuditStatus.DIVERGED)
        self.assertIn("dirty_worktree_remote_ahead", observation.blockers)

    def test_build_report_and_render_outputs(self) -> None:
        # Report builder aggregates to ALIGNED and both renderers emit output.
        root = make_tmp()
        repo = make_git_repo(root, "repo")
        runner = FakeGitRunner()
        configure_clean_runner(runner, repo)
        report = build_targeted_sync_audit(targets=(target(repo),), runner=runner)
        self.assertIsInstance(report, TargetedSyncAuditReport)
        self.assertEqual(report.status, SyncAuditStatus.ALIGNED)
        self.assertIn("Targeted Git Sync Audit", sync_audit_markdown(report))
        self.assertIn("target_id,path,branch", sync_audit_csv(report))

    def test_run_targeted_sync_audit_writes_project_and_central_artifacts(self) -> None:
        # End-to-end run writes JSON/CSV/Markdown in the project plus the
        # central Markdown copy, and returns at least one record per artifact.
        root = make_tmp()
        project = make_git_repo(root, "tudo-para-ia-mais-humana")
        mcp = make_git_repo(root, "tudo-para-ia-mcps-internos-plataform")
        central_repo = make_git_repo(root, "nucleo-gestao-operacional")
        central = root / "central" / "15_repo_tudo-para-ia-mais-humana-platform"
        runner = FakeGitRunner()
        for repo in (project, mcp, central_repo):
            configure_clean_runner(runner, repo)
        report, records = run_targeted_sync_audit(
            project_root=project,
            mcp_repo_root=mcp,
            central_repo_root=central_repo,
            central_platform_folder=central,
            runner=runner,
        )
        self.assertEqual(report.status, SyncAuditStatus.ALIGNED)
        self.assertTrue((project / "dados" / "targeted-sync-audit.json").exists())
        self.assertTrue((project / "matrizes" / "targeted-sync-audit.csv").exists())
        self.assertTrue((project / "ecossistema" / "TARGETED-SYNC-AUDIT.md").exists())
        self.assertTrue((central / "reports" / "EXECUTADO__targeted-sync-audit.md").exists())
        self.assertGreaterEqual(len(records), 4)

    def test_cli_targeted_sync_audit_writes_payload(self) -> None:
        # CLI subcommand exits 0 and writes a payload with one observation per
        # repository argument (project, mcp, central).
        root = make_tmp()
        project = make_git_repo(root, "project")
        mcp = make_git_repo(root, "mcp")
        central = make_git_repo(root, "central")
        code = main(
            [
                "targeted-sync-audit",
                "--project-root",
                str(project),
                "--mcp-repo-root",
                str(mcp),
                "--central-repo-root",
                str(central),
            ]
        )
        self.assertEqual(code, 0)
        payload = json.loads((project / "dados" / "targeted-sync-audit.json").read_text(encoding="utf-8"))
        self.assertEqual(len(payload["observations"]), 3)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()

View File

@@ -0,0 +1,84 @@
from __future__ import annotations
import json
import unittest
from pathlib import Path
from mais_humana.cli import main
from mais_humana.workspace_hygiene import (
HygieneStatus,
HygieneTarget,
build_workspace_hygiene_report,
hygiene_csv,
hygiene_markdown,
run_workspace_hygiene,
safe_child_path,
)
from tests.helpers import make_tmp
class WorkspaceHygieneTests(unittest.TestCase):
    """Unit tests for the workspace hygiene gate and its CLI."""

    def make_project_with_artifacts(self) -> Path:
        # Fixture: a temp project containing both approved cleanup targets
        # (.test-tmp scratch and node_modules) with one file inside each.
        project = make_tmp()
        scratch = project / ".test-tmp" / "case"
        deps = project / "node_modules" / "pkg"
        scratch.mkdir(parents=True)
        deps.mkdir(parents=True)
        (scratch / "evidence.tmp").write_text("temporary evidence", encoding="utf-8")
        (deps / "package.json").write_text("{}", encoding="utf-8")
        return project

    def test_safe_child_path_rejects_escape(self) -> None:
        # A relative path that escapes the project root must raise ValueError.
        project = make_tmp()
        with self.assertRaises(ValueError):
            safe_child_path(project, "..\\outside")

    def test_dry_run_reports_present_artifacts_without_deleting(self) -> None:
        # Without apply, artifacts stay on disk and the report is PARTIAL.
        project = self.make_project_with_artifacts()
        report = build_workspace_hygiene_report(project, apply=False)
        self.assertEqual(report.status, HygieneStatus.PARTIAL)
        self.assertFalse(report.clean)
        self.assertTrue((project / ".test-tmp").exists())
        self.assertTrue((project / "node_modules").exists())
        self.assertIn("dry run", hygiene_markdown(report))
        self.assertIn("target_id,path,action,status", hygiene_csv(report))

    def test_apply_removes_approved_artifacts(self) -> None:
        # With apply, both approved artifacts are deleted and the report is clean.
        project = self.make_project_with_artifacts()
        report = build_workspace_hygiene_report(project, apply=True)
        self.assertEqual(report.status, HygieneStatus.PASSED)
        self.assertTrue(report.clean)
        self.assertFalse((project / ".test-tmp").exists())
        self.assertFalse((project / "node_modules").exists())
        self.assertTrue(all(action.deleted for action in report.actions))

    def test_custom_unsafe_target_is_blocked_before_delete(self) -> None:
        # An escaping custom target is BLOCKED without touching the filesystem.
        project = make_tmp()
        target = HygieneTarget("unsafe", "..\\outside", "unit test")
        report = build_workspace_hygiene_report(project, apply=True, targets=(target,))
        self.assertEqual(report.status, HygieneStatus.BLOCKED)
        self.assertTrue(report.blockers)
        self.assertIn("escapes project root", report.actions[0].error)

    def test_run_workspace_hygiene_writes_project_and_central_artifacts(self) -> None:
        # End-to-end run writes JSON/CSV/Markdown plus the central copy.
        project = self.make_project_with_artifacts()
        central = make_tmp() / "central" / "15_repo_tudo-para-ia-mais-humana-platform"
        report, records = run_workspace_hygiene(project_root=project, central_platform_folder=central, apply=True)
        self.assertEqual(report.status, HygieneStatus.PASSED)
        self.assertTrue((project / "dados" / "workspace-hygiene-report.json").exists())
        self.assertTrue((project / "matrizes" / "workspace-hygiene-targets.csv").exists())
        self.assertTrue((project / "ecossistema" / "WORKSPACE-HYGIENE-REPORT.md").exists())
        self.assertTrue((central / "reports" / "EXECUTADO__workspace-hygiene.md").exists())
        self.assertGreaterEqual(len(records), 4)

    def test_cli_workspace_hygiene_writes_json_payload(self) -> None:
        # CLI subcommand with --apply exits 0 and persists a passed/clean payload.
        project = self.make_project_with_artifacts()
        code = main(["workspace-hygiene", "--project-root", str(project), "--apply"])
        self.assertEqual(code, 0)
        payload = json.loads((project / "dados" / "workspace-hygiene-report.json").read_text(encoding="utf-8"))
        self.assertEqual(payload["status"], "passed")
        self.assertEqual(payload["clean"], True)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()