feat: fundar plataforma mais humana
This commit is contained in:
548
src/mais_humana/reports.py
Normal file
548
src/mais_humana/reports.py
Normal file
@@ -0,0 +1,548 @@
|
||||
"""High-level orchestration for report generation."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Sequence
|
||||
|
||||
from .catalog import HUMAN_PROFILES, PLATFORMS
|
||||
from .charts import matrix_heatmap_svg, platform_bar_svg, profile_radar_svg
|
||||
from .commands import base_validation_commands, commands_markdown, platform_validation_commands
|
||||
from .contract import build_contract, contract_markdown
|
||||
from .docx_writer import DocxDocument, write_lines_docx
|
||||
from .acceptance import acceptance_markdown, build_acceptance_report
|
||||
from .evidence_index import build_evidence_index, evidence_markdown
|
||||
from .evidence_graph import build_evidence_graph
|
||||
from .exit_order_compiler import compile_governance_orders, compiled_orders_markdown, order_coverage_rows, source_candidate_rows
|
||||
from .governance_diff import (
|
||||
diff_governance_snapshots,
|
||||
governance_delta_markdown,
|
||||
governance_delta_rows,
|
||||
load_governance_snapshot,
|
||||
snapshot_from_portfolio,
|
||||
write_governance_snapshot,
|
||||
)
|
||||
from .governance_engine import build_governance_portfolio, rows_to_csv
|
||||
from .governance_exports import governance_exports, write_central_lifecycle_exports, write_governance_exports
|
||||
from .governance_scenarios import build_scenario_portfolio
|
||||
from .governance_storage import write_governance_semantic_state
|
||||
from .human_readiness_registry import build_readiness_registry
|
||||
from .matrix import build_global_recommendations, build_matrix, build_platform_reports, matrix_table
|
||||
from .models import EcosystemHumanReport, GeneratedFile, PlatformHumanReport, ReportBundle, as_plain_data
|
||||
from .narratives import ecosystem_markdown, ecosystem_summary_lines, platform_markdown, platform_report_lines
|
||||
from .orders import audit_markdown, build_exit_orders, executed_order_markdown, pending_markdown, write_orders
|
||||
from .html_export import write_index_html
|
||||
from .insights import build_insights, dependency_dot, insights_markdown
|
||||
from .operational_dossier import (
|
||||
build_execution_round_dossier,
|
||||
dossier_compact_rows,
|
||||
dossier_to_markdown,
|
||||
order_justifications_markdown,
|
||||
write_csv_lines,
|
||||
)
|
||||
from .playbooks import build_playbooks, playbooks_markdown
|
||||
from .portfolio_queries import build_operational_questions
|
||||
from .quality import evaluate_ecosystem_quality, quality_to_markdown
|
||||
from .questions import questions_for_ecosystem, questions_markdown
|
||||
from .redaction import redaction_markdown, scan_generated_artifacts
|
||||
from .round_assurance import assurance_markdown, assurance_rows, build_assurance_suite
|
||||
from .runtime_budget import build_round_line_budget
|
||||
from .scanner import scan_ecosystem
|
||||
from .snapshots import diff_snapshots, load_snapshot, snapshot_delta_markdown, snapshot_from_reports, write_snapshot
|
||||
from .status_pages import write_central_status_pages
|
||||
from .service_order_lifecycle import build_round_execution_package
|
||||
from .status_reconciler import build_reconciled_status, write_reconciled_status
|
||||
from .storage import write_semantic_state
|
||||
from .workflow_registry import build_workflow_portfolio
|
||||
|
||||
|
||||
def repo_paths(project_root: Path) -> dict[str, Path]:
    """Map each logical output area to its concrete directory under *project_root*."""
    # Logical name -> relative path parts under the project root.
    layout: dict[str, tuple[str, ...]] = {
        "platform_markdown": ("plataformas",),
        "ecosystem": ("ecossistema",),
        "docx_platforms": ("relatorios-docx", "plataformas"),
        "docx_root": ("relatorios-docx",),
        "charts": ("graficos",),
        "matrices": ("matrizes",),
        "data": ("dados",),
        "orders": ("os-orientadoras",),
        "goals": ("metas-humanas",),
        "questions": ("pessoas-e-papeis",),
        "html": ("ecossistema",),
    }
    return {name: project_root.joinpath(*parts) for name, parts in layout.items()}
|
||||
|
||||
|
||||
def ensure_project_dirs(project_root: Path) -> None:
    """Create every output directory the generator writes into (idempotent)."""
    extras = ("paradigma", "pessoas-e-papeis", "telas-e-relatorios", "templates")
    targets = list(repo_paths(project_root).values())
    targets.extend(project_root / name for name in extras)
    for target in targets:
        target.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
def generated_file(path: Path, project_root: Path, description: str, function: str, file_type: str, relation: str) -> GeneratedFile:
    """Build a GeneratedFile record with a project-relative, forward-slash path."""
    try:
        relative = path.relative_to(project_root)
    except ValueError:
        # Path lives outside the project tree (e.g. the central folder); keep it whole.
        relative = path
    normalized = str(relative).replace("\\", "/")
    return GeneratedFile(
        path=normalized,
        description=description,
        function=function,
        file_type=file_type,
        changed_by="mais_humana.generate",
        change_summary=description,
        relation_to_order=relation,
    )
|
||||
|
||||
|
||||
def write_json(path: Path, payload: object) -> Path:
    """Serialize *payload* as deterministic UTF-8 JSON at *path* and return the path."""
    path.parent.mkdir(parents=True, exist_ok=True)
    # sort_keys + fixed indent keeps output stable across runs for diffing.
    serialized = json.dumps(as_plain_data(payload), ensure_ascii=False, indent=2, sort_keys=True)
    path.write_text(serialized, encoding="utf-8")
    return path
|
||||
|
||||
|
||||
def write_platform_docx(path: Path, report: PlatformHumanReport) -> Path:
    """Render one platform's human report as a DOCX document and return its path."""
    doc = DocxDocument(title=f"Relatorio humano - {report.platform.title}")
    doc.heading("Missao", 2)
    doc.paragraph(report.platform.mission)
    doc.heading("Sintese", 2)
    doc.paragraph(report.summary)
    doc.heading("Estado atual", 2)
    for state_line in report.current_state:
        doc.bullet(state_line)
    doc.heading("Lacunas humanas", 2)
    for gap in report.missing_for_humans:
        doc.bullet(gap)
    doc.heading("Matriz por perfil", 2)
    # Explanation is truncated to keep table cells readable.
    matrix_rows = [
        (cell.profile_id, str(cell.score), cell.maturity.value, cell.explanation[:180])
        for cell in sorted(report.cells, key=lambda cell: cell.profile_id)
    ]
    doc.table(("Perfil", "Score", "Maturidade", "Leitura"), matrix_rows)
    doc.heading("Recomendacoes", 2)
    for rec in report.recommendations[:8]:
        doc.bullet(f"{rec.title}: {rec.reason}")
    return doc.write(path)
|
||||
|
||||
|
||||
def write_ecosystem_docx(path: Path, reports: Sequence[PlatformHumanReport]) -> Path:
    """Render the ecosystem-wide summary as a DOCX document and return its path."""
    doc = DocxDocument(title="Relatorio Geral do Ecossistema Mais Humano")
    for line in ecosystem_summary_lines(reports):
        # Two known marker shapes become level-2 headings; everything else is body text.
        if line == "Leitura por necessidade humana" or line.startswith("Plataformas "):
            doc.heading(line, 2)
        else:
            doc.paragraph(line)
    summary_rows = [
        (
            report.platform.platform_id,
            str(report.average_score),
            str(report.scan.code_lines),
            str(len(report.scan.evidence)),
        )
        for report in sorted(reports, key=lambda report: report.platform.platform_id)
    ]
    doc.heading("Resumo por plataforma", 2)
    doc.table(("Plataforma", "Score", "Linhas", "Evidencias"), summary_rows)
    return doc.write(path)
|
||||
|
||||
|
||||
def write_profile_catalog(project_root: Path) -> Path:
    """Persist the canonical human-profile catalog as JSON and return its path."""
    target = project_root / "pessoas-e-papeis" / "perfis-humanos.json"
    return write_json(target, HUMAN_PROFILES)
|
||||
|
||||
|
||||
def write_platform_catalog(project_root: Path) -> Path:
    """Persist the canonical platform catalog as JSON and return its path."""
    target = project_root / "dados" / "catalogo-plataformas.json"
    return write_json(target, PLATFORMS)
|
||||
|
||||
|
||||
def write_matrix_csv(path: Path, table: Sequence[Sequence[str]]) -> Path:
    """Write *table* to *path* as CSV and return the path.

    Quoting follows the RFC 4180 convention: a value containing a comma,
    a newline, or a double quote is wrapped in double quotes, and embedded
    double quotes are doubled.

    Parameters:
        path: destination file; parent directories are created as needed.
        table: rows of already-stringifiable cell values.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    lines = []
    for row in table:
        escaped = []
        for value in row:
            raw = str(value)
            text = raw.replace('"', '""')
            # Bug fix: a cell containing quotes must itself be quoted —
            # previously only commas/newlines triggered quoting, so doubled
            # quotes leaked into the cell unquoted and corrupted the CSV.
            if "," in raw or "\n" in raw or '"' in raw:
                text = f'"{text}"'
            escaped.append(text)
        lines.append(",".join(escaped))
    path.write_text("\n".join(lines) + "\n", encoding="utf-8")
    return path
|
||||
|
||||
|
||||
def write_human_goals(project_root: Path, reports: Sequence[PlatformHumanReport]) -> Path:
    """Write the per-platform human-goals Markdown file and return its path."""
    target = project_root / "metas-humanas" / "metas-humanas-por-plataforma.md"
    lines: list[str] = ["# Metas humanas por plataforma", ""]
    for report in sorted(reports, key=lambda report: report.platform.platform_id):
        lines.append(f"## {report.platform.title}")
        lines.append("")
        lines.append(f"Score atual: {report.average_score}")
        lines.append("")
        # Cap at five gaps per platform to keep the goal list actionable.
        lines.extend(f"- Converter lacuna em entrega: {gap}" for gap in report.missing_for_humans[:5])
        lines.append("")
    target.write_text("\n".join(lines), encoding="utf-8")
    return target
|
||||
|
||||
|
||||
def write_screen_report_map(project_root: Path, reports: Sequence[PlatformHumanReport]) -> Path:
    """Write the expected screens/reports map as Markdown and return its path."""
    target = project_root / "telas-e-relatorios" / "mapa-telas-relatorios-esperados.md"
    lines: list[str] = ["# Mapa de telas e relatorios esperados", ""]
    for report in sorted(reports, key=lambda report: report.platform.platform_id):
        lines.append(f"## {report.platform.title}")
        lines.append("")
        lines.extend(f"- {surface}" for surface in report.platform.expected_surfaces)
        lines.append("")
    target.write_text("\n".join(lines), encoding="utf-8")
    return target
|
||||
|
||||
|
||||
def write_paradigm(project_root: Path) -> Path:
    """Write the institutional paradigm Markdown document and return its path.

    Robustness fix: the parent directory is now created if missing
    (consistent with write_json), so the function no longer depends on
    ensure_project_dirs having been called first.
    """
    path = project_root / "paradigma" / "paradigma-mais-humano.md"
    path.parent.mkdir(parents=True, exist_ok=True)
    text = """# Paradigma Mais Humano

A plataforma tudo-para-ia-mais-humana traduz estado tecnico em compreensao humana.

Ela pergunta:

- quem e atendido;
- como e atendido;
- o que ja funciona;
- o que ainda falta;
- qual ordem de servico melhora a experiencia real.

A plataforma nao substitui o nucleo, a central, o MCP ou a UI. Ela transforma evidencias dessas camadas em relatorios, matrizes e continuidade orientada a pessoas.
"""
    path.write_text(text, encoding="utf-8")
    return path
|
||||
|
||||
|
||||
def generate(
    ecosystem_root: Path,
    project_root: Path,
    central_platform_folder: Path | None = None,
    relation_to_order: str = "0011_GERENCIAL__fundacao-da-plataforma",
    push_status: str | None = None,
) -> ReportBundle:
    """Run a full report-generation round and return the final ReportBundle.

    Scans the ecosystem under *ecosystem_root*, derives all in-memory models
    (matrix, governance portfolio, dossier, insights, ...), writes every
    artifact under *project_root* (JSON data, Markdown/DOCX reports, SVG/DOT
    charts, CSV matrices, HTML index), and — when *central_platform_folder*
    is provided — mirrors exit orders, execution/audit records, status pages
    and the semantic SQLite state into that central folder.

    Parameters:
        ecosystem_root: root folder of the scanned repositories.
        project_root: output root for all generated artifacts.
        central_platform_folder: optional central folder receiving orders,
            lifecycle exports and reconciled status; None skips those steps.
        relation_to_order: order identifier stamped on every GeneratedFile.
        push_status: optional free-text status folded into governance and
            assurance inputs.

    Returns:
        A ReportBundle summarizing every file written this round.
    """
    # --- Analysis stage: scan sources and build all derived models ---------
    ensure_project_dirs(project_root)
    scans = scan_ecosystem(ecosystem_root)
    evidence_records = build_evidence_index(scans)
    cells = build_matrix(scans)
    platform_reports = build_platform_reports(scans, cells)
    recommendations = build_global_recommendations(platform_reports)
    quality_reports = evaluate_ecosystem_quality(platform_reports)
    human_questions = questions_for_ecosystem(platform_reports)
    playbooks = build_playbooks(platform_reports)
    insights = build_insights(platform_reports, recommendations)
    command_specs = base_validation_commands(project_root, central_platform_folder) + platform_validation_commands(platform_reports)
    ecosystem_report = EcosystemHumanReport(scans=scans, platform_reports=platform_reports, recommendations=recommendations)
    exit_orders = build_exit_orders(recommendations)
    # Accumulates a record for every artifact written; folded into bundles below.
    generated: list[GeneratedFile] = []
    round_dossier = build_execution_round_dossier(
        project_root=project_root,
        platform_reports=platform_reports,
        recommendations=recommendations,
        output_orders=exit_orders,
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
    )
    governance_portfolio = build_governance_portfolio(
        platform_reports,
        recommendations=recommendations,
        round_dossier=round_dossier,
        # Fed as a 1-tuple of text; empty string when no push status was given.
        extra_text=(push_status or "",),
    )
    readiness_registry = build_readiness_registry(platform_reports, governance_portfolio)
    workflow_portfolio = build_workflow_portfolio(governance_portfolio)
    scenario_portfolio = build_scenario_portfolio(governance_portfolio)
    governance_orders = compile_governance_orders(governance_portfolio)
    governance_questions = build_operational_questions(governance_portfolio)
    line_budget = build_round_line_budget(ecosystem_root, project_root)
    # Lifecycle package only exists when a central folder participates in the round.
    lifecycle_package = (
        build_round_execution_package(
            central_platform_folder,
            governance_portfolio,
            round_dossier=round_dossier,
            total_code_lines_analyzed=line_budget.total_technical_lines,
            # NOTE(review): uses only the first repository's line count — presumably
            # the primary repo; confirm against build_round_line_budget's ordering.
            code_lines_available=line_budget.repositories[0].code_lines if line_budget.repositories else 0,
        )
        if central_platform_folder is not None
        else None
    )
    evidence_graph = build_evidence_graph(
        governance_portfolio,
        readiness_registry,
        workflow_portfolio,
        compiled_orders=governance_orders,
    )

    # --- Catalogs and paradigm --------------------------------------------
    profile_catalog = write_profile_catalog(project_root)
    generated.append(generated_file(profile_catalog, project_root, "Catalogo de perfis humanos considerado pela matriz.", "catalogo de perfis", "json", relation_to_order))
    platform_catalog = write_platform_catalog(project_root)
    generated.append(generated_file(platform_catalog, project_root, "Catalogo canonico das plataformas avaliadas.", "catalogo de plataformas", "json", relation_to_order))
    paradigm = write_paradigm(project_root)
    generated.append(generated_file(paradigm, project_root, "Paradigma institucional da plataforma Mais Humana.", "paradigma", "markdown", relation_to_order))

    # --- Core data artifacts: snapshot, evidence, matrix, dossier ---------
    data_path = write_json(project_root / "dados" / "snapshot-ecossistema.json", ecosystem_report)
    generated.append(generated_file(data_path, project_root, "Snapshot JSON do ecossistema humano.", "dados auditaveis", "json", relation_to_order))
    evidence_json = write_json(project_root / "dados" / "indice-evidencias.json", evidence_records)
    generated.append(generated_file(evidence_json, project_root, "Indice JSON de evidencias coletadas.", "indice de evidencias", "json", relation_to_order))
    evidence_md = project_root / "ecossistema" / "INDICE-DE-EVIDENCIAS-HUMANAS.md"
    evidence_md.write_text(evidence_markdown(evidence_records), encoding="utf-8")
    generated.append(generated_file(evidence_md, project_root, "Indice Markdown de evidencias humanas.", "indice de evidencias", "markdown", relation_to_order))
    matrix_csv = write_matrix_csv(project_root / "matrizes" / "matriz-plataforma-perfil.csv", matrix_table(cells))
    generated.append(generated_file(matrix_csv, project_root, "Matriz plataforma x perfil em CSV.", "matriz tabular", "csv", relation_to_order))
    dossier_json = write_json(project_root / "dados" / "dossie-operacional-humano.json", round_dossier)
    generated.append(generated_file(dossier_json, project_root, "Dossie operacional humano da rodada em JSON.", "dossie operacional", "json", relation_to_order))
    dossier_md = project_root / "ecossistema" / "DOSSIE-OPERACIONAL-HUMANO.md"
    dossier_md.write_text(dossier_to_markdown(round_dossier), encoding="utf-8")
    generated.append(generated_file(dossier_md, project_root, "Dossie operacional humano da rodada em Markdown.", "dossie operacional", "markdown", relation_to_order))
    justifications_md = project_root / "ecossistema" / "JUSTIFICATIVA-ORDENS-DE-SERVICO.md"
    justifications_md.write_text(order_justifications_markdown(round_dossier), encoding="utf-8")
    generated.append(generated_file(justifications_md, project_root, "Justificativa das ordens de servico por evidencia e gate.", "justificativa de ordens", "markdown", relation_to_order))
    dossier_csv = project_root / "matrizes" / "dossie-operacional-humano.csv"
    dossier_csv.write_text(write_csv_lines(dossier_compact_rows(round_dossier)), encoding="utf-8")
    generated.append(generated_file(dossier_csv, project_root, "Resumo tabular do dossie operacional humano.", "dossie operacional", "csv", relation_to_order))

    # --- Ecosystem-level reports ------------------------------------------
    ecosystem_md = project_root / "ecossistema" / "RELATORIO-GERAL-DO-ECOSSISTEMA-humana.md"
    ecosystem_md.write_text(ecosystem_markdown(platform_reports), encoding="utf-8")
    generated.append(generated_file(ecosystem_md, project_root, "Relatorio geral em Markdown.", "relatorio geral", "markdown", relation_to_order))
    ecosystem_docx = write_ecosystem_docx(project_root / "relatorios-docx" / "RELATORIO-GERAL-DO-ECOSSISTEMA-humana.docx", platform_reports)
    generated.append(generated_file(ecosystem_docx, project_root, "Relatorio geral em DOCX.", "relatorio docx", "docx", relation_to_order))

    # --- Charts, quality gate, insights, questions, playbooks, commands ---
    heatmap = matrix_heatmap_svg(project_root / "graficos" / "matriz-plataforma-perfil.svg", cells)
    generated.append(generated_file(heatmap, project_root, "Heatmap SVG da matriz plataforma x perfil.", "grafico", "svg", relation_to_order))
    bars = platform_bar_svg(project_root / "graficos" / "maturidade-por-plataforma.svg", platform_reports)
    generated.append(generated_file(bars, project_root, "Grafico SVG de maturidade por plataforma.", "grafico", "svg", relation_to_order))
    quality_md = project_root / "ecossistema" / "QUALITY-GATE-MAIS-HUMANO.md"
    quality_md.write_text(quality_to_markdown(quality_reports), encoding="utf-8")
    generated.append(generated_file(quality_md, project_root, "Quality gate humano por plataforma.", "quality gate", "markdown", relation_to_order))
    insights_md = project_root / "ecossistema" / "INSIGHTS-OPERACIONAIS-MAIS-HUMANA.md"
    insights_md.write_text(insights_markdown(insights), encoding="utf-8")
    generated.append(generated_file(insights_md, project_root, "Insights de risco, dependencias, roadmap e cobertura.", "insights", "markdown", relation_to_order))
    dot_path = project_root / "graficos" / "dependencias-humanas.dot"
    dot_path.write_text(dependency_dot(insights), encoding="utf-8")
    generated.append(generated_file(dot_path, project_root, "Grafo DOT de dependencias humanas entre plataformas.", "grafo", "dot", relation_to_order))
    questions_md = project_root / "pessoas-e-papeis" / "perguntas-humanas-respondidas.md"
    questions_md.write_text(questions_markdown(human_questions), encoding="utf-8")
    generated.append(generated_file(questions_md, project_root, "Perguntas humanas respondidas por plataforma e perfil.", "perguntas humanas", "markdown", relation_to_order))
    playbooks_md = project_root / "pessoas-e-papeis" / "playbooks-humanos.md"
    playbooks_md.write_text(playbooks_markdown(playbooks), encoding="utf-8")
    generated.append(generated_file(playbooks_md, project_root, "Playbooks humanos por perfil operacional.", "playbooks", "markdown", relation_to_order))
    commands_md = project_root / "ecossistema" / "COMANDOS-HUMANOS-EQUIVALENTES.md"
    commands_md.write_text(commands_markdown(command_specs), encoding="utf-8")
    generated.append(generated_file(commands_md, project_root, "Comandos humanos equivalentes para validacao.", "comandos", "markdown", relation_to_order))

    # --- Per-platform reports (Markdown, DOCX, radar chart) ---------------
    for report in platform_reports:
        md_path = project_root / "plataformas" / f"{report.platform.platform_id}.md"
        md_path.write_text(platform_markdown(report), encoding="utf-8")
        generated.append(generated_file(md_path, project_root, f"Relatorio humano Markdown da plataforma {report.platform.platform_id}.", "relatorio por plataforma", "markdown", relation_to_order))
        docx_path = write_platform_docx(project_root / "relatorios-docx" / "plataformas" / f"{report.platform.platform_id}.docx", report)
        generated.append(generated_file(docx_path, project_root, f"Relatorio humano DOCX da plataforma {report.platform.platform_id}.", "relatorio docx por plataforma", "docx", relation_to_order))
        radar = profile_radar_svg(project_root / "graficos" / f"radar-{report.platform.platform_id}.svg", report)
        generated.append(generated_file(radar, project_root, f"Radar SVG humano da plataforma {report.platform.platform_id}.", "grafico radar", "svg", relation_to_order))

    # --- Goals and surface map --------------------------------------------
    goals = write_human_goals(project_root, platform_reports)
    generated.append(generated_file(goals, project_root, "Metas humanas por plataforma.", "metas humanas", "markdown", relation_to_order))
    screen_map = write_screen_report_map(project_root, platform_reports)
    generated.append(generated_file(screen_map, project_root, "Mapa de telas e relatorios esperados.", "mapa de superficie", "markdown", relation_to_order))

    # --- JSON mirrors of the derived models + local HTML index ------------
    order_summary = project_root / "os-orientadoras" / "ordens-de-saida.json"
    write_json(order_summary, exit_orders)
    generated.append(generated_file(order_summary, project_root, "Ordens de saida em JSON.", "ordens orientadoras", "json", relation_to_order))
    quality_json = write_json(project_root / "dados" / "quality-gates.json", quality_reports)
    generated.append(generated_file(quality_json, project_root, "Quality gates em JSON.", "quality gates", "json", relation_to_order))
    questions_json = write_json(project_root / "dados" / "perguntas-humanas.json", human_questions)
    generated.append(generated_file(questions_json, project_root, "Perguntas humanas em JSON.", "perguntas humanas", "json", relation_to_order))
    playbooks_json = write_json(project_root / "dados" / "playbooks-humanos.json", playbooks)
    generated.append(generated_file(playbooks_json, project_root, "Playbooks humanos em JSON.", "playbooks", "json", relation_to_order))
    commands_json = write_json(project_root / "dados" / "comandos-humanos-equivalentes.json", command_specs)
    generated.append(generated_file(commands_json, project_root, "Comandos humanos equivalentes em JSON.", "comandos", "json", relation_to_order))
    insights_json = write_json(project_root / "dados" / "insights-operacionais.json", insights)
    generated.append(generated_file(insights_json, project_root, "Insights operacionais em JSON.", "insights", "json", relation_to_order))
    index_html = write_index_html(project_root / "ecossistema" / "index.html", platform_reports, quality_reports)
    generated.append(generated_file(index_html, project_root, "Indice HTML local para revisao dos relatorios humanos.", "html operacional", "html", relation_to_order))

    # --- Governance exports, snapshot and delta ---------------------------
    governance_export_bundle = write_governance_exports(
        project_root,
        governance_exports(
            project_root,
            governance_portfolio,
            readiness_registry,
            workflow_portfolio,
            scenario_portfolio,
            evidence_graph,
            governance_questions,
            budget=line_budget,
            compiled_orders=governance_orders,
            lifecycle=lifecycle_package,
        ),
        relation_to_order,
    )
    generated.extend(governance_export_bundle.generated_records)
    governance_snapshot_path = project_root / "dados" / "snapshot-governanca-atual.json"
    # Load the previous snapshot BEFORE overwriting it, to compute the delta.
    previous_governance_snapshot = load_governance_snapshot(governance_snapshot_path)
    current_governance_snapshot = snapshot_from_portfolio(governance_portfolio)
    write_governance_snapshot(governance_snapshot_path, current_governance_snapshot)
    generated.append(generated_file(governance_snapshot_path, project_root, "Snapshot compacto de governanca operacional.", "snapshot governanca", "json", relation_to_order))
    governance_delta_path = project_root / "ecossistema" / "DELTA-GOVERNANCA-OPERACIONAL.md"
    governance_delta_path.write_text(
        governance_delta_markdown(diff_governance_snapshots(previous_governance_snapshot, current_governance_snapshot)),
        encoding="utf-8",
    )
    generated.append(generated_file(governance_delta_path, project_root, "Delta de governanca operacional.", "delta governanca", "markdown", relation_to_order))
    governance_delta_csv = project_root / "matrizes" / "delta-governanca-operacional.csv"
    governance_delta_csv.write_text(
        rows_to_csv(governance_delta_rows(diff_governance_snapshots(previous_governance_snapshot, current_governance_snapshot))),
        encoding="utf-8",
    )
    generated.append(generated_file(governance_delta_csv, project_root, "Delta de governanca operacional em CSV.", "delta governanca", "csv", relation_to_order))

    # --- Central folder: orders, execution/audit records, semantic state --
    if central_platform_folder is not None:
        written_orders = write_orders(exit_orders, central_platform_folder)
        write_json(order_summary, exit_orders)
        for path in written_orders:
            generated.append(generated_file(path, project_root, "Ordem de saida criada na central.", "ordem de servico", "markdown", relation_to_order))
        reports_dir = central_platform_folder / "reports"
        reports_dir.mkdir(parents=True, exist_ok=True)
        executed = reports_dir / "EXECUTADO__fundacao-tudo-para-ia-mais-humana.md"
        executed.write_text(executed_order_markdown(platform_reports, exit_orders), encoding="utf-8")
        generated.append(generated_file(executed, project_root, "Registro EXECUTADO da rodada.", "registro de execucao", "markdown", relation_to_order))
        pending = reports_dir / "PENDENCIAS-CODEX__fundacao-tudo-para-ia-mais-humana.md"
        pending.write_text(pending_markdown(platform_reports, push_status=push_status), encoding="utf-8")
        generated.append(generated_file(pending, project_root, "Registro de pendencias reais da rodada.", "pendencias", "markdown", relation_to_order))
        audit_dir = central_platform_folder / "audit"
        audit_dir.mkdir(parents=True, exist_ok=True)
        audit = audit_dir / "AUDITORIA-GPT__fundacao-tudo-para-ia-mais-humana.md"
        audit.write_text(audit_markdown(platform_reports, exit_orders), encoding="utf-8")
        generated.append(generated_file(audit, project_root, "Auditoria da rodada.", "auditoria", "markdown", relation_to_order))
        operational_executed = reports_dir / "EXECUTADO__rodada-operacional-mais-humana.md"
        operational_executed.write_text(dossier_to_markdown(round_dossier), encoding="utf-8")
        generated.append(generated_file(operational_executed, project_root, "Registro EXECUTADO operacional com dossie humano.", "registro de execucao", "markdown", relation_to_order))
        operational_pending = reports_dir / "PENDENCIAS-CODEX__rodada-operacional-mais-humana.md"
        operational_pending.write_text("\n".join(["# Pendencias operacionais consolidadas", ""] + [f"- {item}" for item in round_dossier.pending_items]) + "\n", encoding="utf-8")
        generated.append(generated_file(operational_pending, project_root, "Pendencias consolidadas do dossie operacional.", "pendencias", "markdown", relation_to_order))
        operational_audit = audit_dir / "AUDITORIA-GPT__rodada-operacional-mais-humana.md"
        operational_audit.write_text(order_justifications_markdown(round_dossier), encoding="utf-8")
        generated.append(generated_file(operational_audit, project_root, "Auditoria operacional das ordens tratadas.", "auditoria", "markdown", relation_to_order))
        sqlite_path = central_platform_folder / "controle-semantico.sqlite"
        # Semantic state is re-written at several points; each write persists the
        # generated-file list as accumulated so far.
        write_semantic_state(sqlite_path, tuple(generated), exit_orders, platform_reports, recommendations, round_dossier)

    # --- Acceptance, redaction check, score snapshot/delta ----------------
    provisional_bundle = ReportBundle(
        output_root=str(project_root),
        generated_files=tuple(generated),
        platform_count=len(platform_reports),
        profile_count=len(HUMAN_PROFILES),
        matrix_cells=len(cells),
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
        warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
    )
    acceptance = build_acceptance_report(project_root, platform_reports, exit_orders, provisional_bundle)
    acceptance_path = project_root / "ecossistema" / "ACCEPTANCE-CHECKLIST-MAIS-HUMANA.md"
    acceptance_path.write_text(acceptance_markdown(acceptance), encoding="utf-8")
    generated.append(generated_file(acceptance_path, project_root, "Checklist de aceite da rodada.", "acceptance", "markdown", relation_to_order))
    redaction = scan_generated_artifacts(project_root)
    redaction_path = project_root / "ecossistema" / "REDACTION-CHECK-MAIS-HUMANA.md"
    redaction_path.write_text(redaction_markdown(redaction), encoding="utf-8")
    generated.append(generated_file(redaction_path, project_root, "Checagem textual de vazamento de segredos.", "redaction", "markdown", relation_to_order))
    snapshot_path = project_root / "dados" / "snapshot-score-atual.json"
    # As with governance: read the old snapshot before writing the new one.
    previous_snapshot = load_snapshot(snapshot_path)
    current_snapshot = snapshot_from_reports(platform_reports)
    write_snapshot(snapshot_path, current_snapshot)
    generated.append(generated_file(snapshot_path, project_root, "Snapshot compacto de score por plataforma.", "snapshot", "json", relation_to_order))
    delta_path = project_root / "ecossistema" / "DELTA-MATURIDADE-HUMANA.md"
    delta_path.write_text(snapshot_delta_markdown(diff_snapshots(previous_snapshot, current_snapshot)), encoding="utf-8")
    generated.append(generated_file(delta_path, project_root, "Delta de maturidade humana contra snapshot anterior.", "delta", "markdown", relation_to_order))

    # --- Central status pages + semantic state refresh --------------------
    if central_platform_folder is not None:
        central_bundle = ReportBundle(
            output_root=str(project_root),
            generated_files=tuple(generated),
            platform_count=len(platform_reports),
            profile_count=len(HUMAN_PROFILES),
            matrix_cells=len(cells),
            total_code_lines_analyzed=ecosystem_report.total_code_lines,
            warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
        )
        for path in write_central_status_pages(central_platform_folder, central_bundle, platform_reports, exit_orders):
            generated.append(generated_file(path, project_root, "Pagina de estado/indice da central.", "estado central", "markdown", relation_to_order))
        sqlite_path = central_platform_folder / "controle-semantico.sqlite"
        write_semantic_state(sqlite_path, tuple(generated), exit_orders, platform_reports, recommendations, round_dossier)

    # --- Assurance suite ---------------------------------------------------
    final_bundle = ReportBundle(
        output_root=str(project_root),
        generated_files=tuple(generated),
        platform_count=len(platform_reports),
        profile_count=len(HUMAN_PROFILES),
        matrix_cells=len(cells),
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
        warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
    )
    assurance = build_assurance_suite(
        project_root=project_root,
        bundle=final_bundle,
        platform_reports=platform_reports,
        portfolio=governance_portfolio,
        lifecycle_package=lifecycle_package,
        compiled_orders=governance_orders,
        central_folder=central_platform_folder,
        extra_text=(push_status or "",),
    )
    assurance_json = write_json(project_root / "dados" / "assurance-rodada.json", assurance)
    generated.append(generated_file(assurance_json, project_root, "Assurance da rodada em JSON.", "assurance", "json", relation_to_order))
    assurance_md = project_root / "ecossistema" / "ASSURANCE-RODADA-MAIS-HUMANA.md"
    assurance_md.write_text(assurance_markdown(assurance), encoding="utf-8")
    generated.append(generated_file(assurance_md, project_root, "Assurance da rodada em Markdown.", "assurance", "markdown", relation_to_order))
    assurance_csv = project_root / "matrizes" / "assurance-rodada.csv"
    assurance_csv.write_text(rows_to_csv(assurance_rows(assurance)), encoding="utf-8")
    generated.append(generated_file(assurance_csv, project_root, "Assurance da rodada em CSV.", "assurance", "csv", relation_to_order))

    # --- Lifecycle closure and reconciled status (central + lifecycle only)
    if central_platform_folder is not None and lifecycle_package is not None:
        for path in write_central_lifecycle_exports(central_platform_folder, lifecycle_package):
            generated.append(generated_file(path, project_root, "Fechamento lifecycle das ordens ativas na central.", "lifecycle central", "markdown", relation_to_order))
        reconciled = build_reconciled_status(
            governance_portfolio,
            readiness_registry,
            workflow_portfolio,
            scenario_portfolio,
            lifecycle=lifecycle_package,
            budget=line_budget,
            assurance=assurance,
        )
        for path in write_reconciled_status(central_platform_folder, reconciled):
            generated.append(generated_file(path, project_root, "Estado reconciliado da central.", "estado reconciliado", "markdown", relation_to_order))
        write_governance_semantic_state(
            central_platform_folder / "controle-semantico.sqlite",
            governance_portfolio,
            readiness_registry,
            workflow_portfolio,
            scenario_portfolio,
            assurance=assurance,
            lifecycle=lifecycle_package,
            budget=line_budget,
        )

    # --- Public contract (rebuilt bundle includes assurance artifacts) ----
    final_bundle = ReportBundle(
        output_root=str(project_root),
        generated_files=tuple(generated),
        platform_count=len(platform_reports),
        profile_count=len(HUMAN_PROFILES),
        matrix_cells=len(cells),
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
        warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
    )
    contract = build_contract(final_bundle, platform_reports)
    contract_json = write_json(project_root / "dados" / "contrato-publico-mais-humana.json", contract)
    generated.append(generated_file(contract_json, project_root, "Contrato publico JSON da plataforma Mais Humana.", "contrato", "json", relation_to_order))
    contract_md = project_root / "ecossistema" / "CONTRATO-PUBLICO-MAIS-HUMANA.md"
    contract_md.write_text(contract_markdown(contract), encoding="utf-8")
    generated.append(generated_file(contract_md, project_root, "Contrato publico Markdown da plataforma Mais Humana.", "contrato", "markdown", relation_to_order))

    # --- Final semantic-state refresh with the complete generated list ----
    if central_platform_folder is not None:
        sqlite_path = central_platform_folder / "controle-semantico.sqlite"
        write_semantic_state(sqlite_path, tuple(generated), exit_orders, platform_reports, recommendations, round_dossier)

    return ReportBundle(
        output_root=str(project_root),
        generated_files=tuple(generated),
        platform_count=len(platform_reports),
        profile_count=len(HUMAN_PROFILES),
        matrix_cells=len(cells),
        total_code_lines_analyzed=ecosystem_report.total_code_lines,
        warnings=tuple(warning for report in platform_reports for warning in report.scan.warnings),
    )
|
||||
Reference in New Issue
Block a user