feat: fundar plataforma mais humana

This commit is contained in:
Ami Soares
2026-04-30 06:42:00 -03:00
commit c9c1056193
183 changed files with 639629 additions and 0 deletions

446
src/mais_humana/matrix.py Normal file
View File

@@ -0,0 +1,446 @@
"""Scoring and matrix generation for human service coverage."""
from __future__ import annotations
from collections import Counter
from typing import Iterable, Sequence
from .catalog import HUMAN_PROFILES, CATEGORY_KEYWORDS, categories_for_text
from .models import (
EvidenceKind,
HumanProfile,
MatrixCell,
MaturityLevel,
NeedCategory,
PlatformDefinition,
PlatformHumanReport,
PlatformScan,
Recommendation,
OrderType,
clamp_score,
maturity_from_score,
merge_unique,
score_label,
)
# Relative weight of each evidence kind. Machine-verifiable signals
# (OPENAPI, MCP_TOOL, SECURITY) weigh the most; UNKNOWN contributes a
# token amount only.
# NOTE(review): not referenced within this module — presumably consumed
# by other scoring/reporting modules; confirm before changing values.
EVIDENCE_WEIGHTS: dict[EvidenceKind, int] = {
    EvidenceKind.README: 5,
    EvidenceKind.PACKAGE_SCRIPT: 7,
    EvidenceKind.ROUTE: 8,
    EvidenceKind.OPENAPI: 12,
    EvidenceKind.TEST: 10,
    EvidenceKind.CONFIG: 5,
    EvidenceKind.DOC: 5,
    EvidenceKind.WORKER: 8,
    EvidenceKind.STORAGE: 8,
    EvidenceKind.MCP_TOOL: 12,
    EvidenceKind.UI_SURFACE: 11,
    EvidenceKind.SECURITY: 12,
    EvidenceKind.BUSINESS_RULE: 10,
    EvidenceKind.OBSERVABILITY: 11,
    EvidenceKind.UNKNOWN: 2,
}
# For each human need category, the evidence kinds that best signal the
# category is actually covered. Used by profile_alignment_score (bonus
# points per matching evidence item) and evidence_refs (which references
# to surface for a profile).
PROFILE_SIGNAL_BONUS: dict[NeedCategory, tuple[EvidenceKind, ...]] = {
    NeedCategory.ADMINISTRATION: (EvidenceKind.MCP_TOOL, EvidenceKind.UI_SURFACE, EvidenceKind.ROUTE),
    NeedCategory.SUPPORT: (EvidenceKind.OBSERVABILITY, EvidenceKind.ROUTE, EvidenceKind.DOC),
    NeedCategory.FINANCE: (EvidenceKind.BUSINESS_RULE, EvidenceKind.ROUTE, EvidenceKind.OPENAPI),
    NeedCategory.LEGAL: (EvidenceKind.DOC, EvidenceKind.SECURITY, EvidenceKind.OBSERVABILITY),
    NeedCategory.SECURITY: (EvidenceKind.SECURITY, EvidenceKind.OPENAPI, EvidenceKind.TEST),
    NeedCategory.OPERATIONS: (EvidenceKind.OBSERVABILITY, EvidenceKind.PACKAGE_SCRIPT, EvidenceKind.WORKER),
    NeedCategory.STRATEGY: (EvidenceKind.DOC, EvidenceKind.OBSERVABILITY, EvidenceKind.MCP_TOOL),
    NeedCategory.DOCUMENTATION: (EvidenceKind.README, EvidenceKind.DOC, EvidenceKind.OPENAPI),
    NeedCategory.SELF_SERVICE: (EvidenceKind.UI_SURFACE, EvidenceKind.ROUTE, EvidenceKind.MCP_TOOL),
    NeedCategory.COMMERCIAL: (EvidenceKind.BUSINESS_RULE, EvidenceKind.OPENAPI, EvidenceKind.ROUTE),
    NeedCategory.EXPERIENCE: (EvidenceKind.UI_SURFACE, EvidenceKind.README, EvidenceKind.MCP_TOOL),
    NeedCategory.GOVERNANCE: (EvidenceKind.OBSERVABILITY, EvidenceKind.SECURITY, EvidenceKind.OPENAPI),
    NeedCategory.INTEGRATION: (EvidenceKind.MCP_TOOL, EvidenceKind.WORKER, EvidenceKind.SECURITY),
    NeedCategory.OBSERVABILITY: (EvidenceKind.OBSERVABILITY, EvidenceKind.TEST, EvidenceKind.ROUTE),
}
def evidence_counter(scan: PlatformScan) -> Counter[EvidenceKind]:
    """Tally the scan's evidence items by their kind."""
    return Counter(item.kind for item in scan.evidence)
def category_text_score(scan: PlatformScan, categories: Iterable[NeedCategory]) -> int:
    """Score keyword hits for *categories* against the scan's textual corpus.

    Each category earns 3 points per matching keyword, capped at 14 per
    category; the grand total is capped at 32.
    """
    corpus = " ".join(
        [
            scan.platform.title,
            scan.platform.mission,
            scan.readme_excerpt,
            # Only the first 120 evidence summaries feed the corpus.
            " ".join(item.summary for item in scan.evidence[:120]),
        ]
    ).lower()
    total = 0
    for category in categories:
        hits = sum(keyword.lower() in corpus for keyword in CATEGORY_KEYWORDS.get(category, ()))
        total += min(14, hits * 3)
    return min(32, total)
def base_repository_score(scan: PlatformScan) -> int:
    """Score repository fundamentals, capped at 64.

    Returns 0 when no repository exists; otherwise starts from a baseline
    of 8 and adds fixed bonuses for Git presence, README, code volume,
    tests, an OpenAPI contract, worker signals and declared scripts.
    """
    if not scan.exists:
        return 0
    contributions = (
        7 if scan.git_present else 0,
        8 if scan.readme_excerpt else 0,
        8 if scan.code_lines > 0 else 0,
        4 if scan.code_lines >= 1000 else 0,
        4 if scan.code_lines >= 5000 else 0,
        8 if scan.has_tests else 0,
        8 if scan.has_openapi else 0,
        4 if scan.has_worker else 0,
        # At most 7 points for scripts, one per script.
        min(7, len(scan.scripts)) if scan.scripts else 0,
    )
    return min(64, 8 + sum(contributions))
def profile_alignment_score(scan: PlatformScan, profile: HumanProfile) -> int:
    """Score how well the platform's declared scope and evidence fit *profile*.

    Capped at 44: 8 points per priority need inside the platform's primary
    categories, up to 9 bonus points per matching evidence kind, 14 points
    when the profile is explicitly expected, plus 4 points per need also
    detected in the README text.
    """
    counts = evidence_counter(scan)
    total = 0
    for need in profile.priority_needs:
        if need in scan.platform.primary_categories:
            total += 8
        for kind in PROFILE_SIGNAL_BONUS.get(need, ()):
            total += min(9, counts[kind] * 2)
    if profile.profile_id in scan.platform.expected_profiles:
        total += 14
    readme_categories = set(categories_for_text(scan.readme_excerpt))
    total += 4 * len(readme_categories.intersection(profile.priority_needs))
    return min(44, total)
def penalty_score(scan: PlatformScan, profile: HumanProfile) -> int:
    """Compute score deductions from scanner warnings and declared blockers.

    Test/OpenAPI warnings only penalise profiles whose priority needs make
    them relevant (OPERATIONS / INTEGRATION respectively). Blockers cost 5
    points each, capped at 14.
    """
    warnings_blob = " ".join(scan.warnings).lower()
    total = 0
    # Substring checks match the accent-free warning messages the scanner emits.
    if "sem .git" in warnings_blob:
        total += 8
    if "nenhuma linha de codigo" in warnings_blob:
        total += 20
    needs = profile.priority_needs
    if "testes nao encontrados" in warnings_blob and NeedCategory.OPERATIONS in needs:
        total += 6
    if "openapi" in warnings_blob and NeedCategory.INTEGRATION in needs:
        total += 6
    blockers = scan.platform.known_blockers
    if blockers:
        total += min(14, 5 * len(blockers))
    return total
def build_strengths(scan: PlatformScan, profile: HumanProfile, score: int) -> tuple[str, ...]:
    """Collect up to eight deduplicated strength statements for a matrix cell."""
    # Simple boolean signals mapped to their fixed messages, in report order.
    flagged = (
        (scan.exists, "repositorio real encontrado"),
        (scan.git_present, "historico Git local disponivel"),
        (scan.readme_excerpt, "README tecnico fornece contexto inicial"),
        (scan.has_tests, "testes foram detectados"),
        (scan.has_openapi, "contrato OpenAPI foi detectado"),
        (scan.has_worker, "sinais de Worker/Cloudflare foram detectados"),
    )
    strengths = [message for flag, message in flagged if flag]
    if profile.profile_id in scan.platform.expected_profiles:
        strengths.append(f"plataforma declarada como relevante para {profile.name}")
    strengths.extend(
        f"categoria {need.value} e parte do papel principal da plataforma"
        for need in profile.priority_needs
        if need in scan.platform.primary_categories
    )
    if score >= 75:
        strengths.append("pontuacao indica atendimento humano forte ou pronto")
    return merge_unique(strengths)[:8]
def build_gaps(scan: PlatformScan, profile: HumanProfile, score: int) -> tuple[str, ...]:
    """Collect up to eight deduplicated gap statements for a matrix cell."""
    gaps: list[str] = []
    if not scan.exists:
        gaps.append("repositorio real nao existe no espelho local")
    else:
        # Fine-grained gaps only apply once a repository actually exists.
        if not scan.git_present:
            gaps.append("repositorio precisa de Git inicializado e remoto configurado")
        if not scan.readme_excerpt:
            gaps.append("falta README tecnico para leitura humana")
        if not scan.has_tests:
            gaps.append("faltam testes detectaveis para provar comportamento")
        if not scan.has_openapi and NeedCategory.INTEGRATION in profile.priority_needs:
            gaps.append("falta contrato OpenAPI ou equivalente para integracao auditavel")
    gaps.extend(scan.platform.known_blockers)
    missing = [need.value for need in profile.priority_needs if need not in scan.platform.primary_categories]
    if score < 60 and missing:
        gaps.append("categorias humanas secundarias precisam de explicacao: " + ", ".join(missing[:4]))
    if score < 40:
        gaps.append("atendimento humano ainda aparece mais planejado do que operacional")
    return merge_unique(gaps)[:8]
def evidence_refs(scan: PlatformScan, profile: HumanProfile) -> tuple[str, ...]:
    """Select up to eight evidence references relevant to *profile*.

    An evidence item qualifies when its kind signals one of the profile's
    priority needs, or when the item reports itself as strong.
    """
    relevant_kinds: set[EvidenceKind] = set()
    for need in profile.priority_needs:
        relevant_kinds.update(PROFILE_SIGNAL_BONUS.get(need, ()))
    selected: list[str] = []
    for item in scan.evidence:
        if item.kind in relevant_kinds or item.is_strong():
            selected.append(item.reference)
            if len(selected) >= 8:
                break
    return merge_unique(selected)
def explain_cell(scan: PlatformScan, profile: HumanProfile, score: int, maturity: MaturityLevel) -> str:
    """Produce the human-readable explanation for one matrix cell.

    Picks one of four message templates: missing repository, strong
    coverage (score >= 75), partial coverage (score >= 50), or early stage.
    """
    label = score_label(score)
    if not scan.exists:
        return f"{profile.name} ainda nao tem base local analisavel em {scan.platform.title}."
    if score >= 75:
        return (
            f"{scan.platform.title} atende {profile.name} em nivel {label}, "
            f"com maturidade {maturity.value} e evidencias tecnicas suficientes para leitura humana."
        )
    if score >= 50:
        return (
            f"{scan.platform.title} ja oferece sinais uteis para {profile.name}, "
            "mas precisa transformar capacidades tecnicas em telas, relatorios ou comandos mais claros."
        )
    # Fallback: low score, repository exists but human coverage is immature.
    return (
        f"{scan.platform.title} ainda parece {label} para {profile.name}; "
        "a proxima evolucao deve explicitar necessidades humanas, evidencias e criterio de pronto."
    )
def score_cell(scan: PlatformScan, profile: HumanProfile) -> MatrixCell:
    """Assemble one matrix cell: combined score, maturity level and narratives."""
    # Combine the three positive components and subtract penalties, then clamp.
    raw = (
        base_repository_score(scan)
        + profile_alignment_score(scan, profile)
        + category_text_score(scan, profile.priority_needs)
        - penalty_score(scan, profile)
    )
    final_score = clamp_score(raw)
    level = maturity_from_score(final_score)
    return MatrixCell(
        platform_id=scan.platform.platform_id,
        profile_id=profile.profile_id,
        score=final_score,
        maturity=level,
        explanation=explain_cell(scan, profile, final_score, level),
        strengths=build_strengths(scan, profile, final_score),
        gaps=build_gaps(scan, profile, final_score),
        evidence_refs=evidence_refs(scan, profile),
    )
def build_matrix(scans: Sequence[PlatformScan], profiles: Sequence[HumanProfile] = HUMAN_PROFILES) -> tuple[MatrixCell, ...]:
    """Score every (platform, profile) pair, scans outermost, and return the full matrix."""
    return tuple(score_cell(scan, profile) for scan in scans for profile in profiles)
def cells_for_platform(cells: Sequence[MatrixCell], platform_id: str) -> tuple[MatrixCell, ...]:
    """Filter the matrix down to the cells that belong to *platform_id*."""
    matching = [cell for cell in cells if cell.platform_id == platform_id]
    return tuple(matching)
def top_gaps(cells: Sequence[MatrixCell], limit: int = 8) -> tuple[str, ...]:
    """Return up to *limit* unique gaps, drawn from the weakest cells first.

    Collection stops once *limit* raw entries are gathered; deduplication
    afterwards may therefore yield fewer than *limit* items.
    """
    collected: list[str] = []
    for cell in sorted(cells, key=lambda entry: entry.score):
        collected.extend(cell.gaps)
        if len(collected) >= limit:
            break
    return merge_unique(collected)[:limit]
def top_strengths(cells: Sequence[MatrixCell], limit: int = 8) -> tuple[str, ...]:
    """Return up to *limit* unique strengths, drawn from the strongest cells first.

    Collection stops once *limit* raw entries are gathered; deduplication
    afterwards may therefore yield fewer than *limit* items.
    """
    collected: list[str] = []
    for cell in sorted(cells, key=lambda entry: entry.score, reverse=True):
        collected.extend(cell.strengths)
        if len(collected) >= limit:
            break
    return merge_unique(collected)[:limit]
def build_recommendations_for_scan(scan: PlatformScan, cells: Sequence[MatrixCell]) -> tuple[Recommendation, ...]:
    """Derive actionable recommendations for one platform from its scan and cells.

    Emits one recommendation per detected structural gap (missing repo,
    missing Git, missing README, missing tests), one for weak matrix cells
    (score < 60) and one for declared blockers, then sorts by descending
    priority with title as the tiebreaker.
    """
    recommendations: list[Recommendation] = []
    platform_id = scan.platform.platform_id
    # Cells whose score says the profile is not yet well served.
    low_cells = [cell for cell in cells if cell.score < 60]
    # Highest priority (100): no analysable repository at all.
    if not scan.exists:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-criar-repositorio-real",
                platform_id=platform_id,
                title="Criar e inicializar repositorio real da plataforma",
                reason="A plataforma nao possui base local analisavel.",
                expected_impact="Permitir execucao tecnica, versionamento e sincronizacao.",
                categories=(NeedCategory.GOVERNANCE, NeedCategory.OPERATIONS),
                priority=100,
                suggested_order_type=OrderType.EXECUTIVE,
                affected_paths=(scan.repo_path,),
                validation_steps=("git status", "git remote -v", "linha de base criada"),
            )
        )
    # Repository exists but has no .git directory.
    if scan.exists and not scan.git_present:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-inicializar-git",
                platform_id=platform_id,
                title="Inicializar Git e configurar origin correto",
                reason="O repositorio existe sem .git, impedindo rastreabilidade e sincronizacao.",
                expected_impact="Fechar base operacional minima para commits, push e hash final.",
                categories=(NeedCategory.GOVERNANCE, NeedCategory.OPERATIONS),
                priority=95,
                suggested_order_type=OrderType.EXECUTIVE,
                affected_paths=(scan.repo_path,),
                validation_steps=("git status --short --branch", "git remote -v"),
            )
        )
    # Repository exists but has no README excerpt to read from.
    if scan.exists and not scan.readme_excerpt:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-readme-humano",
                platform_id=platform_id,
                title="Criar README tecnico e humano da plataforma",
                reason="Sem README, o estado tecnico nao vira compreensao humana inicial.",
                expected_impact="Dar missao, escopo, comandos e criterios de validacao.",
                categories=(NeedCategory.DOCUMENTATION, NeedCategory.EXPERIENCE),
                priority=90,
                suggested_order_type=OrderType.EXECUTIVE,
                affected_paths=(f"{scan.repo_path}/README.md",),
                validation_steps=("README revisado", "links e comandos existentes"),
            )
        )
    # Repository exists but the scan detected no tests.
    if scan.exists and not scan.has_tests:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-testes-canonicos",
                platform_id=platform_id,
                title="Criar testes canonicos de leitura humana",
                reason="A varredura nao encontrou testes suficientes para validar comportamento.",
                expected_impact="Aumentar confianca de suporte, operacao e auditoria.",
                categories=(NeedCategory.OPERATIONS, NeedCategory.OBSERVABILITY),
                priority=75,
                suggested_order_type=OrderType.EXECUTIVE,
                affected_paths=(f"{scan.repo_path}/tests",),
                validation_steps=("suite local executada", "relatorio de testes registrado"),
            )
        )
    # One managerial recommendation naming the (up to four) weakest profiles.
    if low_cells:
        weakest = sorted(low_cells, key=lambda cell: cell.score)[:4]
        profile_ids = ", ".join(cell.profile_id for cell in weakest)
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-matriz-perfis-fracos",
                platform_id=platform_id,
                title="Fechar lacunas da matriz humana por perfil",
                reason=f"Perfis com baixo atendimento detectados: {profile_ids}.",
                expected_impact="Transformar capacidade tecnica em telas, relatorios e mensagens de acao.",
                categories=(NeedCategory.EXPERIENCE, NeedCategory.GOVERNANCE, NeedCategory.SUPPORT),
                priority=70,
                suggested_order_type=OrderType.MANAGERIAL,
                affected_paths=(scan.repo_path,),
                validation_steps=("matriz regenerada", "lacunas reclassificadas", "OS de continuidade criada"),
            )
        )
    # Pre-declared blockers get their own high-priority follow-up.
    if scan.platform.known_blockers:
        recommendations.append(
            Recommendation(
                recommendation_id=f"{platform_id}-bloqueios-conhecidos",
                platform_id=platform_id,
                title="Resolver ou formalizar bloqueios conhecidos",
                reason="A plataforma possui bloqueios de maturidade ja mapeados.",
                expected_impact="Reduzir contradicao entre readiness tecnico e utilidade humana.",
                categories=(NeedCategory.GOVERNANCE, NeedCategory.OBSERVABILITY),
                priority=85,
                suggested_order_type=OrderType.MANAGERIAL,
                affected_paths=(scan.repo_path,),
                validation_steps=("bloqueios documentados", "status reavaliado", "evidencia anexada"),
            )
        )
    # Highest priority first; stable title order within equal priorities.
    recommendations.sort(key=lambda item: (-item.priority, item.title))
    return tuple(recommendations)
def build_platform_report(scan: PlatformScan, all_cells: Sequence[MatrixCell]) -> PlatformHumanReport:
    """Assemble the per-platform human report from its slice of the matrix."""
    cells = cells_for_platform(all_cells, scan.platform.platform_id)
    recommendations = build_recommendations_for_scan(scan, cells)
    strengths = top_strengths(cells)
    gaps = top_gaps(cells)
    if scan.exists:
        # Mean cell score, rounded; 0 when the platform has no cells.
        average = round(sum(cell.score for cell in cells) / len(cells)) if cells else 0
        summary = (
            f"{scan.platform.title} foi analisada com {scan.code_lines} linhas de codigo e "
            f"{len(scan.evidence)} evidencias locais. Score medio humano: "
            f"{average}."
        )
    else:
        summary = f"{scan.platform.title} nao possui repositorio local analisavel."
    current_state = tuple(strengths) or ("estado inicial sem evidencias fortes",)
    # Fixed aspirational statements shared by every report.
    future_state = (
        "telas e relatorios devem responder quem e atendido, como e atendido e qual proxima acao",
        "evidencias devem ser exportaveis para GPT, painel e central de ordens",
        "cada lacuna humana deve gerar OS executavel com validacao clara",
    )
    missing = tuple(gaps) or ("nenhuma lacuna principal detectada pela matriz atual",)
    return PlatformHumanReport(
        platform=scan.platform,
        scan=scan,
        cells=cells,
        recommendations=recommendations,
        summary=summary,
        current_state=current_state,
        future_state=future_state,
        missing_for_humans=missing,
    )
def build_platform_reports(scans: Sequence[PlatformScan], cells: Sequence[MatrixCell]) -> tuple[PlatformHumanReport, ...]:
    """Build one human report per scan; every report shares the same matrix cells."""
    reports: list[PlatformHumanReport] = []
    for scan in scans:
        reports.append(build_platform_report(scan, cells))
    return tuple(reports)
def build_global_recommendations(reports: Sequence[PlatformHumanReport]) -> tuple[Recommendation, ...]:
    """Aggregate a cross-platform backlog.

    Takes each platform's top three recommendations, adds one "raise
    maturity" item for each of the five lowest-scoring platforms, and
    sorts by priority, platform and title.
    """
    # Up to three highest-priority recommendations from every platform.
    aggregated = [rec for report in reports for rec in report.recommendations[:3]]
    weakest_first = sorted(reports, key=lambda rep: rep.average_score)
    for report in weakest_first[:5]:
        aggregated.append(
            Recommendation(
                recommendation_id=f"global-elevar-{report.platform.platform_id}",
                platform_id=report.platform.platform_id,
                title=f"Elevar maturidade humana de {report.platform.title}",
                reason=f"Score medio atual {report.average_score}; lacunas principais exigem continuidade.",
                expected_impact="Aumentar clareza para administradores, suporte, clientes e planejamento.",
                categories=report.platform.primary_categories,
                # Lower averages get a larger boost; floor is 65.
                priority=65 + max(0, 60 - report.average_score),
                suggested_order_type=OrderType.MANAGERIAL,
                affected_paths=(report.scan.repo_path,),
                validation_steps=("relatorio regenerado", "score comparado", "pendencias atualizadas"),
            )
        )
    aggregated.sort(key=lambda rec: (-rec.priority, rec.platform_id, rec.title))
    return tuple(aggregated)
def matrix_table(cells: Sequence[MatrixCell], profiles: Sequence[HumanProfile] = HUMAN_PROFILES) -> list[list[str]]:
    """Render the matrix as rows of strings: header row plus one row per platform.

    Missing (platform, profile) pairs render as "0".
    """
    lookup = {(cell.platform_id, cell.profile_id): cell for cell in cells}
    header = ["platform"] + [profile.profile_id for profile in profiles]
    table = [header]
    for platform_id in sorted({cell.platform_id for cell in cells}):
        row = [platform_id]
        for profile in profiles:
            entry = lookup.get((platform_id, profile.profile_id))
            row.append(str(entry.score if entry else 0))
        table.append(row)
    return table