- agency-archivist/SKILL.md: Skill per upload, estrazione, catalogazione
- scripts/extract_archive.py: Estrazione zip/URL in clients/{client}/assets/
- scripts/scan_resources.py: Scansione metadata (2 passate: base + vision)
- scripts/generate_catalog.py: Generazione catalog.md con tag e use case
- references/resource_types.md: Tipologie risorse e use case per skill
- agency-orchestrator/SKILL.md: Integrazione archivist in Fase 1
- Step opzionale upload risorse
- Sezione dedicata gestione risorse
- Comportamento proattivo (richiesta risorse mancanti)
- Pattern per altre skill
Integrazione completa: orchestrator → archivist → visual-generator/design/web/social
266 lines
8.5 KiB
Python
Executable file
266 lines
8.5 KiB
Python
Executable file
#!/usr/bin/env python3
|
|
"""
|
|
generate_catalog.py — Genera catalogo markdown dai metadata delle risorse
|
|
|
|
Usage:
|
|
python generate_catalog.py --client <client_name>
|
|
python generate_catalog.py --client demo_co_srl
|
|
|
|
Options:
|
|
--input Path metadata JSON (default: assets/.metadata.json)
|
|
--output Path output catalog.md (default: assets/catalog.md)
|
|
--verbose Log dettagliato
|
|
"""
|
|
|
|
import argparse
import json
import os
import sys
from collections import Counter, defaultdict
from datetime import datetime
from pathlib import Path
|
|
|
|
def load_metadata(input_path):
    """Load the resource metadata object from a JSON file.

    Args:
        input_path: str or Path to the metadata JSON file
            (typically ``assets/.metadata.json``).

    Returns:
        The parsed JSON object (a dict with a ``resources`` list).

    Raises:
        FileNotFoundError: if the file does not exist.
        json.JSONDecodeError: if the file is not valid JSON.
    """
    # JSON is UTF-8 by spec; be explicit so the platform default encoding
    # cannot corrupt non-ASCII descriptions/tags on e.g. Windows.
    with open(input_path, 'r', encoding='utf-8') as f:
        return json.load(f)
|
|
|
|
def format_size(size_bytes):
    """Render a byte count as a human-readable string (B/KB/MB/GB/TB)."""
    value = size_bytes
    for unit in ('B', 'KB', 'MB', 'GB'):
        if value < 1024:
            return f"{value:.1f} {unit}"
        value /= 1024
    # Anything at or beyond 1024 GB is reported in terabytes.
    return f"{value:.1f} TB"
|
|
|
|
def group_by_type(resources):
    """Bucket resources into 'images', 'videos', 'documents', 'other'.

    Classification is MIME-first (image/*, video/*); office/text formats
    fall back to an extension check. Everything else lands in 'other'.
    """
    document_exts = ('pdf', 'doc', 'docx', 'txt', 'md', 'ppt', 'pptx', 'xls', 'xlsx')
    buckets = defaultdict(list)

    for item in resources:
        mime = item.get('mime_type', '')
        if mime.startswith('image/'):
            bucket = 'images'
        elif mime.startswith('video/'):
            bucket = 'videos'
        elif item.get('extension', '') in document_exts:
            bucket = 'documents'
        else:
            bucket = 'other'
        buckets[bucket].append(item)

    return buckets
|
|
|
|
def generate_summary(grouped):
    """Build the markdown rows of the summary table, one per media type.

    Only images/videos/documents are summarized ('other' is omitted by
    design). Each row reports count and total size of the bucket.
    """
    labels = {'images': 'Immagini', 'videos': 'Video', 'documents': 'Documenti'}
    lines = []

    for key in ('images', 'videos', 'documents'):
        bucket = grouped.get(key, [])
        total = sum(item.get('size_bytes', 0) for item in bucket)
        label = labels.get(key, key.title())
        lines.append(f"| {label} | {len(bucket)} | {format_size(total)} |")

    return '\n'.join(lines)
|
|
|
|
def generate_images_table(resources):
    """Build the markdown table for image resources.

    One row per image, sorted by filename; tags are capped at 5 and use
    cases at 2 to keep rows readable. Returns a placeholder string when
    the list is empty.
    """
    if not resources:
        return "_Nessuna immagine trovata_"

    rows = []
    header = "| File | Tipo | Dimensioni | Risoluzione | Descrizione | Tag | Use Case |"
    separator = "|------|------|------------|-------------|-------------|-----|----------|"

    for res in sorted(resources, key=lambda x: x.get('filename', '')):
        filename = res.get('filename', 'Unknown')
        ext = res.get('extension', '?').upper()
        size = res.get('size_formatted', '?')
        resolution = res.get('resolution', '-')
        description = res.get('description', filename)
        tags = ', '.join(f"#{t}" for t in res.get('tags', [])[:5])
        use_cases = ', '.join(res.get('use_cases', [])[:2])

        # BUG FIX: the File column previously emitted the literal string
        # "(unknown)" instead of interpolating the resource's filename.
        rows.append(f"| `{filename}` | {ext} | {size} | {resolution} | {description} | {tags} | {use_cases} |")

    return '\n'.join([header, separator] + rows)
|
|
|
|
def generate_videos_table(resources):
    """Build the markdown table for video resources.

    One row per video, sorted by filename; tags capped at 5, use cases
    at 2. Returns a placeholder string when the list is empty.
    """
    if not resources:
        return "_Nessun video trovato_"

    rows = []
    header = "| File | Tipo | Dimensioni | Durata | Risoluzione | Descrizione | Tag | Use Case |"
    separator = "|------|------|------------|--------|-------------|-------------|-----|----------|"

    for res in sorted(resources, key=lambda x: x.get('filename', '')):
        filename = res.get('filename', 'Unknown')
        ext = res.get('extension', '?').upper()
        size = res.get('size_formatted', '?')
        # Previously hard-coded to "-"; read the duration field when the
        # scanner provides it, keeping "-" as the fallback.
        duration = res.get('duration', '-')
        resolution = res.get('resolution', '-')
        description = res.get('description', filename)
        tags = ', '.join(f"#{t}" for t in res.get('tags', [])[:5])
        use_cases = ', '.join(res.get('use_cases', [])[:2])

        # BUG FIX: the File column previously emitted the literal string
        # "(unknown)" instead of interpolating the resource's filename.
        rows.append(f"| `{filename}` | {ext} | {size} | {duration} | {resolution} | {description} | {tags} | {use_cases} |")

    return '\n'.join([header, separator] + rows)
|
|
|
|
def generate_documents_table(resources):
    """Build the markdown table for document resources.

    One row per document, sorted by filename; tags capped at 5, use
    cases at 2. Returns a placeholder string when the list is empty.
    """
    if not resources:
        return "_Nessun documento trovato_"

    rows = []
    header = "| File | Tipo | Dimensioni | Descrizione | Tag | Use Case |"
    separator = "|------|------|------------|-------------|-----|----------|"

    for res in sorted(resources, key=lambda x: x.get('filename', '')):
        filename = res.get('filename', 'Unknown')
        ext = res.get('extension', '?').upper()
        size = res.get('size_formatted', '?')
        description = res.get('description', filename)
        tags = ', '.join(f"#{t}" for t in res.get('tags', [])[:5])
        use_cases = ', '.join(res.get('use_cases', [])[:2])

        # BUG FIX: the File column previously emitted the literal string
        # "(unknown)" instead of interpolating the resource's filename.
        rows.append(f"| `{filename}` | {ext} | {size} | {description} | {tags} | {use_cases} |")

    return '\n'.join([header, separator] + rows)
|
|
|
|
def generate_global_tags(resources):
    """Build the global tag cloud for the catalog.

    Returns up to 20 tags as '#tag' tokens, ordered by descending
    frequency across all resources (ties broken alphabetically for
    deterministic output), or a placeholder when no resource has tags.
    """
    counts = Counter(tag for res in resources for tag in res.get('tags', []))

    if not counts:
        return "_Nessun tag generato_"

    # The original code noted frequency ordering as "simplified to
    # alphabetical"; implement the intended frequency ranking.
    ordered = sorted(counts, key=lambda t: (-counts[t], t))[:20]  # max 20 tags
    return ' '.join(f"#{t}" for t in ordered)
|
|
|
|
def generate_catalog(client_name, metadata, output_path, verbose=False):
    """Render the complete markdown catalog and write it to output_path.

    Args:
        client_name: client folder name; underscores become spaces and the
            result is title-cased for the catalog heading.
        metadata: dict with a 'resources' list and optionally a 'generated'
            ISO-8601 timestamp (falls back to the current time).
        output_path: destination path for the generated catalog.md.
        verbose: when True, print a confirmation line after writing.

    Returns:
        output_path, unchanged (handy for chaining/logging by the caller).
    """
    resources = metadata.get('resources', [])
    # 'generated' is expected in ISO format; only the date part (before 'T')
    # is shown in the catalog.
    generated = metadata.get('generated', datetime.now().isoformat())

    # Group resources by media type
    grouped = group_by_type(resources)

    # Build the catalog as a single markdown template; each section is
    # delegated to its dedicated table/summary generator.
    catalog = f"""# Asset Catalog — {client_name.replace('_', ' ').title()}

_Generato: {generated.split('T')[0]} | Totale: {len(resources)} risorse_

## Riepilogo

| Tipo | Count | Dimensione Totale |
|------|-------|-------------------|
{generate_summary(grouped)}

---

## Immagini ({len(grouped.get('images', []))})

{generate_images_table(grouped.get('images', []))}

---

## Video ({len(grouped.get('videos', []))})

{generate_videos_table(grouped.get('videos', []))}

---

## Documenti ({len(grouped.get('documents', []))})

{generate_documents_table(grouped.get('documents', []))}

---

## Tag Globali

{generate_global_tags(resources)}

---

## Note

- **Ultimo aggiornamento:** {generated.split('T')[0]}
- **Archivi originali:** `assets/archive/`
- **Per richiedere risorse:** Contatta @agency-archivist
- **Metadata completi:** `assets/.metadata.json`
"""

    # Write the catalog file
    with open(output_path, 'w') as f:
        f.write(catalog)

    if verbose:
        print(f"✅ Catalogo generato: {output_path}")

    return output_path
|
|
|
|
def main():
    """CLI entry point: resolve paths, load metadata, write the catalog.

    Exits with status 1 (after printing a diagnostic) when the client
    directory, assets directory, or metadata file is missing.
    """
    parser = argparse.ArgumentParser(description='Genera catalogo markdown dai metadata')
    parser.add_argument('--client', required=True, help='Nome cliente')
    parser.add_argument('--input', help='Path metadata JSON (default: assets/.metadata.json)')
    parser.add_argument('--output', help='Path output catalog.md (default: assets/catalog.md)')
    parser.add_argument('--verbose', action='store_true', help='Log dettagliato')

    args = parser.parse_args()

    # Resolve the fixed workspace layout: clients/<name>/assets/
    workspace = Path.home() / '.openclaw' / 'workspace' / 'agency-skills-suite'
    client_dir = workspace / 'clients' / args.client
    assets_dir = client_dir / 'assets'

    if not client_dir.exists():
        print(f"❌ Cartella cliente non trovata: {client_dir}")
        sys.exit(1)

    if not assets_dir.exists():
        print(f"❌ Cartella assets non trovata: {assets_dir}")
        sys.exit(1)

    # BUG FIX: argparse yields plain strings, so the previous code crashed
    # with AttributeError on .exists() whenever --input was supplied.
    # Wrap user-supplied values in Path before using Path APIs.
    input_path = Path(args.input) if args.input else assets_dir / '.metadata.json'
    output_path = Path(args.output) if args.output else assets_dir / 'catalog.md'

    if not input_path.exists():
        print(f"❌ Metadata non trovati: {input_path}")
        print(" Esegui prima: python scripts/scan_resources.py")
        sys.exit(1)

    if args.verbose:
        print(f"📥 Input: {input_path}")
        print(f"📝 Output: {output_path}")
        print()

    # Load metadata produced by scan_resources.py
    metadata = load_metadata(input_path)

    # Generate the markdown catalog
    generate_catalog(args.client, metadata, output_path, args.verbose)

    # Final summary for the operator
    resources = metadata.get('resources', [])
    print(f"\n✅ Catalogo generato!")
    print(f" 📊 Risorse catalogate: {len(resources)}")
    print(f" 📁 Catalogo: {output_path}")
    print(f"\n👉 Il catalogo è pronto per essere usato dalle altre skill!")


if __name__ == '__main__':
    main()
|