STEP 2: agency-archivist - Nuova skill gestione risorse

- agency-archivist/SKILL.md: Skill per upload, estrazione, catalogazione
- scripts/extract_archive.py: Estrazione zip/URL in clients/{client}/assets/
- scripts/scan_resources.py: Scansione metadata (2 passate: base + vision)
- scripts/generate_catalog.py: Generazione catalogo.md con tag e use case
- references/resource_types.md: Tipologie risorse e use case per skill
- agency-orchestrator/SKILL.md: Integrazione archivist in Fase 1
  - Step opzionale upload risorse
  - Sezione dedicata gestione risorse
  - Comportamento proattivo (richiesta risorse mancanti)
  - Pattern per altre skill

Integrazione completa: orchestrator → archivist → visual-generator/design/web/social
This commit is contained in:
AgentePotente 2026-03-10 23:29:41 +01:00
parent 6c1b04d39a
commit b1f3ba033e
7 changed files with 1658 additions and 3 deletions

View file

@ -0,0 +1,316 @@
#!/usr/bin/env python3
"""
extract_archive.py Estrae archivi (zip, tar, rar) e organizza risorse in clients/{client}/assets/
Usage:
python extract_archive.py <path_or_url> --client <client_name>
python extract_archive.py brand_assets.zip --client demo_co_srl
python extract_archive.py https://example.com/assets.zip --client demo_co_srl
Options:
--keep-archive Mantieni file originale (default: elimina dopo estrazione)
--verbose Log dettagliato
--dry-run Simula senza estrazione
"""
import os
import sys
import argparse
import zipfile
import tarfile
import shutil
import hashlib
from pathlib import Path
from datetime import datetime
# Optional RAR support: the `rarfile` package also needs the external
# `unrar` tool at runtime; degrade gracefully when it is unavailable
# (extract_archive() checks HAS_RAR before touching .rar files).
try:
    import rarfile
    HAS_RAR = True
except ImportError:
    HAS_RAR = False

# Keyword → destination-folder map used by categorize_file(). Keys are
# "<type>/<subfolder>" paths relative to clients/{client}/assets/; the
# keyword lists mix Italian and English terms matched as substrings of
# lowercased file names.
CATEGORY_KEYWORDS = {
    'images/logo': ['logo', 'marchio', 'brand', 'logotipo'],
    'images/prodotto': ['prodotto', 'product', 'item', 'articolo'],
    'images/team': ['team', 'staff', 'ufficio', 'office', 'persone', 'people'],
    'images/stock': ['sfondo', 'background', 'texture', 'stock'],
    'videos/promo': ['promo', 'reel', 'trailer', 'advertisement'],
    'videos/tutorial': ['tutorial', 'howto', 'demo', 'dimostrazione', 'guida'],
    'documents/brand': ['brand', 'guideline', 'manual', 'linee guida'],
    'documents/product': ['scheda', 'datasheet', 'spec', 'specifiche'],
}
def get_file_type(filename):
    """Classify *filename* as 'images', 'videos', 'documents' or 'other'
    from its (last) extension."""
    extension = filename.lower().split('.')[-1]
    type_map = {
        'images': ('jpg', 'jpeg', 'png', 'gif', 'webp', 'svg', 'bmp', 'tiff'),
        'videos': ('mp4', 'mov', 'avi', 'mkv', 'webm', 'wmv'),
        'documents': ('pdf', 'doc', 'docx', 'txt', 'md', 'ppt', 'pptx', 'xls', 'xlsx'),
    }
    for file_type, extensions in type_map.items():
        if extension in extensions:
            return file_type
    return 'other'
def categorize_file(filename, file_type):
    """Pick the CATEGORY_KEYWORDS folder whose base type matches *file_type*
    and whose keywords appear in *filename*; otherwise fall back to the bare
    type folder ('misc/' for unknown types)."""
    lowered = filename.lower()
    match = next(
        (category
         for category, keywords in CATEGORY_KEYWORDS.items()
         if category.split('/')[0] == file_type
         and any(keyword in lowered for keyword in keywords)),
        None,
    )
    if match is not None:
        return match
    # No keyword matched: bare base folder per type (trailing slash kept,
    # harmless for os.path.join).
    return 'misc/' if file_type == 'other' else f"{file_type}/"
def get_file_size(path):
    """Return the size of *path* in bytes."""
    return Path(path).stat().st_size
def format_size(size_bytes):
    """Render a byte count as a human-readable string (B/KB/MB/GB/TB)."""
    value = size_bytes
    for unit in ('B', 'KB', 'MB', 'GB'):
        if value < 1024:
            return f"{value:.1f} {unit}"
        value /= 1024
    # Anything that survived four divisions is reported as terabytes.
    return f"{value:.1f} TB"
def download_file(url, dest_path, verbose=False):
    """Download *url* to *dest_path*; return True on success, False otherwise.

    Errors are reported on stdout rather than raised.
    """
    import urllib.request
    if verbose:
        print(f"📥 Download: {url}")
    succeeded = True
    try:
        urllib.request.urlretrieve(url, dest_path)
    except Exception as e:
        print(f"❌ Errore download: {e}")
        succeeded = False
    if succeeded and verbose:
        print(f"✅ Download completato: {dest_path}")
    return succeeded
def extract_archive(archive_path, extract_to, verbose=False):
    """Extract a zip/tar/rar archive into *extract_to*.

    Returns the list of member names on success, or an empty list on
    unsupported formats or extraction errors (errors are printed, not raised).

    Generalization: tar archives are opened with mode 'r:*' so plain .tar and
    .tar.bz2 now work in addition to the original .tar.gz/.tgz.

    NOTE(review): extractall() does not guard against path traversal
    ("zip slip") from hostile archives; assumed acceptable because archives
    come from the agency's own clients — confirm if inputs become untrusted.
    """
    filename = os.path.basename(archive_path)
    ext = filename.lower().split('.')[-1]
    try:
        if ext == 'zip':
            with zipfile.ZipFile(archive_path, 'r') as zip_ref:
                zip_ref.extractall(extract_to)
                extracted_files = zip_ref.namelist()
        elif tarfile.is_tarfile(archive_path):
            # 'r:*' auto-detects the compression (gz, bz2, xz, none).
            with tarfile.open(archive_path, 'r:*') as tar_ref:
                tar_ref.extractall(extract_to)
                extracted_files = tar_ref.getnames()
        elif ext == 'rar':
            if not HAS_RAR:
                print("❌ Supporto RAR non disponibile. Installa: pip install rarfile unrar")
                return []
            with rarfile.RarFile(archive_path, 'r') as rar_ref:
                rar_ref.extractall(extract_to)
                extracted_files = rar_ref.namelist()
        else:
            print(f"❌ Formato .{ext} non supportato. Usa zip, tar.gz, o rar.")
            return []
        if verbose:
            print(f"✅ Estratti {len(extracted_files)} file")
        return extracted_files
    except Exception as e:
        print(f"❌ Errore estrazione: {e}")
        return []
def organize_files(temp_dir, assets_dir, client, verbose=False):
    """Copy extracted files from *temp_dir* into categorized folders under
    *assets_dir*.

    *client* is kept for interface compatibility; it is not used here.
    Returns a list of dicts with original name, relative destination, type,
    category and size for each organized file.
    """
    organized = []
    # Pre-create the standard category folders.
    for folder in ['images/logo', 'images/prodotto', 'images/team', 'images/stock',
                   'videos/promo', 'videos/tutorial', 'documents/brand', 'documents/product']:
        os.makedirs(os.path.join(assets_dir, folder), exist_ok=True)
    # Process every extracted file.
    for root, dirs, files in os.walk(temp_dir):
        for filename in files:
            # Skip hidden and OS junk files.
            if filename.startswith('.') or filename == 'Thumbs.db':
                continue
            src_path = os.path.join(root, filename)
            file_type = get_file_type(filename)
            category = categorize_file(filename, file_type)
            dest_folder = os.path.join(assets_dir, category)
            # BUGFIX: fallback categories ('images/', 'videos/', 'documents/',
            # 'misc/') are not in the pre-created list above, so copy2 would
            # fail with FileNotFoundError; ensure the target folder exists.
            os.makedirs(dest_folder, exist_ok=True)
            dest_path = os.path.join(dest_folder, filename)
            # De-duplicate colliding names with a numeric suffix.
            base, ext = os.path.splitext(filename)
            counter = 1
            while os.path.exists(dest_path):
                dest_path = os.path.join(dest_folder, f"{base}_{counter}{ext}")
                counter += 1
            shutil.copy2(src_path, dest_path)
            organized.append({
                'original': filename,
                'destination': os.path.relpath(dest_path, assets_dir),
                'type': file_type,
                'category': category,
                'size': get_file_size(dest_path)
            })
            if verbose:
                # BUGFIX: the log line contained a broken placeholder; show
                # which file went to which category.
                print(f" 📁 {filename} → {category}/")
    return organized
def log_operation(client, archive_name, organized_files, ops_log_path):
    """Append a markdown summary of one extraction run to the client's run log.

    *client* is accepted for interface compatibility; the entry records the
    archive name, file count and a per-type size table.
    """
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M')

    def _row(label, type_name):
        # One summary-table row: count and total size for a given type.
        matching = [f for f in organized_files if f['type'] == type_name]
        total = sum(f['size'] for f in matching)
        return f"| {label} | {len(matching)} | {format_size(total)} |"

    log_entry = f"""
## {timestamp} — Archivist Upload
- **Archivio:** `{archive_name}`
- **File estratti:** {len(organized_files)}
- **Status:** Completato
### Dettagli
| Tipo | Count | Dimensione Totale |
|------|-------|-------------------|
{_row('Immagini', 'images')}
{_row('Video', 'videos')}
{_row('Documenti', 'documents')}
"""
    with open(ops_log_path, 'a') as f:
        f.write(log_entry)
def main():
    """CLI entry point: fetch/copy an archive, extract it, organize the files
    into clients/{client}/assets/, and append the run to the ops log.

    Exits with status 1 on any missing prerequisite or failed step; exits 0
    early in --dry-run mode after printing what would happen.
    """
    parser = argparse.ArgumentParser(description='Estrae archivi e organizza risorse')
    parser.add_argument('path_or_url', help='Path archivio o URL')
    parser.add_argument('--client', required=True, help='Nome cliente (cartella clients/{client}/)')
    parser.add_argument('--keep-archive', action='store_true', help='Mantieni archivio originale')
    parser.add_argument('--verbose', action='store_true', help='Log dettagliato')
    parser.add_argument('--dry-run', action='store_true', help='Simula senza estrazione')
    args = parser.parse_args()
    # Fixed workspace layout under the user's home directory.
    workspace = Path.home() / '.openclaw' / 'workspace' / 'agency-skills-suite'
    clients_dir = workspace / 'clients'
    # Per-client folders.
    client_dir = clients_dir / args.client
    assets_dir = client_dir / 'assets'
    archive_dir = assets_dir / 'archive'
    ops_log = client_dir / 'ops' / 'run_log.md'
    # The client project must already exist (created by agency-orchestrator).
    if not client_dir.exists():
        print(f"❌ Cartella cliente non trovata: {client_dir}")
        print(f" Crea prima il progetto con agency-orchestrator")
        sys.exit(1)
    # Create the folders this run writes into.
    os.makedirs(archive_dir, exist_ok=True)
    os.makedirs(client_dir / 'ops', exist_ok=True)
    # URL vs local path: known schemes are downloaded, everything else copied.
    is_url = args.path_or_url.startswith('http://') or args.path_or_url.startswith('https://') or args.path_or_url.startswith('ftp://')
    if is_url:
        # Archive name from the URL path, dropping any query string.
        archive_name = os.path.basename(args.path_or_url.split('?')[0])
        archive_path = archive_dir / archive_name
        if args.dry_run:
            # NOTE(review): the two placeholders run together — a separator
            # (e.g. " → ") between URL and destination looks lost; confirm.
            print(f"🔍 [DRY-RUN] Download: {args.path_or_url}{archive_path}")
            sys.exit(0)
        if not download_file(args.path_or_url, str(archive_path), args.verbose):
            sys.exit(1)
    else:
        # Local archive: validate, then stage a copy under assets/archive/.
        archive_path = Path(args.path_or_url)
        archive_name = archive_path.name
        if not archive_path.exists():
            print(f"❌ File non trovato: {archive_path}")
            sys.exit(1)
        if args.dry_run:
            # NOTE(review): same missing-separator issue as the download line.
            print(f"🔍 [DRY-RUN] Estrai: {archive_path}{assets_dir}")
            sys.exit(0)
        # Copy into archive/ so extraction always reads from a known place.
        shutil.copy2(archive_path, archive_dir / archive_name)
    if args.verbose:
        print(f"\n📦 Archivio: {archive_name}")
        print(f"📁 Destinazione: {assets_dir}")
        print()
    # Extract into a hidden temp folder, then organize into category folders.
    temp_dir = archive_dir / '.temp_extract'
    os.makedirs(temp_dir, exist_ok=True)
    print("🔄 Estrazione in corso...")
    extracted = extract_archive(str(archive_dir / archive_name), str(temp_dir), args.verbose)
    if not extracted:
        print("❌ Nessun file estratto")
        shutil.rmtree(temp_dir)
        sys.exit(1)
    # Organize into assets/ by type/category.
    print("\n🗂️ Organizzazione file...")
    organized = organize_files(temp_dir, assets_dir, args.client, args.verbose)
    # Remove the temporary extraction folder.
    shutil.rmtree(temp_dir)
    # Append the run summary to clients/{client}/ops/run_log.md.
    log_operation(args.client, archive_name, organized, ops_log)
    # By default the staged archive is deleted after a successful extraction.
    if not args.keep_archive:
        os.remove(archive_dir / archive_name)
        if args.verbose:
            print(f"\n🗑️ Archivio originale eliminato")
    # Operator summary and suggested next step.
    print(f"\n✅ Completato!")
    print(f" 📦 File estratti: {len(organized)}")
    print(f" 📁 Cartella: {assets_dir}")
    print(f" 📝 Log: {ops_log}")
    print(f"\n👉 Prossimo step: python scripts/scan_resources.py --client {args.client}")

if __name__ == '__main__':
    main()

View file

@ -0,0 +1,266 @@
#!/usr/bin/env python3
"""
generate_catalog.py Genera catalogo markdown dai metadata delle risorse
Usage:
python generate_catalog.py --client <client_name>
python generate_catalog.py --client demo_co_srl
Options:
--input Path metadata JSON (default: assets/.metadata.json)
--output Path output catalog.md (default: assets/catalog.md)
--verbose Log dettagliato
"""
import os
import sys
import argparse
import json
from pathlib import Path
from datetime import datetime
from collections import defaultdict
def load_metadata(input_path):
    """Read the resource-metadata JSON file and return the parsed object."""
    with open(input_path, 'r') as handle:
        return json.load(handle)
def format_size(size_bytes):
    """Format a byte count with a binary-scaled unit suffix (B → TB)."""
    remaining = size_bytes
    for unit in ('B', 'KB', 'MB', 'GB'):
        if remaining < 1024:
            return f"{remaining:.1f} {unit}"
        remaining /= 1024
    return f"{remaining:.1f} TB"
def group_by_type(resources):
    """Bucket resources into 'images'/'videos'/'documents'/'other' using the
    MIME type first, then the document-extension list."""
    doc_exts = {'pdf', 'doc', 'docx', 'txt', 'md', 'ppt', 'pptx', 'xls', 'xlsx'}
    grouped = defaultdict(list)
    for res in resources:
        mime = res.get('mime_type', '')
        if mime.startswith('image/'):
            bucket = 'images'
        elif mime.startswith('video/'):
            bucket = 'videos'
        elif res.get('extension', '') in doc_exts:
            bucket = 'documents'
        else:
            bucket = 'other'
        grouped[bucket].append(res)
    return grouped
def generate_summary(grouped):
    """Build the markdown rows of the per-type summary table
    (Immagini/Video/Documenti with count and total size)."""
    labels = {'images': 'Immagini', 'videos': 'Video', 'documents': 'Documenti'}
    rows = []
    for type_name in ('images', 'videos', 'documents'):
        bucket = grouped.get(type_name, [])
        total_size = sum(item.get('size_bytes', 0) for item in bucket)
        label = labels.get(type_name, type_name.title())
        rows.append(f"| {label} | {len(bucket)} | {format_size(total_size)} |")
    return '\n'.join(rows)
def generate_images_table(resources):
    """Render the markdown table for image resources, sorted by filename.

    BUGFIX: the File column printed a literal broken placeholder; it now shows
    the filename (which was already computed but unused).
    """
    if not resources:
        return "_Nessuna immagine trovata_"
    rows = []
    header = "| File | Tipo | Dimensioni | Risoluzione | Descrizione | Tag | Use Case |"
    separator = "|------|------|------------|-------------|-------------|-----|----------|"
    for res in sorted(resources, key=lambda x: x.get('filename', '')):
        filename = res.get('filename', 'Unknown')
        ext = res.get('extension', '?').upper()
        size = res.get('size_formatted', '?')
        resolution = res.get('resolution', '-')
        description = res.get('description', filename)
        tags = ', '.join(f"#{t}" for t in res.get('tags', [])[:5])  # cap at 5 tags
        use_cases = ', '.join(res.get('use_cases', [])[:2])  # cap at 2 use cases
        rows.append(f"| `{filename}` | {ext} | {size} | {resolution} | {description} | {tags} | {use_cases} |")
    return '\n'.join([header, separator] + rows)
def generate_videos_table(resources):
    """Render the markdown table for video resources, sorted by filename.

    The Durata column is always '-' (duration extraction is not implemented).
    BUGFIX: the File column printed a literal broken placeholder; it now shows
    the filename (which was already computed but unused).
    """
    if not resources:
        return "_Nessun video trovato_"
    rows = []
    header = "| File | Tipo | Dimensioni | Durata | Risoluzione | Descrizione | Tag | Use Case |"
    separator = "|------|------|------------|--------|-------------|-------------|-----|----------|"
    for res in sorted(resources, key=lambda x: x.get('filename', '')):
        filename = res.get('filename', 'Unknown')
        ext = res.get('extension', '?').upper()
        size = res.get('size_formatted', '?')
        resolution = res.get('resolution', '-')
        description = res.get('description', filename)
        tags = ', '.join(f"#{t}" for t in res.get('tags', [])[:5])  # cap at 5 tags
        use_cases = ', '.join(res.get('use_cases', [])[:2])  # cap at 2 use cases
        rows.append(f"| `{filename}` | {ext} | {size} | - | {resolution} | {description} | {tags} | {use_cases} |")
    return '\n'.join([header, separator] + rows)
def generate_documents_table(resources):
    """Render the markdown table for document resources, sorted by filename.

    BUGFIX: the File column printed a literal broken placeholder; it now shows
    the filename (which was already computed but unused).
    """
    if not resources:
        return "_Nessun documento trovato_"
    rows = []
    header = "| File | Tipo | Dimensioni | Descrizione | Tag | Use Case |"
    separator = "|------|------|------------|-------------|-----|----------|"
    for res in sorted(resources, key=lambda x: x.get('filename', '')):
        filename = res.get('filename', 'Unknown')
        ext = res.get('extension', '?').upper()
        size = res.get('size_formatted', '?')
        description = res.get('description', filename)
        tags = ', '.join(f"#{t}" for t in res.get('tags', [])[:5])  # cap at 5 tags
        use_cases = ', '.join(res.get('use_cases', [])[:2])  # cap at 2 use cases
        rows.append(f"| `{filename}` | {ext} | {size} | {description} | {tags} | {use_cases} |")
    return '\n'.join([header, separator] + rows)
def generate_global_tags(resources):
    """Return up to 20 global tags as a '#tag' string.

    The original comment stated the intent was frequency ordering but the
    code only sorted alphabetically; this now orders by descending frequency,
    breaking ties alphabetically for stable output.
    """
    from collections import Counter
    counts = Counter(tag for res in resources for tag in res.get('tags', []))
    if not counts:
        return "_Nessun tag generato_"
    # Most frequent first; alphabetical among ties; cap at 20 tags.
    top = sorted(counts.items(), key=lambda kv: (-kv[1], kv[0]))[:20]
    return ' '.join(f"#{tag}" for tag, _ in top)
def generate_catalog(client_name, metadata, output_path, verbose=False):
    """Render the full markdown catalog for *client_name* and write it to
    *output_path* (overwriting any previous version).

    *metadata* is the dict produced by scan_resources.py (keys 'generated'
    and 'resources'). Returns *output_path*.
    """
    resources = metadata.get('resources', [])
    # Fall back to "now" when the scan timestamp is missing.
    generated = metadata.get('generated', datetime.now().isoformat())
    # Bucket by images/videos/documents/other.
    grouped = group_by_type(resources)
    # Assemble the document from the section generators. 'generated' is an
    # ISO timestamp, so split('T')[0] extracts the date part.
    catalog = f"""# Asset Catalog — {client_name.replace('_', ' ').title()}
_Generato: {generated.split('T')[0]} | Totale: {len(resources)} risorse_
## Riepilogo
| Tipo | Count | Dimensione Totale |
|------|-------|-------------------|
{generate_summary(grouped)}
---
## Immagini ({len(grouped.get('images', []))})
{generate_images_table(grouped.get('images', []))}
---
## Video ({len(grouped.get('videos', []))})
{generate_videos_table(grouped.get('videos', []))}
---
## Documenti ({len(grouped.get('documents', []))})
{generate_documents_table(grouped.get('documents', []))}
---
## Tag Globali
{generate_global_tags(resources)}
---
## Note
- **Ultimo aggiornamento:** {generated.split('T')[0]}
- **Archivi originali:** `assets/archive/`
- **Per richiedere risorse:** Contatta @agency-archivist
- **Metadata completi:** `assets/.metadata.json`
"""
    # Write the catalog to disk.
    with open(output_path, 'w') as f:
        f.write(catalog)
    if verbose:
        print(f"✅ Catalogo generato: {output_path}")
    return output_path
def main():
    """CLI entry point: resolve paths, load the metadata JSON and write the
    markdown catalog.

    Exits with status 1 when the client folder, assets folder or metadata
    file is missing.
    """
    parser = argparse.ArgumentParser(description='Genera catalogo markdown dai metadata')
    parser.add_argument('--client', required=True, help='Nome cliente')
    parser.add_argument('--input', help='Path metadata JSON (default: assets/.metadata.json)')
    parser.add_argument('--output', help='Path output catalog.md (default: assets/catalog.md)')
    parser.add_argument('--verbose', action='store_true', help='Log dettagliato')
    args = parser.parse_args()
    # Fixed workspace layout shared with the other archivist scripts.
    workspace = Path.home() / '.openclaw' / 'workspace' / 'agency-skills-suite'
    client_dir = workspace / 'clients' / args.client
    assets_dir = client_dir / 'assets'
    if not client_dir.exists():
        print(f"❌ Cartella cliente non trovata: {client_dir}")
        sys.exit(1)
    if not assets_dir.exists():
        print(f"❌ Cartella assets non trovata: {assets_dir}")
        sys.exit(1)
    # BUGFIX: argparse yields plain strings; the original called .exists() on
    # a str when --input was passed, raising AttributeError. Coerce both
    # overrides to Path.
    input_path = Path(args.input) if args.input else assets_dir / '.metadata.json'
    output_path = Path(args.output) if args.output else assets_dir / 'catalog.md'
    if not input_path.exists():
        print(f"❌ Metadata non trovati: {input_path}")
        print(" Esegui prima: python scripts/scan_resources.py")
        sys.exit(1)
    if args.verbose:
        print(f"📥 Input: {input_path}")
        print(f"📝 Output: {output_path}")
        print()
    # Load metadata and render the catalog.
    metadata = load_metadata(input_path)
    generate_catalog(args.client, metadata, output_path, args.verbose)
    # Operator summary.
    resources = metadata.get('resources', [])
    print(f"\n✅ Catalogo generato!")
    print(f" 📊 Risorse catalogate: {len(resources)}")
    print(f" 📁 Catalogo: {output_path}")
    print(f"\n👉 Il catalogo è pronto per essere usato dalle altre skill!")

if __name__ == '__main__':
    main()

View file

@ -0,0 +1,345 @@
#!/usr/bin/env python3
"""
scan_resources.py Scansiona risorse in clients/{client}/assets/ ed estrae metadata
Usage:
python scan_resources.py --client <client_name> --pass 1|2
python scan_resources.py --client demo_co_srl --pass 1
python scan_resources.py --client demo_co_srl --pass 2 --vision
Options:
--pass 1 Solo metadata base (veloce, sempre disponibile)
--pass 2 Analisi contenuto (richiede modello vision)
--vision Abilita analisi visione (opzionale, richiede API)
--output Path output JSON (default: assets/.metadata.json)
--verbose Log dettagliato
"""
import os
import sys
import argparse
import json
from pathlib import Path
from datetime import datetime
from PIL import Image
import mimetypes
def get_file_metadata(filepath):
    """Extract base metadata for a single file.

    *filepath* is expected to be a pathlib.Path (``.suffix`` is used below).
    Returns a dict with name/path/extension/size/mtime/MIME type, plus
    image-specific fields (dimensions, mode, format, dominant colors) when
    the file is an image readable by Pillow.
    """
    stat = os.stat(filepath)
    metadata = {
        'filename': os.path.basename(filepath),
        'path': str(filepath),
        'extension': filepath.suffix.lower().lstrip('.'),
        'size_bytes': stat.st_size,
        'size_formatted': format_size(stat.st_size),
        'modified': datetime.fromtimestamp(stat.st_mtime).isoformat(),
        # Generic fallback when the extension is unknown to mimetypes.
        'mime_type': mimetypes.guess_type(filepath)[0] or 'application/octet-stream'
    }
    # Image-specific metadata via Pillow.
    if metadata['mime_type'].startswith('image/'):
        try:
            with Image.open(filepath) as img:
                metadata['width'] = img.width
                metadata['height'] = img.height
                metadata['resolution'] = f"{img.width}x{img.height}"
                metadata['mode'] = img.mode
                metadata['format'] = img.format
                # Rough dominant-color estimate: downsample to 50x50 and take
                # the most frequent pixel values (2500 = max distinct colors).
                if img.mode in ('RGB', 'RGBA'):
                    img_resized = img.resize((50, 50))
                    colors = img_resized.getcolors(2500)
                    if colors:
                        # Top 3 by pixel count ((count, color) tuples sort
                        # count-first when reverse-sorted).
                        top_colors = sorted(colors, reverse=True)[:3]
                        metadata['dominant_colors'] = [
                            rgb_to_hex(c[1]) for c in top_colors if c[1][0] is not None
                        ]
        except Exception as e:
            # Record (rather than raise) unreadable/corrupt images.
            metadata['error'] = f"Errore lettura immagine: {e}"
    # Videos: only mark the type; duration/resolution would require
    # opencv or ffprobe, which are not dependencies here.
    elif metadata['mime_type'].startswith('video/'):
        metadata['type'] = 'video'
    return metadata
def format_size(size_bytes):
    """Human-readable byte count: scales through B/KB/MB/GB, TB beyond."""
    amount = size_bytes
    for suffix in ('B', 'KB', 'MB', 'GB'):
        if amount < 1024:
            return f"{amount:.1f} {suffix}"
        amount /= 1024
    return f"{amount:.1f} TB"
def rgb_to_hex(rgb):
    """Convert an (r, g, b[, a]) tuple to a '#rrggbb' hex string.

    Falls back to black for malformed input (None, too few components,
    non-numeric values). BUGFIX: the bare `except:` was narrowed so real
    problems (e.g. KeyboardInterrupt) are no longer swallowed.
    """
    try:
        return '#{:02x}{:02x}{:02x}'.format(int(rgb[0]), int(rgb[1]), int(rgb[2]))
    except (TypeError, ValueError, IndexError):
        return '#000000'
def categorize_file(filename, filepath):
    """Infer a resource category, first from the containing folder, then from
    keywords in the file name; 'generic' when nothing matches.

    BUGFIX: path separators are normalized to '/', so folder matching also
    works with Windows-style backslash paths.
    """
    path_str = str(filepath).replace('\\', '/').lower()
    filename_lower = filename.lower()
    # From the folder the file lives in.
    if '/logo/' in path_str:
        return 'logo'
    elif '/prodotto/' in path_str or '/product/' in path_str:
        return 'prodotto'
    elif '/team/' in path_str or '/people/' in path_str:
        return 'team'
    elif '/stock/' in path_str or '/background/' in path_str:
        return 'stock'
    elif '/promo/' in path_str or '/reel/' in path_str:
        return 'promo'
    elif '/tutorial/' in path_str or '/howto/' in path_str:
        return 'tutorial'
    elif '/brand/' in path_str or '/guideline/' in path_str:
        return 'brand_guidelines'
    # NOTE(review): '/product/' can never reach this branch (the 'prodotto'
    # check above already matches it); kept so '/datasheet/' still maps here.
    elif '/product/' in path_str or '/datasheet/' in path_str:
        return 'product_docs'
    # From keywords in the file name.
    keywords = {
        'logo': ['logo', 'marchio', 'brand'],
        'prodotto': ['prodotto', 'product', 'item'],
        'team': ['team', 'staff', 'ufficio', 'people'],
        'stock': ['sfondo', 'background', 'texture'],
        'promo': ['promo', 'reel', 'trailer'],
        'tutorial': ['tutorial', 'howto', 'demo'],
    }
    for category, words in keywords.items():
        for word in words:
            if word in filename_lower:
                return category
    return 'generic'
def generate_tags(metadata, category):
    """Derive automatic tags from category, file type, dimensions and colors.

    Returns a de-duplicated list. BUGFIX: uses dict.fromkeys instead of
    list(set(...)) so the tag order is deterministic between runs, keeping
    the generated catalog diffable.
    """
    tags = [category]  # category is always the first tag
    # File-type tags.
    ext = metadata.get('extension', '')
    if ext in ['png']:
        tags.append('trasparente' if metadata.get('mode') == 'RGBA' else 'png')
    elif ext in ['jpg', 'jpeg']:
        tags.append('jpg')
    elif ext in ['svg']:
        tags.append('vettoriale')
    # Dimension tags (only when image width is known).
    if metadata.get('width'):
        w = metadata['width']
        h = metadata.get('height', 0)
        if w >= 1920 and h >= 1080:
            tags.append('fullhd')
        if w >= 3000:
            tags.append('highres')
        if w == h:
            tags.append('quadrato')
        elif w > h:
            tags.append('orizzontale')
        else:
            tags.append('verticale')
    # Color tags (only when dominant colors were extracted).
    if 'dominant_colors' in metadata:
        colors = metadata['dominant_colors']
        if '#ffffff' in colors or '#f0f0f0' in colors:
            tags.append('sfondochiaro')
        if '#000000' in colors or '#1a1a1a' in colors:
            tags.append('sfondoscuro')
    # Deterministic de-duplication preserving first-seen order.
    return list(dict.fromkeys(tags))
def scan_directory(assets_dir, pass_level=1, verbose=False):
    """Walk images/, videos/ and documents/ under *assets_dir* and collect a
    metadata dict per file.

    *pass_level* is accepted for interface compatibility; vision analysis
    (pass 2) is applied separately by analyze_with_vision(). Hidden files
    (leading '.') are skipped.
    """
    resources = []
    folders_to_scan = ['images', 'videos', 'documents']
    for folder in folders_to_scan:
        folder_path = assets_dir / folder
        if not folder_path.exists():
            continue
        if verbose:
            print(f"📁 Scansione {folder}/...")
        # Recursive walk of the type folder.
        for root, dirs, files in os.walk(folder_path):
            for filename in files:
                if filename.startswith('.'):
                    continue
                filepath = Path(root) / filename
                rel_path = filepath.relative_to(assets_dir)
                if verbose:
                    # BUGFIX: the log line held a broken placeholder; show the
                    # path relative to assets/ (rel_path was computed but unused).
                    print(f" 🔍 {rel_path}")
                # Pass 1: base metadata (size, MIME, image dimensions, ...).
                metadata = get_file_metadata(filepath)
                # Enrich with category, tags, use cases and a description.
                category = categorize_file(filename, filepath)
                metadata['category'] = category
                metadata['tags'] = generate_tags(metadata, category)
                metadata['use_cases'] = suggest_use_cases(category, metadata)
                metadata['description'] = generate_base_description(filename, category, metadata)
                resources.append(metadata)
    return resources
def suggest_use_cases(category, metadata):
    """Return suggested use cases for a resource based on its category, with
    an extra print-quality suggestion for wide (>= 1920px) assets."""
    catalog = {
        'logo': ['Header sito', 'Social profile', 'Firma email', 'Biglietti da visita'],
        'prodotto': ['E-commerce', 'Social post', 'Catalogo', 'Ads'],
        'team': ['About page', 'LinkedIn', 'Presentazioni', 'Stampa'],
        'stock': ['Sfondi sito', 'Social post', 'Presentazioni', 'Blog'],
        'promo': ['Social ads', 'Homepage', 'YouTube', 'Email marketing'],
        'tutorial': ['Sito web', 'YouTube', 'Supporto clienti', 'Onboarding'],
        'brand_guidelines': ['Design system', 'Coerenza brand', 'Linee guida team'],
        'product_docs': ['Schede prodotto', 'Supporto vendite', 'FAQ'],
        'generic': ['Utilizzo generale']
    }
    suggestions = list(catalog.get(category, ['Utilizzo generale']))
    if metadata.get('width', 0) >= 1920:
        suggestions.append('Stampa alta qualità')
    return suggestions
def generate_base_description(filename, category, metadata):
    """Build a readable description from the file name plus optional
    resolution and size details.

    *category* is accepted for interface compatibility; it is not used.
    """
    # Title-case the stem, turning separators into spaces.
    stem = os.path.splitext(filename)[0]
    pieces = [stem.replace('_', ' ').replace('-', ' ').title()]
    if metadata.get('resolution'):
        pieces.append(f"({metadata['resolution']})")
    if metadata.get('size_formatted'):
        pieces.append(f"{metadata['size_formatted']}")
    return ' '.join(pieces)
def analyze_with_vision(resources, verbose=False):
    """Placeholder for future content analysis with a vision model.

    Intended integration: send each image to a vision API (GPT-4V, Claude
    Vision, Gemini Vision), receive a semantic description, and enrich
    metadata['description'] and metadata['tags'] with the results.
    Currently returns *resources* unchanged; with --verbose it only prints
    the integration plan.
    """
    if verbose:
        print("\n👁️ Analisi visione (placeholder)")
        print(" Integrazione futura con API modelli vision:")
        print(" - GPT-4V (OpenAI)")
        print(" - Claude Vision (Anthropic)")
        print(" - Gemini Vision (Google)")
        print("\n Per ogni immagine:")
        print(" 1. Invia immagine a API")
        print(" 2. Ricevi descrizione semantica")
        print(" 3. Estrai: oggetti, contesto, colori, testo")
        print(" 4. Aggiorna metadata['description'] e metadata['tags']")
    # No-op: resources are returned unmodified.
    return resources
def save_metadata(resources, output_path):
    """Write scan results (with a generation timestamp and total count) to
    *output_path* as pretty-printed JSON; returns *output_path*."""
    payload = {
        'generated': datetime.now().isoformat(),
        'total_resources': len(resources),
        'resources': resources,
    }
    with open(output_path, 'w') as handle:
        json.dump(payload, handle, indent=2, ensure_ascii=False)
    return output_path
def main():
    """CLI entry point: scan the client's assets tree and write .metadata.json.

    Exits with status 1 when the client or assets folder is missing.
    """
    parser = argparse.ArgumentParser(description='Scansiona risorse ed estrae metadata')
    parser.add_argument('--client', required=True, help='Nome cliente')
    # Stored under pass_level because `pass` is a Python keyword.
    parser.add_argument('--pass', type=int, choices=[1, 2], default=1, dest='pass_level',
                        help='Livello analisi: 1=base, 2=vision')
    parser.add_argument('--vision', action='store_true', help='Abilita analisi visione')
    parser.add_argument('--output', help='Path output JSON (default: assets/.metadata.json)')
    parser.add_argument('--verbose', action='store_true', help='Log dettagliato')
    args = parser.parse_args()
    # Fixed workspace layout shared with the other archivist scripts.
    workspace = Path.home() / '.openclaw' / 'workspace' / 'agency-skills-suite'
    client_dir = workspace / 'clients' / args.client
    assets_dir = client_dir / 'assets'
    if not client_dir.exists():
        print(f"❌ Cartella cliente non trovata: {client_dir}")
        sys.exit(1)
    if not assets_dir.exists():
        print(f"❌ Cartella assets non trovata: {assets_dir}")
        print(" Esegui prima: python scripts/extract_archive.py")
        sys.exit(1)
    # NOTE(review): with --output this is a str, not a Path; it is only
    # passed to open() via save_metadata, so it still works.
    output_path = args.output if args.output else assets_dir / '.metadata.json'
    if args.verbose:
        print(f"🔍 Scansione: {assets_dir}")
        print(f"📝 Output: {output_path}")
        print(f"📊 Pass: {args.pass_level} {'(vision)' if args.vision else '(base)'}")
        print()
    # Pass 1: base metadata for every file under images/videos/documents.
    resources = scan_directory(assets_dir, args.pass_level, args.verbose)
    # Pass 2 / --vision: currently a placeholder that returns resources as-is.
    if args.pass_level == 2 or args.vision:
        resources = analyze_with_vision(resources, args.verbose)
    # Persist the metadata JSON next to the assets.
    save_metadata(resources, output_path)
    # Operator summary and suggested next step.
    print(f"\n✅ Scansione completata!")
    print(f" 📊 Risorse trovate: {len(resources)}")
    print(f" 📁 Immagini: {sum(1 for r in resources if r['mime_type'].startswith('image/'))}")
    print(f" 🎬 Video: {sum(1 for r in resources if r['mime_type'].startswith('video/'))}")
    print(f" 📄 Documenti: {sum(1 for r in resources if r['mime_type'].startswith('application/') or r['extension'] in ['pdf', 'doc', 'docx'])}")
    print(f" 💾 Metadata: {output_path}")
    print(f"\n👉 Prossimo step: python scripts/generate_catalog.py --client {args.client}")

if __name__ == '__main__':
    main()