fixed date data in reports and corrected deferred reports

This commit is contained in:
Dulce
2025-12-15 13:32:53 -07:00
parent 421aa0c0da
commit dad4fa2191
2 changed files with 364 additions and 132 deletions

View File

@@ -1,4 +1,5 @@
from celery import shared_task
from api.organization.models import Organizacion
from django.core.files.base import ContentFile
from django.utils import timezone
from api.reports.models import ReportDocument
@@ -112,12 +113,39 @@ def generate_report_control_pedimento(report_id):
pedimento_ids = list(pedimentos_qs.values_list('id', flat=True))
rfcs_raw = list(pedimentos_qs.values_list('agente_aduanal', flat=True))
# initialize totals
pedimentos_completos = 0
total_documentos = 0
documentos_sin_descargar = 0
nombre_organizacion = ''
if filters.get('organizacion_id'):
try:
# Assumes an Organizacion model exists - adjust to match the real model
organizacion = Organizacion.objects.get(id=filters['organizacion_id'])
nombre_organizacion = organizacion.nombre # adjust the field to match the model
except Organizacion.DoesNotExist:
nombre_organizacion = f"ID: {filters['organizacion_id']}"
except Exception as e:
nombre_organizacion = f"Error: {str(e)}"
# list of RFCs
rfc_list = ', '.join(sorted(set([rfc for rfc in rfcs_raw if rfc])))
fecha_inicio = ''
fecha_fin = ''
if pedimentos_qs.exists():
primer_pedimento = pedimentos_qs.order_by('fecha_pago').first()
if primer_pedimento and primer_pedimento.fecha_pago:
fecha_inicio = primer_pedimento.fecha_pago.strftime('%Y-%m-%d')
ultimo_pedimento = pedimentos_qs.order_by('-fecha_pago').first()
if ultimo_pedimento and ultimo_pedimento.fecha_pago:
fecha_fin = ultimo_pedimento.fecha_pago.strftime('%Y-%m-%d')
# For each pedimento, check whether it is complete
for pedimento in pedimentos_qs:
# Count this pedimento's documents
@@ -216,12 +244,15 @@ def generate_report_control_pedimento(report_id):
# TOTALS SECTION
writer.writerow(['RESUMEN DEL REPORTE - CONTROL DE PEDIMENTOS'])
writer.writerow(['ORGANIZACION:', nombre_organizacion])
writer.writerow([])
writer.writerow(['TOTAL DE EXPEDIENTES:', pedimentos_total])
writer.writerow(['TOTAL DE EXPEDIENTES COMPLETOS:', pedimentos_completos])
writer.writerow(['TOTAL DE DOCUMENTOS:', total_documentos])
writer.writerow(['DOCUMENTOS SIN DESCARGAR:', documentos_sin_descargar])
writer.writerow(['PORCENTAJE DE DOCUMENTOS FALTANTES (%):', f"{porcentaje_faltantes:.2f}%"])
writer.writerow(['DESDE: ', fecha_inicio, ' HASTA: ', fecha_fin])
writer.writerow(['LISTA RFC:', rfc_list])
writer.writerow([])
writer.writerow([])
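The date range written above is computed by ordering the queryset twice (by fecha_pago ascending and descending). The same bounds could be fetched in a single query with an aggregate; a minimal sketch, assuming the same pedimentos_qs queryset and fecha_pago date field used in this hunk, with the result keys 'primera'/'ultima' being illustrative names:

from django.db.models import Min, Max

# One query returning both bounds; each value is a date or None if the queryset is empty
bounds = pedimentos_qs.aggregate(primera=Min('fecha_pago'), ultima=Max('fecha_pago'))
fecha_inicio = bounds['primera'].strftime('%Y-%m-%d') if bounds['primera'] else ''
fecha_fin = bounds['ultima'].strftime('%Y-%m-%d') if bounds['ultima'] else ''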

View File

@@ -52,6 +52,8 @@ import uuid
import datetime
import zipfile
from django.db import models
import logging
logger = logging.getLogger(__name__)
def export_model_to_csv(request, model_name, fields, module='datastage', filters=None):
model = apps.get_model(module, model_name)
@@ -114,7 +116,7 @@ class ExportDataStageView(APIView):
# Partitioning constants
# MAX_RECORDS_PER_FILE = 100 # Safe per-file limit
MAX_RECORDS_PER_FILE = 50000 # Safe per-file limit
MAX_RECORDS_PER_FILE = 120000 # Safe per-file limit
def safe_excel_value(self, value):
"""
@@ -190,17 +192,21 @@ class ExportDataStageView(APIView):
if not models_data:
return Response({'error': 'models are required for multiple export'}, status=status.HTTP_400_BAD_REQUEST)
logger.info("🚀" * 40)
logger.info("🚀 MODO MÚLTIPLE INICIADO")
logger.info(f"🚀 Request completo: {request.data}")
logger.info(f"🚀 RFC en request: '{request.data.get('globalFilters', {}).get('rfc')}'")
related_keys = self.get_related_keys_from_filters(global_filters, models_data, request.user)
total_estimated_records = self.estimate_total_records(models_data, global_filters, related_keys, request.user)
if total_estimated_records > self.MAX_RECORDS_PER_FILE:
if export_type == 'excel':
return self.export_datastage_multiple_partitioned_excel(request, models_data, global_filters, related_keys)
else:
return self.export_datastage_multiple_partitioned_csv(request, models_data, global_filters, related_keys)
if export_type == 'excel':
# Always use the smart partitioned method for Excel
return self.export_datastage_multiple_partitioned_excel(request, models_data, global_filters, related_keys)
else:
if export_type == 'excel':
return self.export_datastage_multiple_to_excel(request, models_data, global_filters, related_keys)
# For CSV, we can keep the current logic or improve it
total_estimated_records = self.estimate_total_records(models_data, global_filters, related_keys, request.user)
if total_estimated_records > self.MAX_RECORDS_PER_FILE:
return self.export_datastage_multiple_partitioned_csv(request, models_data, global_filters, related_keys)
else:
return self.export_datastage_multiple_to_csv(request, models_data, global_filters, related_keys)
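For reference, a request body that exercises this multiple-export branch might look like the sketch below; export_type and the globalFilters keys come from the code in this diff, while the models key, model names, field lists, and filter values are illustrative assumptions:

payload = {
    'export_type': 'excel',  # or 'csv'
    'models': [  # assumed key behind models_data
        {'model': 'Pedimento', 'fields': ['patente', 'pedimento', 'rfc', 'fecha_pago_real']},
    ],
    'globalFilters': {
        'organizacion': '3f9c2c1e-8a6b-4b2d-9f1a-0c5d7e2b4a11',  # hypothetical UUID
        'rfc': 'XAXX010101000',  # hypothetical RFC
        'fecha_pago_desde': '2025-01-01',
        'fecha_pago_hasta': '2025-06-30',
    },
}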
@@ -281,11 +287,15 @@ class ExportDataStageView(APIView):
return response
def export_datastage_multiple_partitioned_excel(self, request, models_data, global_filters, related_keys):
"""Exporta múltiples modelos de DataStage a múltiples archivos Excel particionados"""
"""Exporta múltiples modelos de DataStage a múltiples archivos Excel particionados inteligentemente"""
try:
zip_buffer = io.BytesIO()
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
file_counter = 1
current_wb = None
current_file_records_count = 0
MAX_SHEETS_PER_FILE = 10 # Sheet limit per Excel file
for model_data in models_data:
model_name = model_data.get('model')
@@ -298,17 +308,17 @@ class ExportDataStageView(APIView):
model = apps.get_model('datastage', model_name)
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
# If there are filters, apply them; otherwise return no records
if filters:
queryset = model.objects.filter(**filters).values(*fields)
else:
queryset = model.objects.none() # Fetch nothing if there are no filters
queryset = model.objects.none()
total_records = queryset.count()
if total_records == 0:
continue
# If the model needs partitioning (more than MAX_RECORDS_PER_FILE records)
if total_records > self.MAX_RECORDS_PER_FILE:
from django.core.paginator import Paginator
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
@@ -316,29 +326,62 @@ class ExportDataStageView(APIView):
for page_num in paginator.page_range:
page = paginator.page(page_num)
wb = openpyxl.Workbook()
ws = wb.active
ws.title = f"Parte_{page_num}"[:31]
# Check whether we need to create a new file:
# 1. If there is no current file
# 2. If this file already has too many sheets
# 3. If this file is already "full" (too many records)
if (current_wb is None or
len(current_wb.sheetnames) >= MAX_SHEETS_PER_FILE or
current_file_records_count > self.MAX_RECORDS_PER_FILE * 3): # roughly 3x MAX_RECORDS_PER_FILE records
if current_wb is not None:
# Save the current file into the ZIP
part_buffer = io.BytesIO()
current_wb.save(part_buffer)
part_buffer.seek(0)
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
file_counter += 1
# Create a new workbook
current_wb = openpyxl.Workbook()
current_wb.remove(current_wb.active) # Remove the default sheet
current_file_records_count = 0
# Create a sheet for this part of the model
sheet_name = f"{model_name[:25]}_p{page_num}"
ws = current_wb.create_sheet(title=sheet_name[:31])
ws.append(fields)
# Write data
for row in page.object_list:
row_values = [self.safe_excel_value(row[field]) for field in fields]
ws.append(row_values)
# Save this part into the ZIP
part_buffer = io.BytesIO()
wb.save(part_buffer)
part_buffer.seek(0)
filename = f"{model_name}_part{page_num}.xlsx"
zip_file.writestr(filename, part_buffer.getvalue())
current_file_records_count += len(page.object_list)
else:
wb = openpyxl.Workbook()
ws = wb.active
ws.title = "Datos"[:31]
# Small model (≤ MAX_RECORDS_PER_FILE)
# Check whether we need a new file
if (current_wb is None or
len(current_wb.sheetnames) >= MAX_SHEETS_PER_FILE or
current_file_records_count + total_records > self.MAX_RECORDS_PER_FILE * 3):
if current_wb is not None:
# Save the current file
part_buffer = io.BytesIO()
current_wb.save(part_buffer)
part_buffer.seek(0)
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
file_counter += 1
# Create a new workbook
current_wb = openpyxl.Workbook()
current_wb.remove(current_wb.active)
current_file_records_count = 0
# Create a sheet for this model
sheet_name = model_name[:31]
ws = current_wb.create_sheet(title=sheet_name)
ws.append(fields)
# Write data
@@ -346,25 +389,22 @@ class ExportDataStageView(APIView):
row_values = [self.safe_excel_value(row[field]) for field in fields]
ws.append(row_values)
part_buffer = io.BytesIO()
wb.save(part_buffer)
part_buffer.seek(0)
current_file_records_count += total_records
filename = f"{model_name}.xlsx"
zip_file.writestr(filename, part_buffer.getvalue())
except LookupError as e:
except LookupError:
continue
except Exception as e:
continue
# Save the last workbook if it exists
if current_wb is not None:
part_buffer = io.BytesIO()
current_wb.save(part_buffer)
part_buffer.seek(0)
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
zip_buffer.seek(0)
zip_content = zip_buffer.getvalue()
response = HttpResponse(zip_content, content_type='application/zip')
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
response['Content-Length'] = len(zip_content)
return response
except Exception as e:
@@ -488,7 +528,7 @@ class ExportDataStageView(APIView):
except Exception as e:
return Response({'error': f'Error en exportación CSV particionada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def export_single_model_partitioned(self, request, model_name, fields, filters, total_records):
"""Exporta un solo modelo particionado a ZIP"""
try:
@@ -578,62 +618,71 @@ class ExportDataStageView(APIView):
def get_related_keys_from_filters(self, global_filters, models_data, user):
"""
Gets the patentes, pedimentos and datastages that EXACTLY match ALL the global filters,
so they can be used as the relation between models.
SIMPLIFIED VERSION - uses the SAME logic as apply_global_filters_to_model
"""
import logging
logger = logging.getLogger(__name__)
related_keys = {
'patentes': set(),
'pedimentos': set(),
'datastage_ids': set()
}
# If there are no global filters, return empty (no relation)
if not any(global_filters.values()):
logger.info("🔥" * 60)
logger.info("🔥 DEBUG get_related_keys_from_filters - COMPARACIÓN CON SINGULAR")
logger.info(f"🔥 Filtros recibidos: {global_filters}")
logger.info(f"🔥 RFC específico: '{global_filters.get('rfc')}'")
# If there are no filters, return empty
if not any(v for v in global_filters.values() if v not in [None, '']):
return {}
all_records_with_filters = []
# Search ALL models that may have the filter fields
for model_data in models_data:
model_name = model_data.get('model')
logger.info(f"\n🔥 PROCESANDO: {model_name}")
try:
model = apps.get_model('datastage', model_name)
model_fields = [f.name for f in model._meta.get_fields()]
# Build EXACT filters with ALL available fields
filters = {}
has_any_filter = False
# USE THE SAME FUNCTION AS IN SINGLE MODE!
filters = self.apply_global_filters_to_model(global_filters, model, user)
if 'organizacion' in model_fields and global_filters.get('organizacion'):
filters['organizacion'] = global_filters['organizacion']
has_any_filter = True
logger.info(f"🔥 Filtros después de apply_global_filters_to_model: {filters}")
if 'patente' in model_fields and global_filters.get('patente'):
filters['patente'] = global_filters['patente']
has_any_filter = True
if 'pedimento' in model_fields and global_filters.get('pedimento'):
filters['pedimento'] = global_filters['pedimento']
has_any_filter = True
if 'rfc' in model_fields and global_filters.get('rfc'):
filters['rfc'] = global_filters['rfc']
has_any_filter = True
if 'fecha_pago_real' in model_fields:
if global_filters.get('fecha_pago_desde'):
filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde']
has_any_filter = True
if global_filters.get('fecha_pago_hasta'):
filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta']
has_any_filter = True
if has_any_filter:
records = model.objects.filter(**filters).values('patente', 'pedimento', 'datastage_id')
record_count = records.count()
all_records_with_filters.extend(list(records))
if filters:
# RUN THE QUERY - IDENTICAL TO SINGLE MODE
queryset = model.objects.filter(**filters)
total = queryset.count()
logger.info(f"🔥 Total registros: {total}")
# SPECIAL CHECK FOR RFC
if 'rfc' in filters:
rfc_value = filters['rfc']
# Double check: count records with that exact RFC
rfc_exact_count = queryset.filter(rfc=rfc_value).count()
logger.info(f"🔥 Verificación RFC: {rfc_exact_count}/{total} registros tienen RFC '{rfc_value}'")
if rfc_exact_count != total:
logger.error(f"🔥🔥🔥 ERROR: Hay {total - rfc_exact_count} registros con OTRO RFC!")
# Show the differing RFCs
try:
other_rfcs = queryset.exclude(rfc=rfc_value).values_list('rfc', flat=True).distinct()[:5]
logger.error(f"🔥🔥🔥 RFCs diferentes encontrados: {list(other_rfcs)}")
except Exception:
pass
# Show the generated SQL
logger.info(f"🔥 SQL generada: {str(queryset.query)}")
# Fetch records
records = queryset.values('patente', 'pedimento', 'datastage_id')
all_records_with_filters.extend(list(records))
except LookupError:
continue
@@ -648,95 +697,247 @@ class ExportDataStageView(APIView):
if record.get('datastage_id'):
related_keys['datastage_ids'].add(record['datastage_id'])
related_keys = {k: list(v) for k, v in related_keys.items() if v}
return related_keys
logger.info(f"\n🔥 Related_keys encontradas:")
logger.info(f"🔥 Patentes: {len(related_keys['patentes'])}")
logger.info(f"🔥 Pedimentos: {len(related_keys['pedimentos'])}")
return {k: list(v) for k, v in related_keys.items() if v}
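To make the hand-off explicit: the dict returned here (consumed later by apply_related_filters) maps each common field to a plain list, and keys whose set ended up empty are dropped. A hypothetical result for one matching record might look like:

related_keys = {
    'patentes': ['3842'],  # hypothetical patente
    'pedimentos': ['5001234'],  # hypothetical pedimento number
    'datastage_ids': ['b7d0c9f2-1a34-4b56-8cde-90f1a2b3c4d5'],  # hypothetical id
}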
def apply_global_filters_to_model(self, global_filters, model, user):
"""
Applies global filters specifically for DataStage models (simple mode)
Applies global filters - CORRECTED VERSION WITH UUID
"""
import logging
logger = logging.getLogger(__name__)
filters = {}
model_fields = [f.name for f in model._meta.get_fields()]
if 'organizacion' in model_fields and global_filters.get('organizacion'):
filters['organizacion'] = global_filters['organizacion']
logger.info(f"\n🔍 apply_global_filters_to_model - Modelo: {model.__name__}")
if 'patente' in model_fields and global_filters.get('patente'):
# ORGANIZACION - handle as UUID
org_value = global_filters.get('organizacion')
if org_value and org_value != '' and 'organizacion' in model_fields:
field = model._meta.get_field('organizacion')
if field.is_relation: # It's a ForeignKey
# Convert the string to a UUID
try:
import uuid
org_uuid = uuid.UUID(org_value)
filters['organizacion_id'] = org_uuid
logger.info(f"✅ Organización como UUID: {org_uuid}")
except Exception as e:
logger.error(f"❌ Error convirtiendo organizacion a UUID: {e}")
# Fallback: leave it as a string (may not work)
filters['organizacion_id'] = org_value
logger.warning(f"⚠️ Organización como string: {org_value}")
else: # It's a CharField
filters['organizacion'] = org_value
logger.info(f"✅ Organización como string: {org_value}")
# RFC - handle normally
rfc_value = global_filters.get('rfc')
if rfc_value and rfc_value != '' and 'rfc' in model_fields:
filters['rfc'] = rfc_value
logger.info(f"✅ RFC: {rfc_value}")
# PATENTE
if global_filters.get('patente'):
filters['patente'] = global_filters['patente']
if 'pedimento' in model_fields and global_filters.get('pedimento'):
# PEDIMENTO
if global_filters.get('pedimento'):
filters['pedimento'] = global_filters['pedimento']
if 'rfc' in model_fields and global_filters.get('rfc'):
filters['rfc'] = global_filters['rfc']
# DATES
if 'fecha_pago_real' in model_fields:
if global_filters.get('fecha_pago_desde'):
filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde']
if global_filters.get('fecha_pago_hasta'):
filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta']
logger.info(f"🔍 Filtros finales: {filters}")
return filters
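As a concrete illustration of the UUID branch above, applying a global filter like {'organizacion': '<uuid string>', 'rfc': 'XAXX010101000'} to a model whose organizacion field is a ForeignKey would yield roughly the following (values hypothetical):

import uuid

filters = {
    'organizacion_id': uuid.UUID('3f9c2c1e-8a6b-4b2d-9f1a-0c5d7e2b4a11'),  # converted from the incoming string
    'rfc': 'XAXX010101000',
}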
def apply_related_filters(self, global_filters, model, related_keys, user):
"""
Applies related filters based on common fields in a STRICT way - CORRECTED VERSION
"""
filters = {}
model_fields = [f.name for f in model._meta.get_fields()]
logger.info(f"\n🎯 apply_related_filters para {model.__name__}")
# 🔥 IMPROVED STRATEGY: use related keys IF ANY EXIST, otherwise apply direct filters ONLY if present
has_related_keys = any(related_keys.values())
# 🔥 FIRST: APPLY THE BASE GLOBAL FILTERS (RFC, ORGANIZACION, DATES)
# These are the filters that must ALWAYS be applied
if has_related_keys:
# 🔥 STRICT RELATED MODE: use ONLY the keys that were obtained
# Build conditions for the related keys
from django.db.models import Q
related_conditions = Q()
has_related_conditions = False
if related_keys.get('patentes') and 'patente' in model_fields:
filters['patente__in'] = related_keys['patentes']
has_related_conditions = True
if related_keys.get('pedimentos') and 'pedimento' in model_fields:
filters['pedimento__in'] = related_keys['pedimentos']
has_related_conditions = True
if related_keys.get('datastage_ids') and 'datastage_id' in model_fields:
filters['datastage_id__in'] = related_keys['datastage_ids']
has_related_conditions = True
# If there are NO related conditions for this model (it lacks those fields)
if not has_related_conditions:
return {} # Returning an empty filter means no records will be fetched
else:
# 🔥 DIRECT MODE: no related keys, apply direct filters ONLY if they exist
if 'organizacion' in model_fields and global_filters.get('organizacion'):
filters['organizacion'] = global_filters['organizacion']
if 'patente' in model_fields and global_filters.get('patente'):
filters['patente'] = global_filters['patente']
if 'pedimento' in model_fields and global_filters.get('pedimento'):
filters['pedimento'] = global_filters['pedimento']
if 'rfc' in model_fields and global_filters.get('rfc'):
filters['rfc'] = global_filters['rfc']
# 🔥 ALWAYS APPLY ORGANIZACION if present (in both modes)
# 1. Organizacion
if 'organizacion' in model_fields and global_filters.get('organizacion'):
filters['organizacion'] = global_filters['organizacion']
# 🔥 ALWAYS APPLY DATE FILTERS (if the field exists)
logger.info(f"✅ Filtro organizacion: {global_filters.get('organizacion')}")
# 2. RFC (THIS IS WHAT WAS MISSING!)
if 'rfc' in model_fields and global_filters.get('rfc'):
filters['rfc'] = global_filters['rfc']
logger.info(f"✅ Filtro RFC: {global_filters.get('rfc')}")
# 3. Dates (ALWAYS applied)
if 'fecha_pago_real' in model_fields:
if global_filters.get('fecha_pago_desde'):
filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde']
logger.info(f"✅ Fecha desde: {global_filters.get('fecha_pago_desde')}")
if global_filters.get('fecha_pago_hasta'):
filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta']
logger.info(f"✅ Fecha hasta: {global_filters.get('fecha_pago_hasta')}")
# 🔥 SECOND: if there are related_keys, ADD THEM to the existing filters
if any(related_keys.values()):
logger.info(f"🎯 Añadiendo related_keys a filtros existentes")
# Add patentes if present
if related_keys.get('patentes') and 'patente' in model_fields:
filters['patente__in'] = related_keys['patentes']
logger.info(f"✅ Filtrando por {len(related_keys['patentes'])} patentes")
# Add pedimentos if present
if related_keys.get('pedimentos') and 'pedimento' in model_fields:
filters['pedimento__in'] = related_keys['pedimentos']
logger.info(f"✅ Filtrando por {len(related_keys['pedimentos'])} pedimentos")
# Add datastage_ids if present
if related_keys.get('datastage_ids') and 'datastage_id' in model_fields:
filters['datastage_id__in'] = related_keys['datastage_ids']
logger.info(f"✅ Filtrando por {len(related_keys['datastage_ids'])} datastage_ids")
# 🔥 THIRD: if there are NO related_keys but there are specific filters, add them
else:
# Only specific patente and pedimento values (not lists)
if 'patente' in model_fields and global_filters.get('patente'):
filters['patente'] = global_filters['patente']
logger.info(f"✅ Filtro patente específica: {global_filters.get('patente')}")
if 'pedimento' in model_fields and global_filters.get('pedimento'):
filters['pedimento'] = global_filters['pedimento']
logger.info(f"✅ Filtro pedimento específico: {global_filters.get('pedimento')}")
logger.info(f"🎯 Filtros FINALES para {model.__name__}: {filters}")
return filters
def estimate_excel_file_size(self, num_records, num_columns):
"""Estima tamaño aproximado del archivo Excel"""
# Estimación aproximada: 100 bytes por celda
return num_records * num_columns * 100
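A quick sanity check of this heuristic against the limits used elsewhere in this view (the 120,000-record MAX_RECORDS_PER_FILE and the 50 MB MAX_FILE_SIZE_ESTIMATE below); the 10-column width is an assumed example:

records, columns = 120_000, 10
estimated = records * columns * 100  # 120,000,000 bytes, ~114 MiB
max_estimate = 50 * 1024 * 1024  # 52,428,800 bytes (50 MiB)
assert estimated > max_estimate  # a full-size model never shares a file with another model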
def export_with_size_control(self, request, models_data, global_filters, related_keys):
"""Versión con control de tamaño de archivo"""
try:
zip_buffer = io.BytesIO()
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
file_counter = 1
current_wb = None
current_file_size_estimate = 0
MAX_FILE_SIZE_ESTIMATE = 50 * 1024 * 1024 # estimated 50 MB
for model_data in models_data:
model_name = model_data.get('model')
fields = model_data.get('fields', [])
if not model_name or not fields:
continue
try:
model = apps.get_model('datastage', model_name)
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
if filters:
queryset = model.objects.filter(**filters).values(*fields)
else:
queryset = model.objects.none()
total_records = queryset.count()
if total_records == 0:
continue
# Compute the estimated size for this model
model_size_estimate = self.estimate_excel_file_size(total_records, len(fields))
# If the model is too large or does not fit in the current file
needs_new_file = (
current_wb is None or
current_file_size_estimate + model_size_estimate > MAX_FILE_SIZE_ESTIMATE or
(total_records > self.MAX_RECORDS_PER_FILE and current_file_size_estimate > 0)
)
if needs_new_file and current_wb is not None:
# Save the current file
part_buffer = io.BytesIO()
current_wb.save(part_buffer)
part_buffer.seek(0)
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
file_counter += 1
current_wb = None
current_file_size_estimate = 0
if current_wb is None:
current_wb = openpyxl.Workbook()
current_wb.remove(current_wb.active)
# Handle models that exceed the per-sheet limit
if total_records > self.MAX_RECORDS_PER_FILE:
from django.core.paginator import Paginator
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
for page_num in paginator.page_range:
page = paginator.page(page_num)
# Create a sheet for this part
sheet_name = f"{model_name[:20]}_p{page_num}"[:31]
ws = current_wb.create_sheet(title=sheet_name)
ws.append(fields)
for row in page.object_list:
row_values = [self.safe_excel_value(row[field]) for field in fields]
ws.append(row_values)
# Update the estimated size
page_size = self.estimate_excel_file_size(len(page.object_list), len(fields))
current_file_size_estimate += page_size
else:
# Small model, one sheet
sheet_name = model_name[:31]
ws = current_wb.create_sheet(title=sheet_name)
ws.append(fields)
for row in queryset:
row_values = [self.safe_excel_value(row[field]) for field in fields]
ws.append(row_values)
current_file_size_estimate += model_size_estimate
except LookupError:
continue
# Save the last file if it exists
if current_wb is not None:
part_buffer = io.BytesIO()
current_wb.save(part_buffer)
part_buffer.seek(0)
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
zip_buffer.seek(0)
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
return response
except Exception as e:
return Response({'error': f'Error: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ExportModelView(APIView):
my_tags = ['Reportes']
permission_classes = [IsAuthenticated & (IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]