1977 lines
91 KiB
Python
1977 lines
91 KiB
Python
from warnings import filters
|
|
from rest_framework.decorators import api_view, permission_classes
|
|
from rest_framework.permissions import IsAuthenticated
|
|
from api.customs.models import Pedimento, Cove, EDocument, Partida
|
|
from api.record.models import Document
|
|
from api.organization.models import Organizacion
|
|
from django.db.models import Count, Q
|
|
|
|
# Registrar endpoint en urls.py:
|
|
# path('dashboard/summary/', dashboard_summary)
|
|
import csv
|
|
import io
|
|
from drf_yasg.utils import swagger_auto_schema
|
|
from drf_yasg import openapi
|
|
from .serializers import ExportModelSerializer
|
|
from rest_framework.response import Response
|
|
from django.http import HttpResponse
|
|
import openpyxl
|
|
from django.apps import apps
|
|
from rest_framework import status
|
|
from django.shortcuts import render
|
|
from rest_framework import viewsets
|
|
|
|
from .serializers import ExportModelSerializer
|
|
from core.permissions import (
|
|
IsSameOrganization,
|
|
IsSameOrganizationDeveloper,
|
|
IsSameOrganizationAndAdmin,
|
|
IsSuperUser
|
|
)
|
|
from rest_framework.permissions import IsAuthenticated
|
|
|
|
import csv
|
|
import io
|
|
import openpyxl
|
|
from django.http import HttpResponse
|
|
from django.apps import apps
|
|
from rest_framework.views import APIView
|
|
from rest_framework.response import Response
|
|
from rest_framework import status
|
|
from drf_yasg.utils import swagger_auto_schema
|
|
from drf_yasg import openapi
|
|
from rest_framework.permissions import IsAuthenticated
|
|
from core.permissions import (
|
|
IsSameOrganization,
|
|
IsSameOrganizationDeveloper,
|
|
IsSameOrganizationAndAdmin,
|
|
IsSuperUser
|
|
)
|
|
from .serializers import ExportModelSerializer
|
|
import uuid
|
|
import datetime
|
|
import zipfile
|
|
from django.db import models
|
|
|
|
def export_model_to_csv(request, model_name, fields, module='datastage', filters=None):
|
|
model = apps.get_model(module, model_name)
|
|
queryset = model.objects.filter(**(filters or {})).values(*fields)
|
|
response = HttpResponse(content_type='text/csv')
|
|
response['Content-Disposition'] = f'attachment; filename="{model_name}.csv"'
|
|
writer = csv.DictWriter(response, fieldnames=fields)
|
|
writer.writeheader()
|
|
for row in queryset:
|
|
writer.writerow(row)
|
|
return response
|
|
|
|
|
|
def export_model_to_excel(request, model_name, fields, module='datastage', filters=None):
|
|
model = apps.get_model(module, model_name)
|
|
queryset = model.objects.filter(**(filters or {})).values(*fields)
|
|
wb = openpyxl.Workbook()
|
|
ws = wb.active
|
|
ws.append(fields)
|
|
for row in queryset:
|
|
# Convertir cada valor a string para asegurar compatibilidad con Excel
|
|
row_values = []
|
|
for field in fields:
|
|
value = row[field]
|
|
# Si es UUID u otro objeto, convertirlo a string
|
|
if hasattr(value, '__str__'):
|
|
value = str(value)
|
|
row_values.append(value)
|
|
ws.append(row_values)
|
|
output = io.BytesIO()
|
|
wb.save(output)
|
|
output.seek(0)
|
|
response = HttpResponse(output.read(
|
|
), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
|
|
response['Content-Disposition'] = f'attachment; filename="{model_name}.xlsx"'
|
|
return response
|
|
|
|
# class ControlPedimentoView(APIView):
|
|
# my_tags = ['Control-Pedimento']
|
|
# permission_classes = [IsAuthenticated & (IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]
|
|
|
|
# @swagger_auto_schema(request_body=ExportModelSerializer, responses={200: 'Archivo generado (Excel o CSV)'})
|
|
# def post(self, request, *args, **kwargs):
|
|
# """
|
|
# Endpoint específico para exportación de DataStage con soporte múltiple
|
|
# """
|
|
# # Verificar si es modo múltiple
|
|
# modo = request.data.get('modo', 'simple')
|
|
|
|
# if modo == 'multiple':
|
|
# return self.handle_multiple_export(request)
|
|
# else:
|
|
# return self.handle_simple_export(request)
|
|
|
|
|
|
|
|
class ExportDataStageView(APIView):
|
|
my_tags = ['Reportes-DataStage']
|
|
permission_classes = [IsAuthenticated & (IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]
|
|
|
|
# Constantes para partición
|
|
# MAX_RECORDS_PER_FILE = 100 # Límite seguro por archivo
|
|
MAX_RECORDS_PER_FILE = 120000 # Límite seguro por archivo
|
|
|
|
def safe_excel_value(self, value):
|
|
"""
|
|
Convierte cualquier valor a un formato seguro para Excel
|
|
"""
|
|
if value is None:
|
|
return ''
|
|
elif isinstance(value, (uuid.UUID,)):
|
|
return str(value)
|
|
elif hasattr(value, 'uuid'):
|
|
return str(value.uuid)
|
|
elif hasattr(value, 'id'):
|
|
return str(value.id)
|
|
elif isinstance(value, (datetime.datetime, datetime.date)):
|
|
return value.isoformat()
|
|
elif isinstance(value, (dict, list)):
|
|
return str(value)
|
|
else:
|
|
return str(value)
|
|
|
|
@swagger_auto_schema(request_body=ExportModelSerializer, responses={200: 'Archivo generado (Excel o CSV)'})
|
|
def post(self, request, *args, **kwargs):
|
|
"""
|
|
Endpoint específico para exportación de DataStage con soporte múltiple
|
|
"""
|
|
# Verificar si es modo múltiple
|
|
modo = request.data.get('modo', 'simple')
|
|
|
|
if modo == 'multiple':
|
|
return self.handle_multiple_export(request)
|
|
else:
|
|
return self.handle_simple_export(request)
|
|
|
|
def handle_simple_export(self, request):
|
|
"""Maneja exportación simple de DataStage (un solo modelo)"""
|
|
model_name = request.data.get('model')
|
|
fields = request.data.get('fields')
|
|
global_filters = request.data.get('globalFilters', {})
|
|
export_type = request.data.get('format', 'csv')
|
|
module = 'datastage'
|
|
|
|
if not model_name or not fields:
|
|
return Response({'error': 'model and fields are required'}, status=status.HTTP_400_BAD_REQUEST)
|
|
|
|
try:
|
|
model = apps.get_model(module, model_name)
|
|
filters = self.apply_global_filters_to_model(global_filters, model, request.user)
|
|
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
total_records = queryset.count()
|
|
|
|
if export_type == 'excel':
|
|
# Verificar si necesita partición
|
|
if total_records > self.MAX_RECORDS_PER_FILE:
|
|
return self.export_single_model_partitioned(request, model_name, fields, filters, total_records)
|
|
else:
|
|
return export_model_to_excel(request, model_name, fields, module, filters)
|
|
else:
|
|
if total_records > self.MAX_RECORDS_PER_FILE:
|
|
return self.export_single_model_csv_partitioned(request, model_name, fields, filters, total_records)
|
|
else:
|
|
return export_model_to_csv(request, model_name, fields, module, filters)
|
|
|
|
except LookupError:
|
|
return Response({'error': f'Model {model_name} not found'}, status=status.HTTP_404_NOT_FOUND)
|
|
|
|
def handle_multiple_export(self, request):
|
|
"""Maneja exportación múltiple de DataStage (varios modelos)"""
|
|
models_data = request.data.get('models', [])
|
|
export_type = request.data.get('format', 'csv')
|
|
global_filters = request.data.get('globalFilters', {})
|
|
|
|
if not models_data:
|
|
return Response({'error': 'models are required for multiple export'}, status=status.HTTP_400_BAD_REQUEST)
|
|
|
|
related_keys = self.get_related_keys_from_filters(global_filters, models_data, request.user)
|
|
|
|
if export_type == 'excel':
|
|
# Siempre usar el método particionado inteligente para Excel
|
|
return self.export_datastage_multiple_partitioned_excel_agrupados(request, models_data, global_filters, related_keys)
|
|
else:
|
|
# Para CSV, podemos mantener la lógica actual o mejorarla
|
|
total_estimated_records = self.estimate_total_records(models_data, global_filters, related_keys, request.user)
|
|
if total_estimated_records > self.MAX_RECORDS_PER_FILE:
|
|
return self.export_datastage_multiple_partitioned_csv(request, models_data, global_filters, related_keys)
|
|
else:
|
|
return self.export_datastage_multiple_to_csv(request, models_data, global_filters, related_keys)
|
|
|
|
def estimate_total_records(self, models_data, global_filters, related_keys, user):
|
|
"""Estima el total de registros para todos los modelos"""
|
|
total = 0
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, user)
|
|
total += model.objects.filter(**filters).count()
|
|
except:
|
|
continue
|
|
return total
|
|
|
|
def export_datastage_multiple_to_excel(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage con filtrado relacionado (múltiples hojas)"""
|
|
wb = openpyxl.Workbook()
|
|
wb.remove(wb.active)
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
|
|
# 🔥 APLICAR FILTROS RELACIONADOS
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
# Si hay filtros, aplicarlos; si no, obtener todos los registros
|
|
if filters:
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
else:
|
|
queryset = model.objects.none() # No obtener nada si no hay filtros
|
|
|
|
# Si no hay registros, saltar este modelo
|
|
if queryset.count() == 0:
|
|
continue
|
|
|
|
# Crear hoja (limitar nombre a 31 caracteres)
|
|
sheet_name = model_name[:31]
|
|
ws = wb.create_sheet(title=sheet_name)
|
|
|
|
# Escribir encabezados
|
|
ws.append(fields)
|
|
|
|
# Escribir datos
|
|
for row in queryset:
|
|
row_values = []
|
|
for field in fields:
|
|
value = row[field]
|
|
# 🔥 USAR safe_excel_value para convertir valores
|
|
row_values.append(self.safe_excel_value(value))
|
|
ws.append(row_values)
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
# Si no se crearon hojas, crear una vacía
|
|
if len(wb.sheetnames) == 0:
|
|
ws = wb.create_sheet(title="Sin datos")
|
|
ws.append(["No se encontraron datos para los modelos especificados"])
|
|
|
|
output = io.BytesIO()
|
|
wb.save(output)
|
|
output.seek(0)
|
|
|
|
response = HttpResponse(
|
|
output.read(),
|
|
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
|
|
)
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_related_report.xlsx"'
|
|
return response
|
|
|
|
def export_datastage_multiple_partitioned_excel_agrupados(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage agrupados en la misma hoja de Excel, con particionado por límite de registros"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
|
|
# 🔥 PRECARGAR ORGANIZACIONES para mapeo rápido
|
|
from api.organization.models import Organizacion
|
|
organizaciones = Organizacion.objects.all()
|
|
org_mapping = {str(org.id): org.nombre for org in organizaciones}
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
|
|
# 1. Recopilar todos los datos de cada modelo
|
|
all_models_data = {} # Ahora será una lista por clave
|
|
model_field_mappings = {}
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
# Normalizar nombres de campo entrantes: si se pasó "Organizacion"
|
|
# (cualquier capitalización), usar el campo real de la BD `organizacion_id`.
|
|
normalized_fields = []
|
|
for f in fields:
|
|
try:
|
|
key = f.strip() if isinstance(f, str) else f
|
|
except Exception:
|
|
key = f
|
|
|
|
if isinstance(key, str) and key.lower() == 'organizacion':
|
|
if 'organizacion_id' not in normalized_fields:
|
|
normalized_fields.append('organizacion_id')
|
|
else:
|
|
if key not in normalized_fields:
|
|
normalized_fields.append(key)
|
|
|
|
fields = normalized_fields
|
|
|
|
# Asegurar que tenemos los campos de relación
|
|
required_fields = ['seccion_aduanera', 'patente', 'pedimento']
|
|
for field in required_fields:
|
|
if field not in fields:
|
|
fields.append(field)
|
|
|
|
# 🔥 Añadir organizacion_id a los campos si no está y existe en el modelo
|
|
if 'organizacion_id' not in fields and 'organizacion_id' in [f.name for f in apps.get_model('datastage', model_name)._meta.get_fields()]:
|
|
fields.append('organizacion_id')
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
if filters:
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
else:
|
|
queryset = model.objects.none()
|
|
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
# Determinar campos de relación disponibles en este modelo
|
|
relation_fields = []
|
|
for field_name in ['seccion_aduanera', 'patente', 'pedimento']:
|
|
if field_name in fields:
|
|
relation_fields.append(field_name)
|
|
|
|
if not relation_fields:
|
|
# Si no hay campos de relación, usar un identificador único
|
|
relation_fields = ['datastage_id'] if 'datastage_id' in fields else [fields[0]]
|
|
|
|
# Guardar mapeo de campos para este modelo
|
|
if model_name not in model_field_mappings:
|
|
model_field_mappings[model_name] = fields
|
|
|
|
# Procesar cada registro
|
|
for record in queryset:
|
|
# Crear clave de relación
|
|
key_parts = []
|
|
for rel_field in relation_fields:
|
|
if rel_field in record and record[rel_field] is not None:
|
|
key_parts.append(str(record[rel_field]))
|
|
|
|
if not key_parts:
|
|
# Si no hay campos de relación, usar un hash del registro
|
|
import hashlib
|
|
record_str = str(sorted(record.items()))
|
|
key = hashlib.md5(record_str.encode()).hexdigest()[:10]
|
|
else:
|
|
key = "_".join(key_parts)
|
|
|
|
# 🔥 PROCESAR CAMPO organizacion_id para convertirlo a nombre
|
|
processed_record = {}
|
|
for field_name, value in record.items():
|
|
# Convertir organizacion_id a nombre
|
|
if field_name == 'organizacion_id' and value:
|
|
org_id_str = str(value)
|
|
# Usar el nombre de la organización si está en el mapeo
|
|
if org_id_str in org_mapping:
|
|
processed_value = org_mapping[org_id_str]
|
|
else:
|
|
# Si no se encuentra, intentar obtener de la base de datos
|
|
try:
|
|
org = Organizacion.objects.filter(id=value).first()
|
|
processed_value = org.nombre if org else str(value)
|
|
# Actualizar mapeo para futuras referencias
|
|
org_mapping[org_id_str] = processed_value
|
|
except:
|
|
processed_value = str(value)
|
|
else:
|
|
processed_value = value
|
|
|
|
# Agregar prefijo del modelo a los campos para evitar colisiones
|
|
if field_name in relation_fields:
|
|
prefixed_field_name = field_name
|
|
else:
|
|
prefixed_field_name = f"{model_name}_{field_name}"
|
|
|
|
# 🔥 RENOMBRAR organizacion_id a organizacion_nombre
|
|
if field_name == 'organizacion_id':
|
|
prefixed_field_name = prefixed_field_name.replace('organizacion_id', 'organizacion_nombre')
|
|
|
|
processed_record[prefixed_field_name] = self.safe_excel_value(processed_value)
|
|
|
|
# 🔥 CORRECIÓN: Ahora almacenamos una LISTA de registros por clave
|
|
if key not in all_models_data:
|
|
all_models_data[key] = {
|
|
'relation_fields': {}, # Campos de relación compartidos
|
|
'model_records': {} # Diccionario de listas por modelo
|
|
}
|
|
|
|
# Guardar campos de relación (solo una vez, ya que son los mismos)
|
|
for rel_field in relation_fields:
|
|
if rel_field in record:
|
|
all_models_data[key]['relation_fields'][rel_field] = record[rel_field]
|
|
|
|
# 🔥 GUARDAR COMO LISTA: Crear lista si no existe
|
|
if model_name not in all_models_data[key]['model_records']:
|
|
all_models_data[key]['model_records'][model_name] = []
|
|
|
|
# Agregar este registro a la lista del modelo
|
|
all_models_data[key]['model_records'][model_name].append(processed_record)
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
# Si no hay datos, retornar error
|
|
if not all_models_data:
|
|
return Response({'error': 'No se encontraron datos para exportar'}, status=status.HTTP_404_NOT_FOUND)
|
|
|
|
# 2. Crear estructura de filas combinadas
|
|
# Ahora necesitamos expandir las filas cuando hay múltiples registros con la misma clave
|
|
combined_rows = []
|
|
|
|
for key, data in all_models_data.items():
|
|
relation_fields = data['relation_fields']
|
|
model_records = data['model_records']
|
|
|
|
# 🔥 NUEVO: Calcular cuántas filas necesitamos para esta clave
|
|
# Encontrar el modelo con más registros para esta clave
|
|
max_records_per_key = 1
|
|
for model_name, records in model_records.items():
|
|
if len(records) > max_records_per_key:
|
|
max_records_per_key = len(records)
|
|
|
|
# 🔗 CREAR UNA FILA POR CADA COMBINACIÓN
|
|
for i in range(max_records_per_key):
|
|
row_data = {}
|
|
|
|
# Campos de relación (mismos para todas las filas con esta clave)
|
|
for rel_field, rel_value in relation_fields.items():
|
|
row_data[rel_field] = self.safe_excel_value(rel_value)
|
|
|
|
# Datos de cada modelo
|
|
for model_name, records in model_records.items():
|
|
# Si hay un registro en esta posición i
|
|
if i < len(records):
|
|
record = records[i]
|
|
for field_name, value in record.items():
|
|
row_data[field_name] = value
|
|
else:
|
|
# Si no hay más registros para este modelo, poner campos vacíos
|
|
for field_name in model_field_mappings.get(model_name, []):
|
|
if field_name in ['seccion_aduanera', 'patente', 'pedimento', 'organizacion_id']:
|
|
# Los campos de relación ya están llenados o transformados
|
|
continue
|
|
prefixed_field_name = f"{model_name}_{field_name}"
|
|
# 🔥 RENOMBRAR organizacion_id a organizacion_nombre
|
|
if field_name == 'organizacion_id':
|
|
prefixed_field_name = prefixed_field_name.replace('organizacion_id', 'organizacion_nombre')
|
|
row_data[prefixed_field_name] = ''
|
|
|
|
combined_rows.append(row_data)
|
|
|
|
# 3. Determinar todos los campos únicos para los encabezados
|
|
all_fields_set = set()
|
|
|
|
# Campos de relación primero
|
|
common_relation_fields = ['seccion_aduanera', 'patente', 'pedimento']
|
|
|
|
# Agregar todos los campos de todas las filas
|
|
for row in combined_rows:
|
|
all_fields_set.update(row.keys())
|
|
|
|
# Ordenar campos: relación primero, luego alfabéticamente
|
|
all_fields = []
|
|
for rel_field in common_relation_fields:
|
|
if rel_field in all_fields_set:
|
|
all_fields.append(rel_field)
|
|
all_fields_set.remove(rel_field)
|
|
|
|
# 🔥 Mover organizacion_nombre cerca de los campos de relación
|
|
org_fields = [f for f in all_fields_set if 'organizacion' in f.lower()]
|
|
for org_field in sorted(org_fields):
|
|
all_fields.append(org_field)
|
|
all_fields_set.remove(org_field)
|
|
|
|
# Agregar el resto de campos ordenados alfabéticamente
|
|
all_fields.extend(sorted(all_fields_set))
|
|
|
|
total_records = len(combined_rows)
|
|
|
|
# 4. Manejar particionado
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(combined_rows, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
# Crear nuevo workbook para cada partición
|
|
current_wb = openpyxl.Workbook()
|
|
current_ws = current_wb.active
|
|
|
|
# Nombre de hoja limitado a 31 caracteres
|
|
sheet_name = f"Datastage_p{page_num}"
|
|
if len(sheet_name) > 31:
|
|
sheet_name = sheet_name[:31]
|
|
current_ws.title = sheet_name
|
|
|
|
# Escribir encabezados
|
|
current_ws.append(all_fields)
|
|
|
|
# Escribir datos de esta página
|
|
for row_data in page.object_list:
|
|
row_values = [row_data.get(field, '') for field in all_fields]
|
|
current_ws.append(row_values)
|
|
|
|
# Autoajustar anchos de columna
|
|
for column in current_ws.columns:
|
|
max_length = 0
|
|
column_letter = column[0].column_letter
|
|
|
|
for cell in column:
|
|
try:
|
|
if len(str(cell.value)) > max_length:
|
|
max_length = len(str(cell.value))
|
|
except:
|
|
pass
|
|
|
|
adjusted_width = min(max_length + 2, 50)
|
|
current_ws.column_dimensions[column_letter].width = adjusted_width
|
|
|
|
# Guardar archivo en ZIP
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_part{page_num}.xlsx", part_buffer.getvalue())
|
|
|
|
# Información de depuración
|
|
print(f"Creada partición {page_num} con {len(page.object_list)} registros combinados")
|
|
print(f"Total de claves únicas: {len(all_models_data)}")
|
|
print(f"Total de filas expandidas: {total_records}")
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_combinado.zip"'
|
|
return response
|
|
|
|
except Exception as e:
|
|
import traceback
|
|
error_details = traceback.format_exc()
|
|
print(f"Error en exportación: {error_details}")
|
|
return Response({'error': f'Error en exportación combinada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
|
|
def export_datastage_multiple_partitioned_excel_test_3(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage agrupados en la misma hoja de Excel, con particionado por límite de registros"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
|
|
# 1. Recopilar todos los datos de cada modelo
|
|
all_models_data = {} # Ahora será una lista por clave
|
|
model_field_mappings = {}
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
# Asegurar que tenemos los campos de relación
|
|
required_fields = ['seccion_aduanera', 'patente', 'pedimento']
|
|
for field in required_fields:
|
|
if field not in fields:
|
|
fields.append(field)
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
if filters:
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
else:
|
|
queryset = model.objects.none()
|
|
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
# Determinar campos de relación disponibles en este modelo
|
|
relation_fields = []
|
|
for field_name in ['seccion_aduanera', 'patente', 'pedimento']:
|
|
if field_name in fields:
|
|
relation_fields.append(field_name)
|
|
|
|
if not relation_fields:
|
|
# Si no hay campos de relación, usar un identificador único
|
|
relation_fields = ['datastage_id'] if 'datastage_id' in fields else [fields[0]]
|
|
|
|
# Guardar mapeo de campos para este modelo
|
|
if model_name not in model_field_mappings:
|
|
model_field_mappings[model_name] = fields
|
|
|
|
# Procesar cada registro
|
|
for record in queryset:
|
|
# Crear clave de relación
|
|
key_parts = []
|
|
for rel_field in relation_fields:
|
|
if rel_field in record and record[rel_field] is not None:
|
|
key_parts.append(str(record[rel_field]))
|
|
|
|
if not key_parts:
|
|
# Si no hay campos de relación, usar un hash del registro
|
|
import hashlib
|
|
record_str = str(sorted(record.items()))
|
|
key = hashlib.md5(record_str.encode()).hexdigest()[:10]
|
|
else:
|
|
key = "_".join(key_parts)
|
|
|
|
# Agregar prefijo del modelo a los campos para evitar colisiones
|
|
prefixed_fields = {}
|
|
for field_name, value in record.items():
|
|
# Solo agregar prefijo si no es un campo de relación
|
|
if field_name in relation_fields:
|
|
prefixed_field_name = field_name
|
|
else:
|
|
prefixed_field_name = f"{model_name}_{field_name}"
|
|
prefixed_fields[prefixed_field_name] = self.safe_excel_value(value)
|
|
|
|
# 🔥 CORRECIÓN: Ahora almacenamos una LISTA de registros por clave
|
|
if key not in all_models_data:
|
|
all_models_data[key] = {
|
|
'relation_fields': {}, # Campos de relación compartidos
|
|
'model_records': {} # Diccionario de listas por modelo
|
|
}
|
|
|
|
# Guardar campos de relación (solo una vez, ya que son los mismos)
|
|
for rel_field in relation_fields:
|
|
if rel_field in record:
|
|
all_models_data[key]['relation_fields'][rel_field] = record[rel_field]
|
|
|
|
# 🔥 GUARDAR COMO LISTA: Crear lista si no existe
|
|
if model_name not in all_models_data[key]['model_records']:
|
|
all_models_data[key]['model_records'][model_name] = []
|
|
|
|
# Agregar este registro a la lista del modelo
|
|
all_models_data[key]['model_records'][model_name].append(prefixed_fields)
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
# Si no hay datos, retornar error
|
|
if not all_models_data:
|
|
return Response({'error': 'No se encontraron datos para exportar'}, status=status.HTTP_404_NOT_FOUND)
|
|
|
|
# 2. Crear estructura de filas combinadas
|
|
# Ahora necesitamos expandir las filas cuando hay múltiples registros con la misma clave
|
|
combined_rows = []
|
|
|
|
for key, data in all_models_data.items():
|
|
relation_fields = data['relation_fields']
|
|
model_records = data['model_records']
|
|
|
|
# 🔥 NUEVO: Calcular cuántas filas necesitamos para esta clave
|
|
# Encontrar el modelo con más registros para esta clave
|
|
max_records_per_key = 1
|
|
for model_name, records in model_records.items():
|
|
if len(records) > max_records_per_key:
|
|
max_records_per_key = len(records)
|
|
|
|
# 🔗 CREAR UNA FILA POR CADA COMBINACIÓN
|
|
for i in range(max_records_per_key):
|
|
row_data = {}
|
|
|
|
# Campos de relación (mismos para todas las filas con esta clave)
|
|
for rel_field, rel_value in relation_fields.items():
|
|
row_data[rel_field] = self.safe_excel_value(rel_value)
|
|
|
|
# Datos de cada modelo
|
|
for model_name, records in model_records.items():
|
|
# Si hay un registro en esta posición i
|
|
if i < len(records):
|
|
record = records[i]
|
|
for field_name, value in record.items():
|
|
row_data[field_name] = value
|
|
else:
|
|
# Si no hay más registros para este modelo, poner campos vacíos
|
|
for field_name in model_field_mappings.get(model_name, []):
|
|
if field_name in ['seccion_aduanera', 'patente', 'pedimento']:
|
|
# Los campos de relación ya están llenados
|
|
continue
|
|
prefixed_field_name = f"{model_name}_{field_name}"
|
|
row_data[prefixed_field_name] = ''
|
|
|
|
combined_rows.append(row_data)
|
|
|
|
# 3. Determinar todos los campos únicos para los encabezados
|
|
all_fields_set = set()
|
|
|
|
# Campos de relación primero
|
|
common_relation_fields = ['seccion_aduanera', 'patente', 'pedimento']
|
|
|
|
# Agregar todos los campos de todas las filas
|
|
for row in combined_rows:
|
|
all_fields_set.update(row.keys())
|
|
|
|
# Ordenar campos: relación primero, luego alfabéticamente
|
|
all_fields = []
|
|
for rel_field in common_relation_fields:
|
|
if rel_field in all_fields_set:
|
|
all_fields.append(rel_field)
|
|
all_fields_set.remove(rel_field)
|
|
|
|
# Agregar el resto de campos ordenados alfabéticamente
|
|
all_fields.extend(sorted(all_fields_set))
|
|
|
|
total_records = len(combined_rows)
|
|
|
|
# 4. Manejar particionado
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(combined_rows, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
# Crear nuevo workbook para cada partición
|
|
current_wb = openpyxl.Workbook()
|
|
current_ws = current_wb.active
|
|
|
|
# Nombre de hoja limitado a 31 caracteres
|
|
sheet_name = f"Datastage_p{page_num}"
|
|
if len(sheet_name) > 31:
|
|
sheet_name = sheet_name[:31]
|
|
current_ws.title = sheet_name
|
|
|
|
# Escribir encabezados
|
|
current_ws.append(all_fields)
|
|
|
|
# Escribir datos de esta página
|
|
for row_data in page.object_list:
|
|
row_values = [row_data.get(field, '') for field in all_fields]
|
|
current_ws.append(row_values)
|
|
|
|
# Autoajustar anchos de columna
|
|
for column in current_ws.columns:
|
|
max_length = 0
|
|
column_letter = column[0].column_letter
|
|
|
|
for cell in column:
|
|
try:
|
|
if len(str(cell.value)) > max_length:
|
|
max_length = len(str(cell.value))
|
|
except:
|
|
pass
|
|
|
|
adjusted_width = min(max_length + 2, 50)
|
|
current_ws.column_dimensions[column_letter].width = adjusted_width
|
|
|
|
# Guardar archivo en ZIP
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_part{page_num}.xlsx", part_buffer.getvalue())
|
|
|
|
# Información de depuración
|
|
print(f"Creada partición {page_num} con {len(page.object_list)} registros combinados")
|
|
print(f"Total de claves únicas: {len(all_models_data)}")
|
|
print(f"Total de filas expandidas: {total_records}")
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_combinado.zip"'
|
|
return response
|
|
|
|
except Exception as e:
|
|
import traceback
|
|
error_details = traceback.format_exc()
|
|
print(f"Error en exportación: {error_details}")
|
|
return Response({'error': f'Error en exportación combinada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
|
|
|
|
def export_datastage_multiple_partitioned_excel_test_2(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage agrupados en la misma hoja de Excel, con particionado por límite de registros"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
|
|
# 1. Recopilar todos los datos de cada modelo por clave (aduana, patente, pedimento)
|
|
all_models_data = {}
|
|
model_field_mappings = {}
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
required_fields = ['seccion_aduanera', 'patente', 'pedimento']
|
|
|
|
for field in required_fields:
|
|
if field not in fields:
|
|
fields.append(field)
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
if filters:
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
else:
|
|
queryset = model.objects.none()
|
|
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
# Determinar campos de relación disponibles en este modelo
|
|
relation_fields = []
|
|
for field_name in ['seccion_aduanera', 'patente', 'pedimento']:
|
|
if field_name in fields:
|
|
relation_fields.append(field_name)
|
|
|
|
if not relation_fields:
|
|
# Si no hay campos de relación, usar un identificador único
|
|
relation_fields = ['datastage_id'] if 'datastage_id' in fields else [fields[0]]
|
|
|
|
# Procesar cada registro
|
|
for record in queryset:
|
|
# Crear clave de relación
|
|
key_parts = []
|
|
for rel_field in relation_fields:
|
|
if rel_field in record and record[rel_field] is not None:
|
|
key_parts.append(str(record[rel_field]))
|
|
|
|
if not key_parts:
|
|
# Si no hay campos de relación, usar un hash del registro
|
|
import hashlib
|
|
record_str = str(sorted(record.items()))
|
|
key = hashlib.md5(record_str.encode()).hexdigest()[:10]
|
|
else:
|
|
key = "_".join(key_parts)
|
|
|
|
# Agregar prefijo del modelo a los campos para evitar colisiones
|
|
prefixed_fields = {}
|
|
for field_name, value in record.items():
|
|
prefixed_field_name = f"{model_name}_{field_name}"
|
|
prefixed_fields[prefixed_field_name] = self.safe_excel_value(value)
|
|
# Registrar mapeo de campos
|
|
if model_name not in model_field_mappings:
|
|
model_field_mappings[model_name] = []
|
|
if field_name not in model_field_mappings[model_name]:
|
|
model_field_mappings[model_name].append(field_name)
|
|
|
|
# Guardar datos bajo la clave
|
|
if key not in all_models_data:
|
|
all_models_data[key] = {
|
|
'relation_fields': {},
|
|
'model_data': {}
|
|
}
|
|
|
|
# Guardar campos de relación
|
|
for rel_field in relation_fields:
|
|
if rel_field in record:
|
|
all_models_data[key]['relation_fields'][rel_field] = record[rel_field]
|
|
|
|
# Guardar datos del modelo
|
|
all_models_data[key]['model_data'][model_name] = prefixed_fields
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
# Si no hay datos, retornar error
|
|
if not all_models_data:
|
|
return Response({'error': 'No se encontraron datos para exportar'}, status=status.HTTP_404_NOT_FOUND)
|
|
|
|
# 2. Determinar todos los campos únicos que necesitaremos
|
|
all_fields_set = set()
|
|
|
|
# Primero agregar campos de relación comunes
|
|
common_relation_fields = ['seccion_aduanera', 'patente', 'pedimento']
|
|
|
|
for key, data in all_models_data.items():
|
|
# Agregar campos de relación
|
|
for rel_field in common_relation_fields:
|
|
if rel_field in data['relation_fields']:
|
|
all_fields_set.add(rel_field)
|
|
|
|
# Agregar campos de todos los modelos para esta clave
|
|
for model_name, model_fields in data['model_data'].items():
|
|
for field_name in model_fields.keys():
|
|
all_fields_set.add(field_name)
|
|
|
|
# Convertir a lista ordenada (campos de relación primero)
|
|
all_fields = []
|
|
for rel_field in common_relation_fields:
|
|
if rel_field in all_fields_set:
|
|
all_fields.append(rel_field)
|
|
all_fields_set.remove(rel_field)
|
|
|
|
# Luego agregar el resto de campos ordenados alfabéticamente
|
|
all_fields.extend(sorted(all_fields_set))
|
|
|
|
# 3. Crear datos combinados por fila
|
|
combined_rows = []
|
|
|
|
for key, data in all_models_data.items():
|
|
row_data = {}
|
|
|
|
# Campos de relación
|
|
for rel_field in common_relation_fields:
|
|
if rel_field in data['relation_fields']:
|
|
row_data[rel_field] = self.safe_excel_value(data['relation_fields'][rel_field])
|
|
else:
|
|
row_data[rel_field] = ''
|
|
|
|
# Datos de cada modelo
|
|
for model_name, model_fields in data['model_data'].items():
|
|
for field_name, value in model_fields.items():
|
|
row_data[field_name] = value
|
|
|
|
# Rellenar campos faltantes con vacío
|
|
for field in all_fields:
|
|
if field not in row_data:
|
|
row_data[field] = ''
|
|
|
|
combined_rows.append(row_data)
|
|
|
|
total_records = len(combined_rows)
|
|
|
|
# 4. Manejar particionado
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(combined_rows, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
# Crear nuevo workbook para cada partición
|
|
current_wb = openpyxl.Workbook()
|
|
current_ws = current_wb.active
|
|
|
|
# Nombre de hoja limitado a 31 caracteres
|
|
sheet_name = f"Datastage_p{page_num}"
|
|
if len(sheet_name) > 31:
|
|
sheet_name = sheet_name[:31]
|
|
current_ws.title = sheet_name
|
|
|
|
# Escribir encabezados
|
|
current_ws.append(all_fields)
|
|
|
|
# Escribir datos de esta página
|
|
for row_data in page.object_list:
|
|
row_values = [row_data.get(field, '') for field in all_fields]
|
|
current_ws.append(row_values)
|
|
|
|
# Autoajustar anchos de columna (opcional)
|
|
for column in current_ws.columns:
|
|
max_length = 0
|
|
column_letter = column[0].column_letter
|
|
|
|
for cell in column:
|
|
try:
|
|
if len(str(cell.value)) > max_length:
|
|
max_length = len(str(cell.value))
|
|
except:
|
|
pass
|
|
|
|
adjusted_width = min(max_length + 2, 50) # Máximo 50 caracteres
|
|
current_ws.column_dimensions[column_letter].width = adjusted_width
|
|
|
|
# Guardar archivo en ZIP
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_part{page_num}.xlsx", part_buffer.getvalue())
|
|
|
|
# Información de depuración
|
|
print(f"Creada partición {page_num} con {len(page.object_list)} registros combinados")
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_combinado.zip"'
|
|
return response
|
|
|
|
except Exception as e:
|
|
import traceback
|
|
error_details = traceback.format_exc()
|
|
print(f"Error en exportación: {error_details}")
|
|
return Response({'error': f'Error en exportación combinada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
|
|
def export_datastage_multiple_partitioned_excel_test(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage agrupados en la misma hoja de Excel, con particionado por límite de registros"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
file_counter = 1
|
|
current_wb = None
|
|
current_ws = None
|
|
current_record_count = 0
|
|
combined_fields = [] # Almacenar todos los campos únicos
|
|
combined_data = [] # Almacenar todos los datos
|
|
|
|
# 1. Primero recopilar todos los campos y datos
|
|
all_models_data = {}
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
if filters:
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
else:
|
|
queryset = model.objects.none()
|
|
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
# Almacenar los datos de este modelo
|
|
all_models_data[model_name] = {
|
|
'fields': fields,
|
|
'data': list(queryset),
|
|
'total_records': total_records
|
|
}
|
|
|
|
# Agregar campos únicos a la lista combinada
|
|
for field in fields:
|
|
if field not in combined_fields:
|
|
combined_fields.append(field)
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
# Si no hay datos, retornar error
|
|
if not all_models_data:
|
|
return Response({'error': 'No se encontraron datos para exportar'}, status=status.HTTP_404_NOT_FOUND)
|
|
|
|
# 2. Crear estructura de datos combinada
|
|
# Primero, preparar los datos combinados
|
|
for model_name, model_info in all_models_data.items():
|
|
fields = model_info['fields']
|
|
data = model_info['data']
|
|
|
|
for record in data:
|
|
combined_record = {}
|
|
|
|
# Para cada campo en la lista combinada
|
|
for combined_field in combined_fields:
|
|
if combined_field in fields:
|
|
# Si el campo existe en este modelo, usar su valor
|
|
value = record.get(combined_field)
|
|
combined_record[combined_field] = self.safe_excel_value(value)
|
|
else:
|
|
# Si no existe, poner vacío
|
|
combined_record[combined_field] = ''
|
|
|
|
# Agregar columna para identificar el modelo origen
|
|
combined_record['_modelo_origen'] = model_name
|
|
|
|
combined_data.append(combined_record)
|
|
|
|
# Agregar campo de modelo origen a la lista de campos si no está ya
|
|
if '_modelo_origen' not in combined_fields:
|
|
combined_fields.append('_modelo_origen')
|
|
|
|
total_combined_records = len(combined_data)
|
|
|
|
# 3. Manejar particionado
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(combined_data, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
# Crear nuevo workbook para cada partición
|
|
current_wb = openpyxl.Workbook()
|
|
current_ws = current_wb.active
|
|
current_ws.title = f"Todos_Modelos_p{page_num}"[:31]
|
|
|
|
# Escribir encabezados
|
|
current_ws.append(combined_fields)
|
|
|
|
# Escribir datos de esta página
|
|
for record in page.object_list:
|
|
row_values = [record.get(field, '') for field in combined_fields]
|
|
current_ws.append(row_values)
|
|
|
|
# Guardar archivo en ZIP
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_combinado_part{page_num}.xlsx", part_buffer.getvalue())
|
|
|
|
# Información de depuración (opcional)
|
|
print(f"Creada partición {page_num} con {len(page.object_list)} registros")
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_combinado.zip"'
|
|
return response
|
|
|
|
except Exception as e:
|
|
import traceback
|
|
error_details = traceback.format_exc()
|
|
print(f"Error en exportación: {error_details}")
|
|
return Response({'error': f'Error en exportación combinada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def export_datastage_multiple_partitioned_excel(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage a múltiples archivos Excel particionados inteligentemente"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
file_counter = 1
|
|
current_wb = None
|
|
current_file_records_count = 0
|
|
MAX_SHEETS_PER_FILE = 10 # Límite de hojas por archivo Excel
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
if filters:
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
else:
|
|
queryset = model.objects.none()
|
|
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
# Si el modelo necesita particionarse (más de MAX_RECORDS_PER_FILE)
|
|
if total_records > self.MAX_RECORDS_PER_FILE:
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
# Verificar si necesitamos crear nuevo archivo
|
|
# 1. Si no hay archivo actual
|
|
# 2. Si ya tenemos muchas hojas en este archivo
|
|
# 3. Si este archivo ya está "lleno" (muchos registros)
|
|
if (current_wb is None or
|
|
len(current_wb.sheetnames) >= MAX_SHEETS_PER_FILE or
|
|
current_file_records_count > self.MAX_RECORDS_PER_FILE * 3): # ~150K registros
|
|
|
|
if current_wb is not None:
|
|
# Guardar archivo actual en ZIP
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
|
|
file_counter += 1
|
|
|
|
# Crear nuevo workbook
|
|
current_wb = openpyxl.Workbook()
|
|
current_wb.remove(current_wb.active) # Remover hoja por defecto
|
|
current_file_records_count = 0
|
|
|
|
# Crear hoja para esta parte del modelo
|
|
sheet_name = f"{model_name[:25]}_p{page_num}"
|
|
ws = current_wb.create_sheet(title=sheet_name[:31])
|
|
ws.append(fields)
|
|
|
|
# Escribir datos
|
|
for row in page.object_list:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
ws.append(row_values)
|
|
|
|
current_file_records_count += len(page.object_list)
|
|
|
|
else:
|
|
# Modelo pequeño (≤ MAX_RECORDS_PER_FILE)
|
|
# Verificar si necesitamos nuevo archivo
|
|
if (current_wb is None or
|
|
len(current_wb.sheetnames) >= MAX_SHEETS_PER_FILE or
|
|
current_file_records_count + total_records > self.MAX_RECORDS_PER_FILE * 3):
|
|
|
|
if current_wb is not None:
|
|
# Guardar archivo actual
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
|
|
file_counter += 1
|
|
|
|
# Crear nuevo workbook
|
|
current_wb = openpyxl.Workbook()
|
|
current_wb.remove(current_wb.active)
|
|
current_file_records_count = 0
|
|
|
|
# Crear hoja para este modelo
|
|
sheet_name = model_name[:31]
|
|
ws = current_wb.create_sheet(title=sheet_name)
|
|
ws.append(fields)
|
|
|
|
# Escribir datos
|
|
for row in queryset:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
ws.append(row_values)
|
|
|
|
current_file_records_count += total_records
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
# Guardar el último workbook si existe
|
|
if current_wb is not None:
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
|
|
return response
|
|
|
|
except Exception as e:
|
|
return Response({'error': f'Error en exportación particionada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def export_datastage_multiple_to_csv(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage a múltiples archivos CSV en ZIP"""
|
|
zip_buffer = io.BytesIO()
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
csv_buffer = io.StringIO()
|
|
writer = csv.writer(csv_buffer)
|
|
writer.writerow(fields)
|
|
|
|
for row in queryset:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
writer.writerow(row_values)
|
|
|
|
# Agregar al ZIP
|
|
filename = f"{model_name}.csv"
|
|
zip_file.writestr(filename, csv_buffer.getvalue())
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
|
|
return response
|
|
|
|
def export_datastage_multiple_partitioned_csv(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage a múltiples archivos CSV particionados en ZIP"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
if total_records > self.MAX_RECORDS_PER_FILE:
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
csv_buffer = io.StringIO()
|
|
writer = csv.writer(csv_buffer)
|
|
|
|
writer.writerow(fields)
|
|
|
|
for row in page.object_list:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
writer.writerow(row_values)
|
|
|
|
# Agregar al ZIP
|
|
filename = f"{model_name}_part{page_num}.csv"
|
|
zip_file.writestr(filename, csv_buffer.getvalue())
|
|
|
|
else:
|
|
# Modelo pequeño, exportar completo
|
|
csv_buffer = io.StringIO()
|
|
writer = csv.writer(csv_buffer)
|
|
|
|
# Escribir encabezados
|
|
writer.writerow(fields)
|
|
|
|
# Escribir datos
|
|
for row in queryset:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
writer.writerow(row_values)
|
|
|
|
# Agregar al ZIP
|
|
filename = f"{model_name}.csv"
|
|
zip_file.writestr(filename, csv_buffer.getvalue())
|
|
|
|
except LookupError as e:
|
|
continue
|
|
except Exception as e:
|
|
continue
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
|
|
return response
|
|
|
|
except Exception as e:
|
|
return Response({'error': f'Error en exportación CSV particionada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def export_single_model_partitioned(self, request, model_name, fields, filters, total_records):
|
|
"""Exporta un solo modelo particionado a ZIP"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
module = 'datastage'
|
|
|
|
model = apps.get_model(module, model_name)
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
# Crear Excel para esta parte
|
|
wb = openpyxl.Workbook()
|
|
ws = wb.active
|
|
ws.title = f"Parte_{page_num}"[:31]
|
|
ws.append(fields)
|
|
|
|
for row in page.object_list:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
ws.append(row_values)
|
|
|
|
part_buffer = io.BytesIO()
|
|
wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
|
|
filename = f"{model_name}_part{page_num}.xlsx"
|
|
zip_file.writestr(filename, part_buffer.getvalue())
|
|
|
|
zip_buffer.seek(0)
|
|
zip_content = zip_buffer.getvalue()
|
|
|
|
response = HttpResponse(zip_content, content_type='application/zip')
|
|
response['Content-Disposition'] = f'attachment; filename="{model_name}_particionado.zip"'
|
|
response['Content-Length'] = len(zip_content)
|
|
|
|
return response
|
|
|
|
except Exception as e:
|
|
return Response({'error': f'Error exportando modelo: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def export_single_model_csv_partitioned(self, request, model_name, fields, filters, total_records):
|
|
"""Exporta un solo modelo CSV particionado a ZIP"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
module = 'datastage'
|
|
|
|
model = apps.get_model(module, model_name)
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
csv_buffer = io.StringIO()
|
|
writer = csv.writer(csv_buffer)
|
|
writer.writerow(fields)
|
|
|
|
for row in page.object_list:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
writer.writerow(row_values)
|
|
|
|
# Agregar al ZIP
|
|
filename = f"{model_name}_part{page_num}.csv"
|
|
zip_file.writestr(filename, csv_buffer.getvalue())
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
zip_content = zip_buffer.getvalue()
|
|
|
|
response = HttpResponse(zip_content, content_type='application/zip')
|
|
response['Content-Disposition'] = f'attachment; filename="{model_name}_particionado.zip"'
|
|
response['Content-Length'] = len(zip_content)
|
|
|
|
return response
|
|
|
|
except Exception as e:
|
|
return Response({'error': f'Error exportando modelo CSV: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def get_related_keys_from_filters(self, global_filters, models_data, user):
|
|
"""
|
|
Obtiene patentes, pedimentos y datastages que cumplen EXACTAMENTE con TODOS los filtros globales
|
|
VERSIÓN SIMPLIFICADA - Usa la MISMA lógica que apply_global_filters_to_model
|
|
"""
|
|
related_keys = {
|
|
'patentes': set(),
|
|
'pedimentos': set(),
|
|
'datastage_ids': set()
|
|
}
|
|
|
|
# Si no hay filtros, retornar vacío
|
|
if not any(v for v in global_filters.values() if v not in [None, '']):
|
|
return {}
|
|
|
|
all_records_with_filters = []
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
|
|
# ¡USAR LA MISMA FUNCIÓN QUE EN MODO SINGULAR!
|
|
filters = self.apply_global_filters_to_model(global_filters, model, user)
|
|
|
|
if filters:
|
|
# EJECUTAR CONSULTA - IDÉNTICO A MODO SINGULAR
|
|
queryset = model.objects.filter(**filters)
|
|
total = queryset.count()
|
|
|
|
# VERIFICACIÓN ESPECIAL PARA RFC
|
|
if 'rfc' in filters:
|
|
rfc_value = filters['rfc']
|
|
# Doble verificación: contar registros con ese RFC exacto
|
|
rfc_exact_count = queryset.filter(rfc=rfc_value).count()
|
|
|
|
if rfc_exact_count != total:
|
|
try:
|
|
other_rfcs = queryset.exclude(rfc=rfc_value).values_list('rfc', flat=True).distinct()[:5]
|
|
except:
|
|
pass
|
|
|
|
# Obtener registros
|
|
records = queryset.values('patente', 'pedimento', 'datastage_id')
|
|
all_records_with_filters.extend(list(records))
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
if not all_records_with_filters:
|
|
return {'patentes': set(), 'pedimentos': set(), 'datastage_ids': set()}
|
|
|
|
for record in all_records_with_filters:
|
|
if record.get('patente'):
|
|
related_keys['patentes'].add(record['patente'])
|
|
if record.get('pedimento'):
|
|
related_keys['pedimentos'].add(record['pedimento'])
|
|
if record.get('datastage_id'):
|
|
related_keys['datastage_ids'].add(record['datastage_id'])
|
|
|
|
return {k: list(v) for k, v in related_keys.items() if v}
|
|
|
|
def apply_global_filters_to_model(self, global_filters, model, user):
|
|
"""
|
|
Aplica filtros globales - VERSIÓN CORREGIDA CON UUID
|
|
"""
|
|
|
|
filters = {}
|
|
model_fields = [f.name for f in model._meta.get_fields()]
|
|
|
|
# ORGANIZACIÓN - Manejar como UUID
|
|
org_value = global_filters.get('organizacion')
|
|
if org_value and org_value != '' and 'organizacion' in model_fields:
|
|
field = model._meta.get_field('organizacion')
|
|
|
|
if hasattr(field, 'related_model'): # Es ForeignKey
|
|
# Convertir string a UUID
|
|
try:
|
|
import uuid
|
|
org_uuid = uuid.UUID(org_value)
|
|
filters['organizacion_id'] = org_uuid
|
|
except Exception as e:
|
|
# Fallback: dejar como string (puede no funcionar)
|
|
filters['organizacion_id'] = org_value
|
|
else: # Es CharField
|
|
filters['organizacion'] = org_value
|
|
|
|
# RFC - Manejar normalmente
|
|
rfc_value = global_filters.get('rfc')
|
|
if rfc_value and rfc_value != '' and 'rfc' in model_fields:
|
|
filters['rfc'] = rfc_value
|
|
|
|
# PATENTE
|
|
if global_filters.get('patente'):
|
|
filters['patente'] = global_filters['patente']
|
|
|
|
# PEDIMENTO
|
|
if global_filters.get('pedimento'):
|
|
filters['pedimento'] = global_filters['pedimento']
|
|
|
|
# FECHAS
|
|
if 'fecha_pago_real' in model_fields:
|
|
if global_filters.get('fecha_pago_desde'):
|
|
filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde']
|
|
|
|
if global_filters.get('fecha_pago_hasta'):
|
|
filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta']
|
|
|
|
return filters
|
|
|
|
def apply_related_filters(self, global_filters, model, related_keys, user):
|
|
filters = {}
|
|
model_fields = [f.name for f in model._meta.get_fields()]
|
|
|
|
# 1. Organización
|
|
if 'organizacion' in model_fields and global_filters.get('organizacion'):
|
|
filters['organizacion'] = global_filters['organizacion']
|
|
|
|
# 2. RFC (¡ESTO ES LO QUE FALTA!)
|
|
if 'rfc' in model_fields and global_filters.get('rfc'):
|
|
filters['rfc'] = global_filters['rfc']
|
|
|
|
# 3. Fechas (SIEMPRE se aplican)
|
|
if 'fecha_pago_real' in model_fields:
|
|
if global_filters.get('fecha_pago_desde'):
|
|
filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde']
|
|
|
|
if global_filters.get('fecha_pago_hasta'):
|
|
filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta']
|
|
|
|
# 🔥 SEGUNDO: Si hay related_keys, AÑADIRLAS a los filtros existentes
|
|
if any(related_keys.values()):
|
|
|
|
# Añadir patentes si existen
|
|
if related_keys.get('patentes') and 'patente' in model_fields:
|
|
filters['patente__in'] = related_keys['patentes']
|
|
|
|
# Añadir pedimentos si existen
|
|
if related_keys.get('pedimentos') and 'pedimento' in model_fields:
|
|
filters['pedimento__in'] = related_keys['pedimentos']
|
|
|
|
# Añadir datastage_ids si existen
|
|
if related_keys.get('datastage_ids') and 'datastage_id' in model_fields:
|
|
filters['datastage_id__in'] = related_keys['datastage_ids']
|
|
|
|
else:
|
|
# Solo patente y pedimento específicos (no listas)
|
|
if 'patente' in model_fields and global_filters.get('patente'):
|
|
filters['patente'] = global_filters['patente']
|
|
|
|
if 'pedimento' in model_fields and global_filters.get('pedimento'):
|
|
filters['pedimento'] = global_filters['pedimento']
|
|
|
|
return filters
|
|
|
|
def estimate_excel_file_size(self, num_records, num_columns):
|
|
"""Estima tamaño aproximado del archivo Excel"""
|
|
# Estimación aproximada: 100 bytes por celda
|
|
return num_records * num_columns * 100
|
|
|
|
def export_with_size_control(self, request, models_data, global_filters, related_keys):
|
|
"""Versión con control de tamaño de archivo"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
file_counter = 1
|
|
current_wb = None
|
|
current_file_size_estimate = 0
|
|
MAX_FILE_SIZE_ESTIMATE = 50 * 1024 * 1024 # 50MB estimado
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
if filters:
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
else:
|
|
queryset = model.objects.none()
|
|
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
# Calcular tamaño estimado para este modelo
|
|
model_size_estimate = self.estimate_excel_file_size(total_records, len(fields))
|
|
|
|
# Si el modelo es muy grande o no cabe en el archivo actual
|
|
needs_new_file = (
|
|
current_wb is None or
|
|
current_file_size_estimate + model_size_estimate > MAX_FILE_SIZE_ESTIMATE or
|
|
(total_records > self.MAX_RECORDS_PER_FILE and current_file_size_estimate > 0)
|
|
)
|
|
|
|
if needs_new_file and current_wb is not None:
|
|
# Guardar archivo actual
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
|
|
file_counter += 1
|
|
current_wb = None
|
|
current_file_size_estimate = 0
|
|
|
|
if current_wb is None:
|
|
current_wb = openpyxl.Workbook()
|
|
current_wb.remove(current_wb.active)
|
|
|
|
# Manejar modelos que exceden el límite por hoja
|
|
if total_records > self.MAX_RECORDS_PER_FILE:
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
# Crear hoja para esta parte
|
|
sheet_name = f"{model_name[:20]}_p{page_num}"[:31]
|
|
ws = current_wb.create_sheet(title=sheet_name)
|
|
ws.append(fields)
|
|
|
|
for row in page.object_list:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
ws.append(row_values)
|
|
|
|
# Actualizar tamaño estimado
|
|
page_size = self.estimate_excel_file_size(len(page.object_list), len(fields))
|
|
current_file_size_estimate += page_size
|
|
|
|
else:
|
|
# Modelo pequeño, una hoja
|
|
sheet_name = model_name[:31]
|
|
ws = current_wb.create_sheet(title=sheet_name)
|
|
ws.append(fields)
|
|
|
|
for row in queryset:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
ws.append(row_values)
|
|
|
|
current_file_size_estimate += model_size_estimate
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
# Guardar último archivo si existe
|
|
if current_wb is not None:
|
|
part_buffer = io.BytesIO()
|
|
current_wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
|
|
return response
|
|
|
|
except Exception as e:
|
|
return Response({'error': f'Error: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
class ExportModelView(APIView):
|
|
my_tags = ['Reportes']
|
|
permission_classes = [IsAuthenticated & (IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]
|
|
|
|
@swagger_auto_schema(
|
|
manual_parameters=[
|
|
openapi.Parameter('model', openapi.IN_QUERY, description="Nombre del modelo (ejemplo: Registro500)",
|
|
type=openapi.TYPE_STRING, required=True)
|
|
],
|
|
responses={200: openapi.Response('Campos disponibles', schema=openapi.Schema(
|
|
type=openapi.TYPE_OBJECT,
|
|
properties={
|
|
'fields': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_STRING))
|
|
}
|
|
))}
|
|
)
|
|
def get(self, request, *args, **kwargs):
|
|
"""
|
|
Devuelve los campos disponibles para el modelo solicitado.
|
|
Ejemplo: /api/reports/exportmodel/?model=Registro500
|
|
"""
|
|
model_name = request.query_params.get('model')
|
|
module = request.query_params.get('module', 'datastage')
|
|
if not model_name:
|
|
return Response({'error': 'model is required'}, status=status.HTTP_400_BAD_REQUEST)
|
|
try:
|
|
model = apps.get_model(module, model_name)
|
|
except LookupError:
|
|
return Response({'error': f'Model {model_name} not found in app {module}'}, status=status.HTTP_404_NOT_FOUND)
|
|
fields = [f.name for f in model._meta.fields]
|
|
return Response({'fields': fields})
|
|
|
|
@swagger_auto_schema(
|
|
request_body=ExportModelSerializer,
|
|
responses={200: 'Archivo generado (Excel o CSV)'}
|
|
)
|
|
def post(self, request, *args, **kwargs):
|
|
model_name = request.data.get('model')
|
|
fields = request.data.get('fields')
|
|
filters = request.data.get('filters', {})
|
|
export_type = request.data.get('type', 'csv')
|
|
module = request.data.get('module', 'datastage')
|
|
|
|
if not model_name or not fields:
|
|
return Response({'error': 'model and fields are required'}, status=status.HTTP_400_BAD_REQUEST)
|
|
|
|
if export_type == 'excel':
|
|
return export_model_to_excel(request, model_name, fields, module, filters)
|
|
else:
|
|
return export_model_to_csv(request, model_name, fields, module, filters)
|
|
|
|
|
|
# Create your views here.
|
|
|
|
|
|
class ExportModelView(APIView):
|
|
my_tags = ['Reportes']
|
|
permission_classes = [IsAuthenticated & (
|
|
IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]
|
|
|
|
@swagger_auto_schema(request_body=ExportModelSerializer, esponses={200: 'Archivo generado (Excel o CSV)'})
|
|
def post(self, request, *args, **kwargs):
|
|
model_name = request.data.get('model')
|
|
fields = request.data.get('fields')
|
|
filters = request.data.get('filters', {})
|
|
filters['organizacion__id'] = self.request.user.organizacion.id if hasattr(request.user, 'organizacion') and request.user.organizacion else None
|
|
export_type = request.data.get('type', 'csv')
|
|
if not model_name or not fields:
|
|
return Response({'error': 'model and fields are required'}, status=status.HTTP_400_BAD_REQUEST)
|
|
|
|
module = request.data.get('module', 'datastage')
|
|
if export_type == 'excel':
|
|
return export_model_to_excel(request, model_name, fields, module, filters)
|
|
else:
|
|
return export_model_to_csv(request, model_name, fields, module, filters)
|
|
|
|
# Resumen general para dashboard
|
|
|
|
|
|
@api_view(['GET'])
|
|
@permission_classes([
|
|
IsAuthenticated
|
|
])
|
|
def dashboard_summary(request):
|
|
org_id = request.query_params.get('organizacion_id')
|
|
filters = {}
|
|
user = request.user
|
|
|
|
pedimento_app = request.query_params.get('pedimento_app')
|
|
aduana = request.query_params.get('aduana')
|
|
patente = request.query_params.get('patente')
|
|
regimen = request.query_params.get('regimen')
|
|
agente_aduanal = request.query_params.get('agente_aduanal')
|
|
tipo_operacion = request.query_params.get('tipo_operacion')
|
|
fecha_pago_gte = request.query_params.get('fecha_pago__gte')
|
|
fecha_pago_lte = request.query_params.get('fecha_pago__lte')
|
|
contribuyente__rfc = request.query_params.get('contribuyente__rfc')
|
|
|
|
# Si no se especifica organización y el usuario tiene organización, usarla
|
|
if not org_id and hasattr(user, 'organizacion') and user.organizacion:
|
|
org_id = user.organizacion.id
|
|
# Si no es superusuario, filtrar por organización
|
|
if org_id and not getattr(user, 'is_superuser', False):
|
|
filters['organizacion_id'] = org_id
|
|
|
|
# Si el usuario pertenece al grupo Importador, filtrar por RFC
|
|
if user.groups.filter(name='Importador').exists():
|
|
rfc = getattr(user, 'rfc', None)
|
|
if rfc:
|
|
filters['contribuyente__rfc'] = rfc
|
|
|
|
if pedimento_app:
|
|
filters['pedimento_app'] = pedimento_app
|
|
if aduana:
|
|
filters['aduana'] = aduana
|
|
if patente:
|
|
filters['patente'] = patente
|
|
if regimen:
|
|
filters['regimen'] = regimen
|
|
if agente_aduanal:
|
|
filters['agente_aduanal'] = agente_aduanal
|
|
if tipo_operacion:
|
|
filters['tipo_operacion__tipo'] = tipo_operacion
|
|
if fecha_pago_gte:
|
|
filters['fecha_pago__gte'] = fecha_pago_gte
|
|
if fecha_pago_lte:
|
|
filters['fecha_pago__lte'] = fecha_pago_lte
|
|
if contribuyente__rfc:
|
|
filters['contribuyente__rfc'] = contribuyente__rfc
|
|
# Filtrar pedimentos
|
|
pedimentos_qs = Pedimento.objects.filter(**filters)
|
|
pedimentos_total = pedimentos_qs.count()
|
|
pedimentos_completos = pedimentos_qs.filter(existe_expediente=True).count()
|
|
pedimentos_pendientes = pedimentos_total - pedimentos_completos
|
|
|
|
# Usar los IDs de pedimentos filtrados para los demás modelos
|
|
pedimento_ids = list(pedimentos_qs.values_list('id', flat=True))
|
|
|
|
coves_total = Cove.objects.filter(pedimento_id__in=pedimento_ids).count()
|
|
coves_procesados = Cove.objects.filter(
|
|
pedimento_id__in=pedimento_ids, cove_descargado=True).count()
|
|
acuse_coves_procesados = Cove.objects.filter(
|
|
pedimento_id__in=pedimento_ids, acuse_cove_descargado=True).count()
|
|
acuse_coves_pendientes = coves_total - acuse_coves_procesados
|
|
coves_pendientes = coves_total - coves_procesados
|
|
|
|
edocs_total = EDocument.objects.filter(
|
|
pedimento_id__in=pedimento_ids).count()
|
|
edocs_descargados = EDocument.objects.filter(
|
|
pedimento_id__in=pedimento_ids, edocument_descargado=True).count()
|
|
acuse_descargados = EDocument.objects.filter(
|
|
pedimento_id__in=pedimento_ids, acuse_descargado=True).count()
|
|
edocs_pendientes = edocs_total - edocs_descargados
|
|
acuses_pendientes = edocs_total - acuse_descargados
|
|
|
|
remesas_total = Document.objects.filter(
|
|
document_type__id=3, pedimento_id__in=pedimento_ids).count()
|
|
documentos_descargados = Document.objects.filter(
|
|
pedimento_id__in=pedimento_ids).count()
|
|
partidas_total = Partida.objects.filter(
|
|
pedimento_id__in=pedimento_ids).count()
|
|
partidas_descargadas = Partida.objects.filter(
|
|
pedimento_id__in=pedimento_ids, descargado=True).count()
|
|
partidas_pendientes = partidas_total - partidas_descargadas
|
|
# Indicadores de cumplimiento
|
|
cumplimiento_pedimentos = (
|
|
pedimentos_completos / pedimentos_total * 100) if pedimentos_total else 0
|
|
cumplimiento_acuse_coves = (
|
|
acuse_coves_procesados / coves_total * 100) if coves_total else 0
|
|
cumplimiento_coves = (
|
|
coves_procesados / coves_total * 100) if coves_total else 0
|
|
cumplimiento_edocs = (edocs_descargados /
|
|
edocs_total * 100) if edocs_total else 0
|
|
cumplimiento_acuses = (acuse_descargados /
|
|
edocs_total * 100) if edocs_total else 0
|
|
cumplimiento_partidas = (partidas_descargadas /
|
|
partidas_total * 100) if partidas_total else 0
|
|
|
|
# Calcular cumplimiento total (promedio de todos los indicadores)
|
|
indicadores = [
|
|
cumplimiento_pedimentos,
|
|
cumplimiento_coves,
|
|
cumplimiento_acuse_coves,
|
|
cumplimiento_edocs,
|
|
cumplimiento_acuses,
|
|
cumplimiento_partidas
|
|
]
|
|
cumplimiento_total = sum(indicadores) / len(indicadores) if indicadores else 0
|
|
|
|
return Response({
|
|
"cumplimiento_total": round(cumplimiento_total, 2),
|
|
"pedimentos": {
|
|
"total": pedimentos_total,
|
|
"completos": pedimentos_completos,
|
|
"pendientes": pedimentos_pendientes,
|
|
"cumplimiento": round(cumplimiento_pedimentos, 2)
|
|
},
|
|
"coves": {
|
|
"total": coves_total,
|
|
"coves_procesados": coves_procesados,
|
|
"coves_pendientes": coves_pendientes,
|
|
"coves_cumplimiento": round(cumplimiento_coves, 2),
|
|
|
|
},
|
|
"acuse_coves": {
|
|
"total": coves_total,
|
|
"acuse_coves_procesados": acuse_coves_procesados,
|
|
"acuse_coves_pendientes": acuse_coves_pendientes,
|
|
"acuse_coves_cumplimiento": round(cumplimiento_acuse_coves, 2)
|
|
},
|
|
"edocuments": {
|
|
"total": edocs_total,
|
|
"edocs_descargados": edocs_descargados,
|
|
"edocs_pendientes": edocs_pendientes,
|
|
"edocs_cumplimiento": round(cumplimiento_edocs, 2),
|
|
},
|
|
"acuses":{
|
|
"total": edocs_total,
|
|
"acuse_descargados": acuse_descargados,
|
|
"acuses_pendientes": acuses_pendientes,
|
|
"acuses_cumplimiento": round(cumplimiento_acuses, 2)
|
|
},
|
|
"remesas": {
|
|
"total": remesas_total
|
|
},
|
|
"documentos": {
|
|
"descargados": documentos_descargados
|
|
},
|
|
"partidas": {
|
|
"total": partidas_total,
|
|
"partidas_descargadas": partidas_descargadas,
|
|
"partidas_pendientes": partidas_pendientes,
|
|
"cumplimiento": round(cumplimiento_partidas, 2)
|
|
}
|
|
})
|