1115 lines
50 KiB
Python
1115 lines
50 KiB
Python
from warnings import filters
|
|
from rest_framework.decorators import api_view, permission_classes
|
|
from rest_framework.permissions import IsAuthenticated
|
|
from api.customs.models import Pedimento, Cove, EDocument, Partida
|
|
from api.record.models import Document
|
|
from api.organization.models import Organizacion
|
|
from django.db.models import Count, Q
|
|
|
|
# Registrar endpoint en urls.py:
|
|
# path('dashboard/summary/', dashboard_summary)
|
|
import csv
|
|
import io
|
|
from drf_yasg.utils import swagger_auto_schema
|
|
from drf_yasg import openapi
|
|
from .serializers import ExportModelSerializer
|
|
from rest_framework.response import Response
|
|
from django.http import HttpResponse
|
|
import openpyxl
|
|
from django.apps import apps
|
|
from rest_framework import status
|
|
from django.shortcuts import render
|
|
from rest_framework import viewsets
|
|
|
|
from .serializers import ExportModelSerializer
|
|
from core.permissions import (
|
|
IsSameOrganization,
|
|
IsSameOrganizationDeveloper,
|
|
IsSameOrganizationAndAdmin,
|
|
IsSuperUser
|
|
)
|
|
from rest_framework.permissions import IsAuthenticated
|
|
|
|
import csv
|
|
import io
|
|
import openpyxl
|
|
from django.http import HttpResponse
|
|
from django.apps import apps
|
|
from rest_framework.views import APIView
|
|
from rest_framework.response import Response
|
|
from rest_framework import status
|
|
from drf_yasg.utils import swagger_auto_schema
|
|
from drf_yasg import openapi
|
|
from rest_framework.permissions import IsAuthenticated
|
|
from core.permissions import (
|
|
IsSameOrganization,
|
|
IsSameOrganizationDeveloper,
|
|
IsSameOrganizationAndAdmin,
|
|
IsSuperUser
|
|
)
|
|
from .serializers import ExportModelSerializer
|
|
import uuid
|
|
import datetime
|
|
import zipfile
|
|
from django.db import models
|
|
|
|
def export_model_to_csv(request, model_name, fields, module='datastage', filters=None):
    """Stream the selected fields of a model out as a CSV attachment.

    Resolves ``model_name`` inside the ``module`` app, applies the optional
    filter dict, and writes one CSV row per record straight into the HTTP
    response body.
    """
    target_model = apps.get_model(module, model_name)
    rows = target_model.objects.filter(**(filters or {})).values(*fields)

    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = f'attachment; filename="{model_name}.csv"'

    writer = csv.DictWriter(response, fieldnames=fields)
    writer.writeheader()
    writer.writerows(rows)
    return response
|
|
|
|
|
|
def export_model_to_excel(request, model_name, fields, module='datastage', filters=None):
    """Export the selected fields of a model as an .xlsx attachment.

    Args:
        request: incoming HTTP request (unused; kept for signature parity
            with export_model_to_csv).
        model_name: model class name inside ``module``.
        fields: ordered list of field names; also used as the header row.
        module: Django app label in which to resolve the model.
        filters: optional dict of ORM filter kwargs.

    Returns:
        HttpResponse carrying the workbook bytes.
    """
    model = apps.get_model(module, model_name)
    queryset = model.objects.filter(**(filters or {})).values(*fields)

    wb = openpyxl.Workbook()
    ws = wb.active
    ws.append(fields)

    for row in queryset:
        row_values = []
        for field in fields:
            value = row[field]
            # BUG FIX: the old check `hasattr(value, '__str__')` is true for
            # every Python object, so numbers were exported as text and None
            # became the literal string "None". Keep Excel-native types as-is
            # and only stringify values openpyxl cannot store (UUIDs, etc.).
            if value is None:
                value = ''
            elif not isinstance(value, (str, int, float,
                                        datetime.datetime, datetime.date)):
                value = str(value)
            row_values.append(value)
        ws.append(row_values)

    output = io.BytesIO()
    wb.save(output)
    output.seek(0)
    response = HttpResponse(
        output.read(),
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = f'attachment; filename="{model_name}.xlsx"'
    return response
|
|
|
|
# class ControlPedimentoView(APIView):
|
|
# my_tags = ['Control-Pedimento']
|
|
# permission_classes = [IsAuthenticated & (IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]
|
|
|
|
# @swagger_auto_schema(request_body=ExportModelSerializer, responses={200: 'Archivo generado (Excel o CSV)'})
|
|
# def post(self, request, *args, **kwargs):
|
|
# """
|
|
# Endpoint específico para exportación de DataStage con soporte múltiple
|
|
# """
|
|
# # Verificar si es modo múltiple
|
|
# modo = request.data.get('modo', 'simple')
|
|
|
|
# if modo == 'multiple':
|
|
# return self.handle_multiple_export(request)
|
|
# else:
|
|
# return self.handle_simple_export(request)
|
|
|
|
|
|
|
|
class ExportDataStageView(APIView):
|
|
my_tags = ['Reportes-DataStage']
|
|
permission_classes = [IsAuthenticated & (IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]
|
|
|
|
# Constantes para partición
|
|
# MAX_RECORDS_PER_FILE = 100 # Límite seguro por archivo
|
|
MAX_RECORDS_PER_FILE = 120000 # Límite seguro por archivo
|
|
|
|
def safe_excel_value(self, value):
|
|
"""
|
|
Convierte cualquier valor a un formato seguro para Excel
|
|
"""
|
|
if value is None:
|
|
return ''
|
|
elif isinstance(value, (uuid.UUID,)):
|
|
return str(value)
|
|
elif hasattr(value, 'uuid'):
|
|
return str(value.uuid)
|
|
elif hasattr(value, 'id'):
|
|
return str(value.id)
|
|
elif isinstance(value, (datetime.datetime, datetime.date)):
|
|
return value.isoformat()
|
|
elif isinstance(value, (dict, list)):
|
|
return str(value)
|
|
else:
|
|
return str(value)
|
|
|
|
@swagger_auto_schema(request_body=ExportModelSerializer, responses={200: 'Archivo generado (Excel o CSV)'})
|
|
def post(self, request, *args, **kwargs):
|
|
"""
|
|
Endpoint específico para exportación de DataStage con soporte múltiple
|
|
"""
|
|
# Verificar si es modo múltiple
|
|
modo = request.data.get('modo', 'simple')
|
|
|
|
if modo == 'multiple':
|
|
return self.handle_multiple_export(request)
|
|
else:
|
|
return self.handle_simple_export(request)
|
|
|
|
def handle_simple_export(self, request):
|
|
"""Maneja exportación simple de DataStage (un solo modelo)"""
|
|
model_name = request.data.get('model')
|
|
fields = request.data.get('fields')
|
|
global_filters = request.data.get('globalFilters', {})
|
|
export_type = request.data.get('format', 'csv')
|
|
module = 'datastage'
|
|
|
|
if not model_name or not fields:
|
|
return Response({'error': 'model and fields are required'}, status=status.HTTP_400_BAD_REQUEST)
|
|
|
|
try:
|
|
model = apps.get_model(module, model_name)
|
|
filters = self.apply_global_filters_to_model(global_filters, model, request.user)
|
|
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
total_records = queryset.count()
|
|
|
|
if export_type == 'excel':
|
|
# Verificar si necesita partición
|
|
if total_records > self.MAX_RECORDS_PER_FILE:
|
|
return self.export_single_model_partitioned(request, model_name, fields, filters, total_records)
|
|
else:
|
|
return export_model_to_excel(request, model_name, fields, module, filters)
|
|
else:
|
|
if total_records > self.MAX_RECORDS_PER_FILE:
|
|
return self.export_single_model_csv_partitioned(request, model_name, fields, filters, total_records)
|
|
else:
|
|
return export_model_to_csv(request, model_name, fields, module, filters)
|
|
|
|
except LookupError:
|
|
return Response({'error': f'Model {model_name} not found'}, status=status.HTTP_404_NOT_FOUND)
|
|
|
|
def handle_multiple_export(self, request):
|
|
"""Maneja exportación múltiple de DataStage (varios modelos)"""
|
|
models_data = request.data.get('models', [])
|
|
export_type = request.data.get('format', 'csv')
|
|
global_filters = request.data.get('globalFilters', {})
|
|
|
|
if not models_data:
|
|
return Response({'error': 'models are required for multiple export'}, status=status.HTTP_400_BAD_REQUEST)
|
|
|
|
related_keys = self.get_related_keys_from_filters(global_filters, models_data, request.user)
|
|
|
|
if export_type == 'excel':
|
|
# Siempre usar el método particionado inteligente para Excel
|
|
return self.export_datastage_multiple_partitioned_excel(request, models_data, global_filters, related_keys)
|
|
else:
|
|
# Para CSV, podemos mantener la lógica actual o mejorarla
|
|
total_estimated_records = self.estimate_total_records(models_data, global_filters, related_keys, request.user)
|
|
if total_estimated_records > self.MAX_RECORDS_PER_FILE:
|
|
return self.export_datastage_multiple_partitioned_csv(request, models_data, global_filters, related_keys)
|
|
else:
|
|
return self.export_datastage_multiple_to_csv(request, models_data, global_filters, related_keys)
|
|
|
|
def estimate_total_records(self, models_data, global_filters, related_keys, user):
|
|
"""Estima el total de registros para todos los modelos"""
|
|
total = 0
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, user)
|
|
total += model.objects.filter(**filters).count()
|
|
except:
|
|
continue
|
|
return total
|
|
|
|
def export_datastage_multiple_to_excel(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage con filtrado relacionado (múltiples hojas)"""
|
|
wb = openpyxl.Workbook()
|
|
wb.remove(wb.active)
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
|
|
# 🔥 APLICAR FILTROS RELACIONADOS
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
# Si hay filtros, aplicarlos; si no, obtener todos los registros
|
|
if filters:
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
else:
|
|
queryset = model.objects.none() # No obtener nada si no hay filtros
|
|
|
|
# Si no hay registros, saltar este modelo
|
|
if queryset.count() == 0:
|
|
continue
|
|
|
|
# Crear hoja (limitar nombre a 31 caracteres)
|
|
sheet_name = model_name[:31]
|
|
ws = wb.create_sheet(title=sheet_name)
|
|
|
|
# Escribir encabezados
|
|
ws.append(fields)
|
|
|
|
# Escribir datos
|
|
for row in queryset:
|
|
row_values = []
|
|
for field in fields:
|
|
value = row[field]
|
|
# 🔥 USAR safe_excel_value para convertir valores
|
|
row_values.append(self.safe_excel_value(value))
|
|
ws.append(row_values)
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
# Si no se crearon hojas, crear una vacía
|
|
if len(wb.sheetnames) == 0:
|
|
ws = wb.create_sheet(title="Sin datos")
|
|
ws.append(["No se encontraron datos para los modelos especificados"])
|
|
|
|
output = io.BytesIO()
|
|
wb.save(output)
|
|
output.seek(0)
|
|
|
|
response = HttpResponse(
|
|
output.read(),
|
|
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
|
|
)
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_related_report.xlsx"'
|
|
return response
|
|
|
|
    def export_datastage_multiple_partitioned_excel(self, request, models_data, global_filters, related_keys):
        """Export multiple DataStage models into one or more partitioned
        Excel files bundled in a single ZIP.

        A new .xlsx "part" is started whenever the current workbook reaches
        MAX_SHEETS_PER_FILE sheets or roughly 3x MAX_RECORDS_PER_FILE rows;
        models larger than MAX_RECORDS_PER_FILE are split across paginated
        sheets. Any error is reported as a 500 JSON response.
        """
        try:
            zip_buffer = io.BytesIO()

            with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
                file_counter = 1                 # suffix for datastage_partN.xlsx
                current_wb = None                # workbook currently being filled
                current_file_records_count = 0   # rows written into current_wb
                MAX_SHEETS_PER_FILE = 10  # Límite de hojas por archivo Excel

                for model_data in models_data:
                    model_name = model_data.get('model')
                    fields = model_data.get('fields', [])

                    if not model_name or not fields:
                        continue

                    try:
                        model = apps.get_model('datastage', model_name)
                        filters = self.apply_related_filters(global_filters, model, related_keys, request.user)

                        # With no filters at all, export nothing for this model.
                        if filters:
                            queryset = model.objects.filter(**filters).values(*fields)
                        else:
                            queryset = model.objects.none()

                        total_records = queryset.count()

                        if total_records == 0:
                            continue

                        # Large model (> MAX_RECORDS_PER_FILE): paginate it
                        # across several sheets, possibly spanning parts.
                        if total_records > self.MAX_RECORDS_PER_FILE:
                            from django.core.paginator import Paginator
                            paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)

                            for page_num in paginator.page_range:
                                page = paginator.page(page_num)

                                # Start a new .xlsx part when:
                                # 1. there is no workbook yet,
                                # 2. the sheet cap is reached, or
                                # 3. this part is "full" (~3x MAX_RECORDS_PER_FILE rows).
                                if (current_wb is None or
                                    len(current_wb.sheetnames) >= MAX_SHEETS_PER_FILE or
                                        current_file_records_count > self.MAX_RECORDS_PER_FILE * 3):

                                    if current_wb is not None:
                                        # Flush the finished part into the ZIP.
                                        part_buffer = io.BytesIO()
                                        current_wb.save(part_buffer)
                                        part_buffer.seek(0)
                                        zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
                                        file_counter += 1

                                    # Fresh workbook without the default sheet.
                                    current_wb = openpyxl.Workbook()
                                    current_wb.remove(current_wb.active)
                                    current_file_records_count = 0

                                # One sheet per page of this model
                                # (Excel caps sheet names at 31 chars).
                                sheet_name = f"{model_name[:25]}_p{page_num}"
                                ws = current_wb.create_sheet(title=sheet_name[:31])
                                ws.append(fields)

                                # Write the page's rows through safe_excel_value.
                                for row in page.object_list:
                                    row_values = [self.safe_excel_value(row[field]) for field in fields]
                                    ws.append(row_values)

                                current_file_records_count += len(page.object_list)

                        else:
                            # Small model (fits one sheet); may still need
                            # to roll over to a new part first.
                            if (current_wb is None or
                                len(current_wb.sheetnames) >= MAX_SHEETS_PER_FILE or
                                    current_file_records_count + total_records > self.MAX_RECORDS_PER_FILE * 3):

                                if current_wb is not None:
                                    # Flush the finished part into the ZIP.
                                    part_buffer = io.BytesIO()
                                    current_wb.save(part_buffer)
                                    part_buffer.seek(0)
                                    zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
                                    file_counter += 1

                                # Fresh workbook without the default sheet.
                                current_wb = openpyxl.Workbook()
                                current_wb.remove(current_wb.active)
                                current_file_records_count = 0

                            # Single sheet holding the whole model.
                            sheet_name = model_name[:31]
                            ws = current_wb.create_sheet(title=sheet_name)
                            ws.append(fields)

                            for row in queryset:
                                row_values = [self.safe_excel_value(row[field]) for field in fields]
                                ws.append(row_values)

                            current_file_records_count += total_records

                    except LookupError:
                        # Unknown model name: skip it, keep exporting the rest.
                        continue

                # Flush the last (still open) workbook, if any.
                if current_wb is not None:
                    part_buffer = io.BytesIO()
                    current_wb.save(part_buffer)
                    part_buffer.seek(0)
                    zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())

            zip_buffer.seek(0)

            response = HttpResponse(zip_buffer.read(), content_type='application/zip')
            response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
            return response

        except Exception as e:
            return Response({'error': f'Error en exportación particionada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def export_datastage_multiple_to_csv(self, request, models_data, global_filters, related_keys):
|
|
"""Exporta múltiples modelos de DataStage a múltiples archivos CSV en ZIP"""
|
|
zip_buffer = io.BytesIO()
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
fields = model_data.get('fields', [])
|
|
|
|
if not model_name or not fields:
|
|
continue
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
filters = self.apply_related_filters(global_filters, model, related_keys, request.user)
|
|
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
total_records = queryset.count()
|
|
|
|
if total_records == 0:
|
|
continue
|
|
|
|
csv_buffer = io.StringIO()
|
|
writer = csv.writer(csv_buffer)
|
|
writer.writerow(fields)
|
|
|
|
for row in queryset:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
writer.writerow(row_values)
|
|
|
|
# Agregar al ZIP
|
|
filename = f"{model_name}.csv"
|
|
zip_file.writestr(filename, csv_buffer.getvalue())
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
response = HttpResponse(zip_buffer.read(), content_type='application/zip')
|
|
response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
|
|
return response
|
|
|
|
    def export_datastage_multiple_partitioned_csv(self, request, models_data, global_filters, related_keys):
        """Export multiple DataStage models as CSV files inside a ZIP,
        splitting any model with more than MAX_RECORDS_PER_FILE rows into
        numbered part files. Errors on the whole export return a 500.
        """
        try:
            zip_buffer = io.BytesIO()

            with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:

                for model_data in models_data:
                    model_name = model_data.get('model')
                    fields = model_data.get('fields', [])

                    if not model_name or not fields:
                        continue

                    try:
                        model = apps.get_model('datastage', model_name)
                        filters = self.apply_related_filters(global_filters, model, related_keys, request.user)

                        queryset = model.objects.filter(**filters).values(*fields)
                        total_records = queryset.count()

                        if total_records == 0:
                            continue

                        # Large model: one CSV part per page.
                        if total_records > self.MAX_RECORDS_PER_FILE:
                            from django.core.paginator import Paginator
                            paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)

                            for page_num in paginator.page_range:
                                page = paginator.page(page_num)

                                csv_buffer = io.StringIO()
                                writer = csv.writer(csv_buffer)

                                # Each part repeats the header row.
                                writer.writerow(fields)

                                for row in page.object_list:
                                    row_values = [self.safe_excel_value(row[field]) for field in fields]
                                    writer.writerow(row_values)

                                # Agregar al ZIP
                                filename = f"{model_name}_part{page_num}.csv"
                                zip_file.writestr(filename, csv_buffer.getvalue())

                        else:
                            # Modelo pequeño, exportar completo (single CSV).
                            csv_buffer = io.StringIO()
                            writer = csv.writer(csv_buffer)

                            # Escribir encabezados
                            writer.writerow(fields)

                            # Escribir datos
                            for row in queryset:
                                row_values = [self.safe_excel_value(row[field]) for field in fields]
                                writer.writerow(row_values)

                            # Agregar al ZIP
                            filename = f"{model_name}.csv"
                            zip_file.writestr(filename, csv_buffer.getvalue())

                    except LookupError as e:
                        # Unknown model name: skip it, keep exporting the rest.
                        continue
                    except Exception as e:
                        # NOTE(review): any other per-model error is silently
                        # skipped (best-effort export); consider logging `e`
                        # so failures are not invisible.
                        continue

            zip_buffer.seek(0)

            response = HttpResponse(zip_buffer.read(), content_type='application/zip')
            response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
            return response

        except Exception as e:
            return Response({'error': f'Error en exportación CSV particionada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def export_single_model_partitioned(self, request, model_name, fields, filters, total_records):
|
|
"""Exporta un solo modelo particionado a ZIP"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
module = 'datastage'
|
|
|
|
model = apps.get_model(module, model_name)
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
# Crear Excel para esta parte
|
|
wb = openpyxl.Workbook()
|
|
ws = wb.active
|
|
ws.title = f"Parte_{page_num}"[:31]
|
|
ws.append(fields)
|
|
|
|
for row in page.object_list:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
ws.append(row_values)
|
|
|
|
part_buffer = io.BytesIO()
|
|
wb.save(part_buffer)
|
|
part_buffer.seek(0)
|
|
|
|
filename = f"{model_name}_part{page_num}.xlsx"
|
|
zip_file.writestr(filename, part_buffer.getvalue())
|
|
|
|
zip_buffer.seek(0)
|
|
zip_content = zip_buffer.getvalue()
|
|
|
|
response = HttpResponse(zip_content, content_type='application/zip')
|
|
response['Content-Disposition'] = f'attachment; filename="{model_name}_particionado.zip"'
|
|
response['Content-Length'] = len(zip_content)
|
|
|
|
return response
|
|
|
|
except Exception as e:
|
|
return Response({'error': f'Error exportando modelo: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def export_single_model_csv_partitioned(self, request, model_name, fields, filters, total_records):
|
|
"""Exporta un solo modelo CSV particionado a ZIP"""
|
|
try:
|
|
zip_buffer = io.BytesIO()
|
|
module = 'datastage'
|
|
|
|
model = apps.get_model(module, model_name)
|
|
queryset = model.objects.filter(**filters).values(*fields)
|
|
|
|
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
|
|
from django.core.paginator import Paginator
|
|
paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)
|
|
|
|
for page_num in paginator.page_range:
|
|
page = paginator.page(page_num)
|
|
|
|
csv_buffer = io.StringIO()
|
|
writer = csv.writer(csv_buffer)
|
|
writer.writerow(fields)
|
|
|
|
for row in page.object_list:
|
|
row_values = [self.safe_excel_value(row[field]) for field in fields]
|
|
writer.writerow(row_values)
|
|
|
|
# Agregar al ZIP
|
|
filename = f"{model_name}_part{page_num}.csv"
|
|
zip_file.writestr(filename, csv_buffer.getvalue())
|
|
|
|
zip_buffer.seek(0)
|
|
|
|
zip_content = zip_buffer.getvalue()
|
|
|
|
response = HttpResponse(zip_content, content_type='application/zip')
|
|
response['Content-Disposition'] = f'attachment; filename="{model_name}_particionado.zip"'
|
|
response['Content-Length'] = len(zip_content)
|
|
|
|
return response
|
|
|
|
except Exception as e:
|
|
return Response({'error': f'Error exportando modelo CSV: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
def get_related_keys_from_filters(self, global_filters, models_data, user):
|
|
"""
|
|
Obtiene patentes, pedimentos y datastages que cumplen EXACTAMENTE con TODOS los filtros globales
|
|
VERSIÓN SIMPLIFICADA - Usa la MISMA lógica que apply_global_filters_to_model
|
|
"""
|
|
related_keys = {
|
|
'patentes': set(),
|
|
'pedimentos': set(),
|
|
'datastage_ids': set()
|
|
}
|
|
|
|
# Si no hay filtros, retornar vacío
|
|
if not any(v for v in global_filters.values() if v not in [None, '']):
|
|
return {}
|
|
|
|
all_records_with_filters = []
|
|
|
|
for model_data in models_data:
|
|
model_name = model_data.get('model')
|
|
|
|
try:
|
|
model = apps.get_model('datastage', model_name)
|
|
|
|
# ¡USAR LA MISMA FUNCIÓN QUE EN MODO SINGULAR!
|
|
filters = self.apply_global_filters_to_model(global_filters, model, user)
|
|
|
|
if filters:
|
|
# EJECUTAR CONSULTA - IDÉNTICO A MODO SINGULAR
|
|
queryset = model.objects.filter(**filters)
|
|
total = queryset.count()
|
|
|
|
# VERIFICACIÓN ESPECIAL PARA RFC
|
|
if 'rfc' in filters:
|
|
rfc_value = filters['rfc']
|
|
# Doble verificación: contar registros con ese RFC exacto
|
|
rfc_exact_count = queryset.filter(rfc=rfc_value).count()
|
|
|
|
if rfc_exact_count != total:
|
|
try:
|
|
other_rfcs = queryset.exclude(rfc=rfc_value).values_list('rfc', flat=True).distinct()[:5]
|
|
except:
|
|
pass
|
|
|
|
# Obtener registros
|
|
records = queryset.values('patente', 'pedimento', 'datastage_id')
|
|
all_records_with_filters.extend(list(records))
|
|
|
|
except LookupError:
|
|
continue
|
|
|
|
if not all_records_with_filters:
|
|
return {'patentes': set(), 'pedimentos': set(), 'datastage_ids': set()}
|
|
|
|
for record in all_records_with_filters:
|
|
if record.get('patente'):
|
|
related_keys['patentes'].add(record['patente'])
|
|
if record.get('pedimento'):
|
|
related_keys['pedimentos'].add(record['pedimento'])
|
|
if record.get('datastage_id'):
|
|
related_keys['datastage_ids'].add(record['datastage_id'])
|
|
|
|
return {k: list(v) for k, v in related_keys.items() if v}
|
|
|
|
def apply_global_filters_to_model(self, global_filters, model, user):
|
|
"""
|
|
Aplica filtros globales - VERSIÓN CORREGIDA CON UUID
|
|
"""
|
|
|
|
filters = {}
|
|
model_fields = [f.name for f in model._meta.get_fields()]
|
|
|
|
# ORGANIZACIÓN - Manejar como UUID
|
|
org_value = global_filters.get('organizacion')
|
|
if org_value and org_value != '' and 'organizacion' in model_fields:
|
|
field = model._meta.get_field('organizacion')
|
|
|
|
if hasattr(field, 'related_model'): # Es ForeignKey
|
|
# Convertir string a UUID
|
|
try:
|
|
import uuid
|
|
org_uuid = uuid.UUID(org_value)
|
|
filters['organizacion_id'] = org_uuid
|
|
except Exception as e:
|
|
# Fallback: dejar como string (puede no funcionar)
|
|
filters['organizacion_id'] = org_value
|
|
else: # Es CharField
|
|
filters['organizacion'] = org_value
|
|
|
|
# RFC - Manejar normalmente
|
|
rfc_value = global_filters.get('rfc')
|
|
if rfc_value and rfc_value != '' and 'rfc' in model_fields:
|
|
filters['rfc'] = rfc_value
|
|
|
|
# PATENTE
|
|
if global_filters.get('patente'):
|
|
filters['patente'] = global_filters['patente']
|
|
|
|
# PEDIMENTO
|
|
if global_filters.get('pedimento'):
|
|
filters['pedimento'] = global_filters['pedimento']
|
|
|
|
# FECHAS
|
|
if 'fecha_pago_real' in model_fields:
|
|
if global_filters.get('fecha_pago_desde'):
|
|
filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde']
|
|
|
|
if global_filters.get('fecha_pago_hasta'):
|
|
filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta']
|
|
|
|
return filters
|
|
|
|
def apply_related_filters(self, global_filters, model, related_keys, user):
|
|
filters = {}
|
|
model_fields = [f.name for f in model._meta.get_fields()]
|
|
|
|
# 1. Organización
|
|
if 'organizacion' in model_fields and global_filters.get('organizacion'):
|
|
filters['organizacion'] = global_filters['organizacion']
|
|
|
|
# 2. RFC (¡ESTO ES LO QUE FALTA!)
|
|
if 'rfc' in model_fields and global_filters.get('rfc'):
|
|
filters['rfc'] = global_filters['rfc']
|
|
|
|
# 3. Fechas (SIEMPRE se aplican)
|
|
if 'fecha_pago_real' in model_fields:
|
|
if global_filters.get('fecha_pago_desde'):
|
|
filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde']
|
|
|
|
if global_filters.get('fecha_pago_hasta'):
|
|
filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta']
|
|
|
|
# 🔥 SEGUNDO: Si hay related_keys, AÑADIRLAS a los filtros existentes
|
|
if any(related_keys.values()):
|
|
|
|
# Añadir patentes si existen
|
|
if related_keys.get('patentes') and 'patente' in model_fields:
|
|
filters['patente__in'] = related_keys['patentes']
|
|
|
|
# Añadir pedimentos si existen
|
|
if related_keys.get('pedimentos') and 'pedimento' in model_fields:
|
|
filters['pedimento__in'] = related_keys['pedimentos']
|
|
|
|
# Añadir datastage_ids si existen
|
|
if related_keys.get('datastage_ids') and 'datastage_id' in model_fields:
|
|
filters['datastage_id__in'] = related_keys['datastage_ids']
|
|
|
|
else:
|
|
# Solo patente y pedimento específicos (no listas)
|
|
if 'patente' in model_fields and global_filters.get('patente'):
|
|
filters['patente'] = global_filters['patente']
|
|
|
|
if 'pedimento' in model_fields and global_filters.get('pedimento'):
|
|
filters['pedimento'] = global_filters['pedimento']
|
|
|
|
return filters
|
|
|
|
def estimate_excel_file_size(self, num_records, num_columns):
|
|
"""Estima tamaño aproximado del archivo Excel"""
|
|
# Estimación aproximada: 100 bytes por celda
|
|
return num_records * num_columns * 100
|
|
|
|
    def export_with_size_control(self, request, models_data, global_filters, related_keys):
        """Alternative multi-model Excel export that rolls over to a new
        .xlsx part based on an estimated file size (~100 bytes per cell,
        ~50 MB cap) instead of a fixed sheet count. Parts are bundled in
        a single ZIP; any error returns a 500 JSON response.
        """
        try:
            zip_buffer = io.BytesIO()

            with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
                file_counter = 1                 # suffix for datastage_partN.xlsx
                current_wb = None                # workbook currently being filled
                current_file_size_estimate = 0   # estimated bytes in current_wb
                MAX_FILE_SIZE_ESTIMATE = 50 * 1024 * 1024  # 50MB estimado

                for model_data in models_data:
                    model_name = model_data.get('model')
                    fields = model_data.get('fields', [])

                    if not model_name or not fields:
                        continue

                    try:
                        model = apps.get_model('datastage', model_name)
                        filters = self.apply_related_filters(global_filters, model, related_keys, request.user)

                        # With no filters at all, export nothing for this model.
                        if filters:
                            queryset = model.objects.filter(**filters).values(*fields)
                        else:
                            queryset = model.objects.none()

                        total_records = queryset.count()

                        if total_records == 0:
                            continue

                        # Estimated size contribution of this whole model.
                        model_size_estimate = self.estimate_excel_file_size(total_records, len(fields))

                        # Start a new part when there is no workbook yet, the
                        # size budget would be exceeded, or a huge model
                        # arrives while the current part already has content.
                        needs_new_file = (
                            current_wb is None or
                            current_file_size_estimate + model_size_estimate > MAX_FILE_SIZE_ESTIMATE or
                            (total_records > self.MAX_RECORDS_PER_FILE and current_file_size_estimate > 0)
                        )

                        if needs_new_file and current_wb is not None:
                            # Flush the finished part into the ZIP.
                            part_buffer = io.BytesIO()
                            current_wb.save(part_buffer)
                            part_buffer.seek(0)
                            zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())
                            file_counter += 1
                            current_wb = None
                            current_file_size_estimate = 0

                        if current_wb is None:
                            # Fresh workbook without the default sheet.
                            current_wb = openpyxl.Workbook()
                            current_wb.remove(current_wb.active)

                        # Oversized model: paginate across several sheets.
                        if total_records > self.MAX_RECORDS_PER_FILE:
                            from django.core.paginator import Paginator
                            paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE)

                            for page_num in paginator.page_range:
                                page = paginator.page(page_num)

                                # One sheet per page (Excel 31-char name cap).
                                sheet_name = f"{model_name[:20]}_p{page_num}"[:31]
                                ws = current_wb.create_sheet(title=sheet_name)
                                ws.append(fields)

                                for row in page.object_list:
                                    row_values = [self.safe_excel_value(row[field]) for field in fields]
                                    ws.append(row_values)

                                # Keep the running size estimate up to date.
                                page_size = self.estimate_excel_file_size(len(page.object_list), len(fields))
                                current_file_size_estimate += page_size

                        else:
                            # Small model: a single sheet holds everything.
                            sheet_name = model_name[:31]
                            ws = current_wb.create_sheet(title=sheet_name)
                            ws.append(fields)

                            for row in queryset:
                                row_values = [self.safe_excel_value(row[field]) for field in fields]
                                ws.append(row_values)

                            current_file_size_estimate += model_size_estimate

                    except LookupError:
                        # Unknown model name: skip it, keep exporting the rest.
                        continue

                # Flush the last open workbook, if any.
                if current_wb is not None:
                    part_buffer = io.BytesIO()
                    current_wb.save(part_buffer)
                    part_buffer.seek(0)
                    zip_file.writestr(f"datastage_part{file_counter}.xlsx", part_buffer.getvalue())

            zip_buffer.seek(0)

            response = HttpResponse(zip_buffer.read(), content_type='application/zip')
            response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"'
            return response

        except Exception as e:
            return Response({'error': f'Error: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
|
|
|
class ExportModelView(APIView):
    """Generic model export: GET lists a model's fields, POST produces a file."""
    my_tags = ['Reportes']
    permission_classes = [IsAuthenticated & (IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter('model', openapi.IN_QUERY, description="Nombre del modelo (ejemplo: Registro500)",
                              type=openapi.TYPE_STRING, required=True)
        ],
        responses={200: openapi.Response('Campos disponibles', schema=openapi.Schema(
            type=openapi.TYPE_OBJECT,
            properties={
                'fields': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_STRING))
            }
        ))}
    )
    def get(self, request, *args, **kwargs):
        """Return the available field names for the requested model.

        Example: /api/reports/exportmodel/?model=Registro500
        """
        model_name = request.query_params.get('model')
        module = request.query_params.get('module', 'datastage')

        if not model_name:
            return Response({'error': 'model is required'}, status=status.HTTP_400_BAD_REQUEST)

        try:
            target = apps.get_model(module, model_name)
        except LookupError:
            return Response({'error': f'Model {model_name} not found in app {module}'},
                            status=status.HTTP_404_NOT_FOUND)

        return Response({'fields': [f.name for f in target._meta.fields]})

    @swagger_auto_schema(
        request_body=ExportModelSerializer,
        responses={200: 'Archivo generado (Excel o CSV)'}
    )
    def post(self, request, *args, **kwargs):
        """Export the requested model/fields as an Excel or CSV attachment."""
        payload = request.data
        model_name = payload.get('model')
        fields = payload.get('fields')
        filters = payload.get('filters', {})
        export_type = payload.get('type', 'csv')
        module = payload.get('module', 'datastage')

        if not model_name or not fields:
            return Response({'error': 'model and fields are required'}, status=status.HTTP_400_BAD_REQUEST)

        exporter = export_model_to_excel if export_type == 'excel' else export_model_to_csv
        return exporter(request, model_name, fields, module, filters)
|
|
|
|
|
|
# Create your views here.
|
|
|
|
|
|
class ExportModelView(APIView):
    """Generate a model export (CSV or Excel) scoped to the caller's
    organization.

    NOTE(review): this class shadows the earlier ``ExportModelView``
    definition in this module; being defined last, this is the version
    Python actually keeps — confirm the earlier one can be removed.
    """

    my_tags = ['Reportes']

    permission_classes = [IsAuthenticated & (
        IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)]

    # Bug fix: the keyword was misspelled 'esponses', so the 200 response
    # description never reached the generated swagger schema.
    @swagger_auto_schema(request_body=ExportModelSerializer, responses={200: 'Archivo generado (Excel o CSV)'})
    def post(self, request, *args, **kwargs):
        """Build and return the export file, forcing an organization filter.

        Body: ``model``, ``fields`` (required); ``filters``, ``type``
        ('csv' default or 'excel'), ``module`` ('datastage' default).
        """
        model_name = request.data.get('model')
        fields = request.data.get('fields')
        filters = request.data.get('filters', {})

        # Always scope to the caller's organization; a client-supplied
        # 'organizacion__id' filter is overwritten here.
        # NOTE(review): when the user has no organization this becomes
        # organizacion__id=None (i.e. organizacion IS NULL) — confirm
        # that matching org-less records is the intended behavior.
        filters['organizacion__id'] = (
            request.user.organizacion.id
            if hasattr(request.user, 'organizacion') and request.user.organizacion
            else None
        )

        export_type = request.data.get('type', 'csv')
        if not model_name or not fields:
            return Response({'error': 'model and fields are required'}, status=status.HTTP_400_BAD_REQUEST)

        module = request.data.get('module', 'datastage')
        if export_type == 'excel':
            return export_model_to_excel(request, model_name, fields, module, filters)
        return export_model_to_csv(request, model_name, fields, module, filters)
|
|
|
|
# General summary endpoint for the dashboard
|
|
|
|
|
|
@api_view(['GET'])
@permission_classes([
    IsAuthenticated
])
def dashboard_summary(request):
    """Aggregate compliance figures for the dashboard.

    Counts pedimentos, COVEs, e-documents, remesas, documents and
    partidas matching the query-string filters, and returns per-section
    totals plus completion percentages and an overall average.

    Security fix: the Importador-group RFC restriction is now applied
    AFTER the optional ``contribuyente__rfc`` query parameter, so an
    Importador user can no longer widen their access by supplying a
    different RFC (previously the parameter silently overrode the
    group restriction).
    """
    query = request.query_params
    user = request.user
    filters = {}

    # Default to the caller's own organization when none was requested.
    org_id = query.get('organizacion_id')
    if not org_id and hasattr(user, 'organizacion') and user.organizacion:
        org_id = user.organizacion.id
    # Non-superusers are always scoped to a single organization.
    if org_id and not getattr(user, 'is_superuser', False):
        filters['organizacion_id'] = org_id

    # Optional pass-through filters: query param name -> ORM lookup.
    param_lookups = {
        'pedimento_app': 'pedimento_app',
        'aduana': 'aduana',
        'patente': 'patente',
        'regimen': 'regimen',
        'agente_aduanal': 'agente_aduanal',
        'tipo_operacion': 'tipo_operacion__tipo',
        'fecha_pago__gte': 'fecha_pago__gte',
        'fecha_pago__lte': 'fecha_pago__lte',
        'contribuyente__rfc': 'contribuyente__rfc',
    }
    for param, lookup in param_lookups.items():
        value = query.get(param)
        if value:
            filters[lookup] = value

    # Importador-group users may only see their own RFC; applied last so
    # no query parameter can override it.
    if user.groups.filter(name='Importador').exists():
        rfc = getattr(user, 'rfc', None)
        if rfc:
            filters['contribuyente__rfc'] = rfc

    # Pedimento counts drive everything else: the related models are
    # restricted to the filtered pedimento ids.
    pedimentos_qs = Pedimento.objects.filter(**filters)
    pedimentos_total = pedimentos_qs.count()
    pedimentos_completos = pedimentos_qs.filter(existe_expediente=True).count()
    pedimentos_pendientes = pedimentos_total - pedimentos_completos

    pedimento_ids = list(pedimentos_qs.values_list('id', flat=True))

    coves_total = Cove.objects.filter(pedimento_id__in=pedimento_ids).count()
    coves_procesados = Cove.objects.filter(
        pedimento_id__in=pedimento_ids, cove_descargado=True).count()
    acuse_coves_procesados = Cove.objects.filter(
        pedimento_id__in=pedimento_ids, acuse_cove_descargado=True).count()
    acuse_coves_pendientes = coves_total - acuse_coves_procesados
    coves_pendientes = coves_total - coves_procesados

    edocs_total = EDocument.objects.filter(
        pedimento_id__in=pedimento_ids).count()
    edocs_descargados = EDocument.objects.filter(
        pedimento_id__in=pedimento_ids, edocument_descargado=True).count()
    acuse_descargados = EDocument.objects.filter(
        pedimento_id__in=pedimento_ids, acuse_descargado=True).count()
    edocs_pendientes = edocs_total - edocs_descargados
    acuses_pendientes = edocs_total - acuse_descargados

    remesas_total = Document.objects.filter(
        document_type__id=3, pedimento_id__in=pedimento_ids).count()
    # NOTE(review): counts every Document linked to the pedimentos, not
    # only downloaded ones — confirm the 'descargados' label is accurate.
    documentos_descargados = Document.objects.filter(
        pedimento_id__in=pedimento_ids).count()

    partidas_total = Partida.objects.filter(
        pedimento_id__in=pedimento_ids).count()
    partidas_descargadas = Partida.objects.filter(
        pedimento_id__in=pedimento_ids, descargado=True).count()
    partidas_pendientes = partidas_total - partidas_descargadas

    def pct(done, total):
        # Completion percentage; 0 when there is nothing to count.
        return (done / total * 100) if total else 0

    cumplimiento_pedimentos = pct(pedimentos_completos, pedimentos_total)
    cumplimiento_acuse_coves = pct(acuse_coves_procesados, coves_total)
    cumplimiento_coves = pct(coves_procesados, coves_total)
    cumplimiento_edocs = pct(edocs_descargados, edocs_total)
    cumplimiento_acuses = pct(acuse_descargados, edocs_total)
    cumplimiento_partidas = pct(partidas_descargadas, partidas_total)

    # Overall compliance: plain average of the six indicators.
    indicadores = [
        cumplimiento_pedimentos,
        cumplimiento_coves,
        cumplimiento_acuse_coves,
        cumplimiento_edocs,
        cumplimiento_acuses,
        cumplimiento_partidas
    ]
    cumplimiento_total = sum(indicadores) / len(indicadores) if indicadores else 0

    return Response({
        "cumplimiento_total": round(cumplimiento_total, 2),
        "pedimentos": {
            "total": pedimentos_total,
            "completos": pedimentos_completos,
            "pendientes": pedimentos_pendientes,
            "cumplimiento": round(cumplimiento_pedimentos, 2)
        },
        "coves": {
            "total": coves_total,
            "coves_procesados": coves_procesados,
            "coves_pendientes": coves_pendientes,
            "coves_cumplimiento": round(cumplimiento_coves, 2),
        },
        "acuse_coves": {
            "total": coves_total,
            "acuse_coves_procesados": acuse_coves_procesados,
            "acuse_coves_pendientes": acuse_coves_pendientes,
            "acuse_coves_cumplimiento": round(cumplimiento_acuse_coves, 2)
        },
        "edocuments": {
            "total": edocs_total,
            "edocs_descargados": edocs_descargados,
            "edocs_pendientes": edocs_pendientes,
            "edocs_cumplimiento": round(cumplimiento_edocs, 2),
        },
        "acuses": {
            "total": edocs_total,
            "acuse_descargados": acuse_descargados,
            "acuses_pendientes": acuses_pendientes,
            "acuses_cumplimiento": round(cumplimiento_acuses, 2)
        },
        "remesas": {
            "total": remesas_total
        },
        "documentos": {
            "descargados": documentos_descargados
        },
        "partidas": {
            "total": partidas_total,
            "partidas_descargadas": partidas_descargadas,
            "partidas_pendientes": partidas_pendientes,
            "cumplimiento": round(cumplimiento_partidas, 2)
        }
    })
|