from celery import shared_task
from django.core.files.base import ContentFile
from django.utils import timezone
from api.reports.models import ReportDocument
from api.customs.models import Pedimento, Cove, EDocument, Partida
from django.db.models import Q
import csv
import os
from django.conf import settings
import logging

# Use the module logger, not the root logger, so records carry this path.
logger = logging.getLogger(__name__)

# Simple equality filters: key in report.filters -> Pedimento ORM lookup.
_DIRECT_LOOKUPS = {
    'organizacion_id': 'organizacion_id',
    'fecha_pago__gte': 'fecha_pago__gte',
    'fecha_pago__lte': 'fecha_pago__lte',
    'contribuyente__rfc': 'contribuyente__rfc',
    'patente': 'patente',
    'aduana': 'aduana',
    'pedimento': 'pedimento',
    'pedimento_app': 'pedimento_app',
    'regimen': 'regimen',
    'tipo_operacion': 'tipo_operacion_id',
}


def _build_pedimento_filters(filters):
    """Translate the JSON ``filters`` payload into a Q object for Pedimento."""
    q = Q()
    for key, lookup in _DIRECT_LOOKUPS.items():
        value = filters.get(key)
        if value:
            q &= Q(**{lookup: value})
    return q


def _resolve_filename(filters, report_id):
    """Return the CSV filename to use, guaranteeing a ``.csv`` suffix.

    Bug fix: the original assigned a literal placeholder string when the
    user-supplied name lacked the extension; the intent is to append it.
    """
    filename = filters.get('filename')
    if filename:
        return filename if filename.endswith('.csv') else f"{filename}.csv"
    return f"report_{report_id}_{timezone.now().strftime('%Y%m%d%H%M%S')}.csv"


@shared_task
def generate_report_document(report_id):
    """Asynchronously build the compliance CSV for a ReportDocument.

    One row is written per related COVE / EDocument / Partida of every
    Pedimento matching ``report.filters``.  On success the file is attached
    to the report and its status becomes ``ready``; on any failure the
    status becomes ``error`` with the exception message stored.

    Args:
        report_id: primary key of the ReportDocument to populate.
    """
    # Fetch OUTSIDE the try block: if the report row itself is missing we
    # cannot record an error on it, so let DoesNotExist propagate to celery
    # instead of raising UnboundLocalError in the except handler.
    report = ReportDocument.objects.get(id=report_id)
    try:
        report.status = 'processing'
        report.save(update_fields=['status'])

        filters = report.filters or {}
        pedimentos = Pedimento.objects.filter(_build_pedimento_filters(filters))

        filename = _resolve_filename(filters, report.id)
        file_path = os.path.join(settings.MEDIA_ROOT, 'reports', filename)
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        with open(file_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow([
                'aduana', 'patente', 'regimen', 'pedimento', 'pedimento_app',
                'clave_pedimento', 'tipo_operacion_id', 'contribuyente_id',
                'tipo_documento', 'numero_documento', 'estado', 'acuse_estado',
            ])
            for ped in pedimentos:
                # Shared prefix of every detail row for this pedimento.
                base = [
                    ped.aduana, ped.patente, ped.regimen, ped.pedimento,
                    ped.pedimento_app, ped.clave_pedimento,
                    ped.tipo_operacion_id, ped.contribuyente_id,
                ]
                for cove in Cove.objects.filter(pedimento=ped):
                    writer.writerow(base + ['COVE', cove.numero_cove,
                                            cove.cove_descargado,
                                            cove.acuse_cove_descargado])
                for edoc in EDocument.objects.filter(pedimento=ped):
                    writer.writerow(base + ['EDOC', edoc.numero_edocument,
                                            edoc.edocument_descargado,
                                            edoc.acuse_descargado])
                for partida in Partida.objects.filter(pedimento=ped):
                    # Partidas have no acuse flag; keep the column blank.
                    writer.writerow(base + ['PARTIDA', partida.numero_partida,
                                            partida.descargado, ''])

        with open(file_path, 'rb') as f:
            report.file.save(filename, ContentFile(f.read()), save=True)

        report.status = 'ready'
        report.finished_at = timezone.now()
        report.save(update_fields=['status', 'file', 'finished_at'])
    except Exception as e:
        logger.exception("generate_report_document(%s) failed", report_id)
        report.status = 'error'
        report.error_message = str(e)
        report.finished_at = timezone.now()
        report.save(update_fields=['status', 'error_message', 'finished_at'])
b/api/organization/views.py @@ -27,7 +27,7 @@ class ViewSetOrganizacion(LoggingMixin, viewsets.ModelViewSet, OrganizacionFiltr queryset = Organizacion.objects.all() serializer_class = OrganizacionSerializer - filterset_fields = ['nombre', 'descripcion'] + filterset_fields = ['nombre'] my_tags = ['Organizaciones'] diff --git a/api/reports/models.py b/api/reports/models.py index b68985b..11af9ff 100644 --- a/api/reports/models.py +++ b/api/reports/models.py @@ -9,10 +9,15 @@ class ReportDocument(models.Model): ('ready', 'Listo'), ('error', 'Error'), ] + TYPE_REPORT = [ + ('cumplimiento', 'cumplimiento'), + ('control_pedimento', 'control_pedimento'), + ] user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='report_documents') filters = models.JSONField(blank=True, null=True) status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='pending') file = models.FileField(upload_to='reports/', blank=True, null=True) + report_type = models.CharField(max_length=30, choices=TYPE_REPORT, default='cumplimiento') error_message = models.TextField(blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True) finished_at = models.DateTimeField(blank=True, null=True) diff --git a/api/reports/tasks/report_document.py b/api/reports/tasks/report_document.py index 9a99ee5..97d77e2 100644 --- a/api/reports/tasks/report_document.py +++ b/api/reports/tasks/report_document.py @@ -3,7 +3,9 @@ from django.core.files.base import ContentFile from django.utils import timezone from api.reports.models import ReportDocument from api.customs.models import Pedimento, Cove, EDocument, Partida -from django.db.models import Q +from django.db.models import Q, Exists, OuterRef +# from django.db.models import Q, +from api.record.models import Document import csv import os from django.conf import settings @@ -15,7 +17,6 @@ def generate_report_document(report_id): report.status = 'processing' report.save(update_fields=['status']) filters = 
@shared_task
def generate_report_control_pedimento(report_id):
    """Asynchronously build the 'control_pedimento' CSV for a ReportDocument.

    The file starts with a summary section (expediente/document totals and
    the percentage of missing downloads) followed by one detail row per
    COVE / PARTIDA / EDOCUMENT of every Pedimento matching the report's
    filters.  Status transitions mirror generate_report_document.

    Args:
        report_id: primary key of the ReportDocument to populate.
    """
    # Fetch OUTSIDE the try block so a missing report raises DoesNotExist to
    # celery instead of UnboundLocalError inside the except handler.
    report = ReportDocument.objects.get(id=report_id)
    try:
        report.status = 'processing'
        report.save(update_fields=['status'])

        filters = report.filters or {}
        pedimentos_filters = {}
        for key in ('organizacion_id', 'fecha_pago__gte', 'fecha_pago__lte', 'pedimento_app'):
            if filters.get(key):
                pedimentos_filters[key] = filters[key]

        pedimentos_qs = Pedimento.objects.filter(**pedimentos_filters)
        pedimentos_total = pedimentos_qs.count()

        pedimentos_completos = 0
        total_documentos = 0
        documentos_sin_descargar = 0
        detail_rows = []

        # Single pass per pedimento: build detail rows AND tally counters from
        # the same querysets, instead of six count() queries per pedimento
        # plus a second full iteration.  Counters now agree with the rows:
        # a NULL downloaded-flag counts as pending, same as it renders FALSO.
        for pedimento in pedimentos_qs:
            base_row = [
                pedimento.aduana or '',
                pedimento.patente or '',
                pedimento.regimen or '',
                pedimento.pedimento or '',        # 7-digit pedimento number
                pedimento.pedimento_app or '',    # full pedimento-app number
                pedimento.clave_pedimento or '',
                pedimento.tipo_operacion.tipo if pedimento.tipo_operacion else '',
                str(pedimento.contribuyente_id) if pedimento.contribuyente_id else '',
            ]

            docs = 0
            pendientes = 0
            # (queryset, identifier attr, downloaded-flag attr, type label) —
            # order preserved from the original: COVE, PARTIDA, EDOCUMENT.
            document_sources = (
                (Cove.objects.filter(pedimento_id=pedimento.id),
                 'numero_cove', 'cove_descargado', 'COVE'),
                (Partida.objects.filter(pedimento_id=pedimento.id),
                 'numero_partida', 'descargado', 'PARTIDA'),
                (EDocument.objects.filter(pedimento_id=pedimento.id),
                 'numero_edocument', 'edocument_descargado', 'EDOCUMENT'),
            )
            for qs, id_attr, flag_attr, label in document_sources:
                for doc in qs:
                    downloaded = getattr(doc, flag_attr)
                    docs += 1
                    if not downloaded:
                        pendientes += 1
                    detail_rows.append(base_row + [
                        getattr(doc, id_attr),
                        label,
                        'VERDADERO' if downloaded else 'FALSO',
                    ])

            total_documentos += docs
            documentos_sin_descargar += pendientes
            # An expediente is complete when it has documents and none pending.
            if docs > 0 and pendientes == 0:
                pedimentos_completos += 1

        porcentaje_faltantes = (
            documentos_sin_descargar / total_documentos * 100 if total_documentos else 0
        )

        filename = f"report_{report.id}_{timezone.now().strftime('%Y%m%d%H%M%S')}.csv"
        file_path = os.path.join(settings.MEDIA_ROOT, 'reports', filename)
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        with open(file_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            # Summary section.
            writer.writerow(['RESUMEN DEL REPORTE - CONTROL DE PEDIMENTOS'])
            writer.writerow([])
            writer.writerow(['TOTAL DE EXPEDIENTES:', pedimentos_total])
            writer.writerow(['TOTAL DE EXPEDIENTES COMPLETOS:', pedimentos_completos])
            writer.writerow(['TOTAL DE DOCUMENTOS:', total_documentos])
            writer.writerow(['DOCUMENTOS SIN DESCARGAR:', documentos_sin_descargar])
            writer.writerow(['PORCENTAJE DE DOCUMENTOS FALTANTES (%):',
                             f"{porcentaje_faltantes:.2f}%"])
            writer.writerow([])
            writer.writerow([])
            # Detail section: one row per document.
            writer.writerow([
                'ADUANA', 'PATENTE', 'REGIMEN', 'NO. PEDIMENTO', 'PEDIMENTO_APP',
                'CLAVE_PEDIMENTO', 'TIPO_OPERACION', 'CONTRIBUYENTE_ID',
                'IDENTIFICADOR_DOCUMENTO', 'TIPO_DOCUMENTO', 'ESTADO',
            ])
            writer.writerows(detail_rows)

        with open(file_path, 'rb') as f:
            report.file.save(filename, ContentFile(f.read()), save=True)

        report.status = 'ready'
        report.finished_at = timezone.now()
        report.save(update_fields=['status', 'file', 'finished_at'])

    except Exception as e:
        report.status = 'error'
        report.error_message = str(e)
        report.finished_at = timezone.now()
        report.save(update_fields=['status', 'error_message', 'finished_at'])
report_document_list, report_document_download, control_pedimento urlpatterns = [ path('exportmodel/', ExportModelView.as_view(), name='export-model'), + path('exportmodel/datastage/', ExportDataStageView.as_view(), name='export-datastage-model'), + path('control-pedimento/', control_pedimento, name='control_pedimento'), path('dashboard/summary/', dashboard_summary, name='dashboard-summary'), #path('documentos-por-fecha/', documentos_por_fecha, name='documentos-por-fecha'), path('table-summary/', table_summary, name='table-summary'), diff --git a/api/reports/views.py b/api/reports/views.py index 2f5ef65..d12d31a 100644 --- a/api/reports/views.py +++ b/api/reports/views.py @@ -48,7 +48,10 @@ from core.permissions import ( IsSuperUser ) from .serializers import ExportModelSerializer - +import uuid +import datetime +import zipfile +from django.db import models def export_model_to_csv(request, model_name, fields, module='datastage', filters=None): model = apps.get_model(module, model_name) @@ -86,11 +89,657 @@ def export_model_to_excel(request, model_name, fields, module='datastage', filte response['Content-Disposition'] = f'attachment; filename="{model_name}.xlsx"' return response +# class ControlPedimentoView(APIView): +# my_tags = ['Control-Pedimento'] +# permission_classes = [IsAuthenticated & (IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)] + +# @swagger_auto_schema(request_body=ExportModelSerializer, responses={200: 'Archivo generado (Excel o CSV)'}) +# def post(self, request, *args, **kwargs): +# """ +# Endpoint específico para exportación de DataStage con soporte múltiple +# """ +# # Verificar si es modo múltiple +# modo = request.data.get('modo', 'simple') + +# if modo == 'multiple': +# return self.handle_multiple_export(request) +# else: +# return self.handle_simple_export(request) + + + +class ExportDataStageView(APIView): + my_tags = ['Reportes-DataStage'] + permission_classes = [IsAuthenticated & 
(IsSameOrganization | IsSameOrganizationAndAdmin | IsSameOrganizationDeveloper | IsSuperUser)] + + # Constantes para partición + # MAX_RECORDS_PER_FILE = 100 # Límite seguro por archivo + MAX_RECORDS_PER_FILE = 50000 # Límite seguro por archivo + + def safe_excel_value(self, value): + """ + Convierte cualquier valor a un formato seguro para Excel + """ + if value is None: + return '' + elif isinstance(value, (uuid.UUID,)): + return str(value) + elif hasattr(value, 'uuid'): + return str(value.uuid) + elif hasattr(value, 'id'): + return str(value.id) + elif isinstance(value, (datetime.datetime, datetime.date)): + return value.isoformat() + elif isinstance(value, (dict, list)): + return str(value) + else: + return str(value) + + @swagger_auto_schema(request_body=ExportModelSerializer, responses={200: 'Archivo generado (Excel o CSV)'}) + def post(self, request, *args, **kwargs): + """ + Endpoint específico para exportación de DataStage con soporte múltiple + """ + # Verificar si es modo múltiple + modo = request.data.get('modo', 'simple') + + if modo == 'multiple': + return self.handle_multiple_export(request) + else: + return self.handle_simple_export(request) + + def handle_simple_export(self, request): + """Maneja exportación simple de DataStage (un solo modelo)""" + model_name = request.data.get('model') + fields = request.data.get('fields') + global_filters = request.data.get('globalFilters', {}) + export_type = request.data.get('format', 'csv') + module = 'datastage' + + if not model_name or not fields: + return Response({'error': 'model and fields are required'}, status=status.HTTP_400_BAD_REQUEST) + + try: + model = apps.get_model(module, model_name) + filters = self.apply_global_filters_to_model(global_filters, model, request.user) + + queryset = model.objects.filter(**filters).values(*fields) + total_records = queryset.count() + + if export_type == 'excel': + # Verificar si necesita partición + if total_records > self.MAX_RECORDS_PER_FILE: + return 
self.export_single_model_partitioned(request, model_name, fields, filters, total_records) + else: + return export_model_to_excel(request, model_name, fields, module, filters) + else: + if total_records > self.MAX_RECORDS_PER_FILE: + return self.export_single_model_csv_partitioned(request, model_name, fields, filters, total_records) + else: + return export_model_to_csv(request, model_name, fields, module, filters) + + except LookupError: + return Response({'error': f'Model {model_name} not found'}, status=status.HTTP_404_NOT_FOUND) + + def handle_multiple_export(self, request): + """Maneja exportación múltiple de DataStage (varios modelos)""" + models_data = request.data.get('models', []) + export_type = request.data.get('format', 'csv') + global_filters = request.data.get('globalFilters', {}) + + if not models_data: + return Response({'error': 'models are required for multiple export'}, status=status.HTTP_400_BAD_REQUEST) + + related_keys = self.get_related_keys_from_filters(global_filters, models_data, request.user) + total_estimated_records = self.estimate_total_records(models_data, global_filters, related_keys, request.user) + + if total_estimated_records > self.MAX_RECORDS_PER_FILE: + if export_type == 'excel': + return self.export_datastage_multiple_partitioned_excel(request, models_data, global_filters, related_keys) + else: + return self.export_datastage_multiple_partitioned_csv(request, models_data, global_filters, related_keys) + else: + if export_type == 'excel': + return self.export_datastage_multiple_to_excel(request, models_data, global_filters, related_keys) + else: + return self.export_datastage_multiple_to_csv(request, models_data, global_filters, related_keys) + + def estimate_total_records(self, models_data, global_filters, related_keys, user): + """Estima el total de registros para todos los modelos""" + total = 0 + for model_data in models_data: + model_name = model_data.get('model') + try: + model = apps.get_model('datastage', model_name) + 
filters = self.apply_related_filters(global_filters, model, related_keys, user) + total += model.objects.filter(**filters).count() + except: + continue + return total + + def export_datastage_multiple_to_excel(self, request, models_data, global_filters, related_keys): + """Exporta múltiples modelos de DataStage con filtrado relacionado (múltiples hojas)""" + wb = openpyxl.Workbook() + wb.remove(wb.active) + + for model_data in models_data: + model_name = model_data.get('model') + fields = model_data.get('fields', []) + + if not model_name or not fields: + continue + + try: + model = apps.get_model('datastage', model_name) + + # 🔥 APLICAR FILTROS RELACIONADOS + filters = self.apply_related_filters(global_filters, model, related_keys, request.user) + + # Si hay filtros, aplicarlos; si no, obtener todos los registros + if filters: + queryset = model.objects.filter(**filters).values(*fields) + else: + queryset = model.objects.none() # No obtener nada si no hay filtros + + # Si no hay registros, saltar este modelo + if queryset.count() == 0: + continue + + # Crear hoja (limitar nombre a 31 caracteres) + sheet_name = model_name[:31] + ws = wb.create_sheet(title=sheet_name) + + # Escribir encabezados + ws.append(fields) + + # Escribir datos + for row in queryset: + row_values = [] + for field in fields: + value = row[field] + # 🔥 USAR safe_excel_value para convertir valores + row_values.append(self.safe_excel_value(value)) + ws.append(row_values) + + except LookupError: + continue + + # Si no se crearon hojas, crear una vacía + if len(wb.sheetnames) == 0: + ws = wb.create_sheet(title="Sin datos") + ws.append(["No se encontraron datos para los modelos especificados"]) + + output = io.BytesIO() + wb.save(output) + output.seek(0) + + response = HttpResponse( + output.read(), + content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + ) + response['Content-Disposition'] = 'attachment; filename="datastage_related_report.xlsx"' + return response + + def 
export_datastage_multiple_partitioned_excel(self, request, models_data, global_filters, related_keys): + """Exporta múltiples modelos de DataStage a múltiples archivos Excel particionados""" + try: + zip_buffer = io.BytesIO() + + with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file: + + for model_data in models_data: + model_name = model_data.get('model') + fields = model_data.get('fields', []) + + if not model_name or not fields: + continue + + try: + model = apps.get_model('datastage', model_name) + filters = self.apply_related_filters(global_filters, model, related_keys, request.user) + + # Si hay filtros, aplicarlos; si no, obtener todos los registros + if filters: + queryset = model.objects.filter(**filters).values(*fields) + else: + queryset = model.objects.none() # No obtener nada si no hay filtros + + total_records = queryset.count() + + if total_records == 0: + continue + + if total_records > self.MAX_RECORDS_PER_FILE: + from django.core.paginator import Paginator + paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE) + + for page_num in paginator.page_range: + page = paginator.page(page_num) + + wb = openpyxl.Workbook() + ws = wb.active + ws.title = f"Parte_{page_num}"[:31] + + ws.append(fields) + + for row in page.object_list: + row_values = [self.safe_excel_value(row[field]) for field in fields] + ws.append(row_values) + + # Guardar parte en ZIP + part_buffer = io.BytesIO() + wb.save(part_buffer) + part_buffer.seek(0) + + filename = f"{model_name}_part{page_num}.xlsx" + zip_file.writestr(filename, part_buffer.getvalue()) + + else: + wb = openpyxl.Workbook() + ws = wb.active + ws.title = "Datos"[:31] + + ws.append(fields) + + # Escribir datos + for row in queryset: + row_values = [self.safe_excel_value(row[field]) for field in fields] + ws.append(row_values) + + part_buffer = io.BytesIO() + wb.save(part_buffer) + part_buffer.seek(0) + + filename = f"{model_name}.xlsx" + zip_file.writestr(filename, part_buffer.getvalue()) + + 
except LookupError as e: + continue + except Exception as e: + continue + + zip_buffer.seek(0) + zip_content = zip_buffer.getvalue() + + response = HttpResponse(zip_content, content_type='application/zip') + response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"' + response['Content-Length'] = len(zip_content) + + return response + + except Exception as e: + return Response({'error': f'Error en exportación particionada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + def export_datastage_multiple_to_csv(self, request, models_data, global_filters, related_keys): + """Exporta múltiples modelos de DataStage a múltiples archivos CSV en ZIP""" + zip_buffer = io.BytesIO() + + with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file: + + for model_data in models_data: + model_name = model_data.get('model') + fields = model_data.get('fields', []) + + if not model_name or not fields: + continue + + try: + model = apps.get_model('datastage', model_name) + filters = self.apply_related_filters(global_filters, model, related_keys, request.user) + + queryset = model.objects.filter(**filters).values(*fields) + total_records = queryset.count() + + if total_records == 0: + continue + + csv_buffer = io.StringIO() + writer = csv.writer(csv_buffer) + writer.writerow(fields) + + for row in queryset: + row_values = [self.safe_excel_value(row[field]) for field in fields] + writer.writerow(row_values) + + # Agregar al ZIP + filename = f"{model_name}.csv" + zip_file.writestr(filename, csv_buffer.getvalue()) + + except LookupError: + continue + + zip_buffer.seek(0) + + response = HttpResponse(zip_buffer.read(), content_type='application/zip') + response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"' + return response + + def export_datastage_multiple_partitioned_csv(self, request, models_data, global_filters, related_keys): + """Exporta múltiples modelos de DataStage a múltiples archivos CSV particionados en 
ZIP""" + try: + zip_buffer = io.BytesIO() + + with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file: + + for model_data in models_data: + model_name = model_data.get('model') + fields = model_data.get('fields', []) + + if not model_name or not fields: + continue + + try: + model = apps.get_model('datastage', model_name) + filters = self.apply_related_filters(global_filters, model, related_keys, request.user) + + queryset = model.objects.filter(**filters).values(*fields) + total_records = queryset.count() + + if total_records == 0: + continue + + if total_records > self.MAX_RECORDS_PER_FILE: + from django.core.paginator import Paginator + paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE) + + for page_num in paginator.page_range: + page = paginator.page(page_num) + + csv_buffer = io.StringIO() + writer = csv.writer(csv_buffer) + + writer.writerow(fields) + + for row in page.object_list: + row_values = [self.safe_excel_value(row[field]) for field in fields] + writer.writerow(row_values) + + # Agregar al ZIP + filename = f"{model_name}_part{page_num}.csv" + zip_file.writestr(filename, csv_buffer.getvalue()) + + else: + # Modelo pequeño, exportar completo + csv_buffer = io.StringIO() + writer = csv.writer(csv_buffer) + + # Escribir encabezados + writer.writerow(fields) + + # Escribir datos + for row in queryset: + row_values = [self.safe_excel_value(row[field]) for field in fields] + writer.writerow(row_values) + + # Agregar al ZIP + filename = f"{model_name}.csv" + zip_file.writestr(filename, csv_buffer.getvalue()) + + except LookupError as e: + continue + except Exception as e: + continue + + zip_buffer.seek(0) + + response = HttpResponse(zip_buffer.read(), content_type='application/zip') + response['Content-Disposition'] = 'attachment; filename="datastage_reports.zip"' + return response + + except Exception as e: + return Response({'error': f'Error en exportación CSV particionada: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + 
+ def export_single_model_partitioned(self, request, model_name, fields, filters, total_records): + """Exporta un solo modelo particionado a ZIP""" + try: + zip_buffer = io.BytesIO() + module = 'datastage' + + model = apps.get_model(module, model_name) + queryset = model.objects.filter(**filters).values(*fields) + + with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file: + from django.core.paginator import Paginator + paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE) + + for page_num in paginator.page_range: + page = paginator.page(page_num) + + # Crear Excel para esta parte + wb = openpyxl.Workbook() + ws = wb.active + ws.title = f"Parte_{page_num}"[:31] + ws.append(fields) + + for row in page.object_list: + row_values = [self.safe_excel_value(row[field]) for field in fields] + ws.append(row_values) + + part_buffer = io.BytesIO() + wb.save(part_buffer) + part_buffer.seek(0) + + filename = f"{model_name}_part{page_num}.xlsx" + zip_file.writestr(filename, part_buffer.getvalue()) + + zip_buffer.seek(0) + zip_content = zip_buffer.getvalue() + + response = HttpResponse(zip_content, content_type='application/zip') + response['Content-Disposition'] = f'attachment; filename="{model_name}_particionado.zip"' + response['Content-Length'] = len(zip_content) + + return response + + except Exception as e: + return Response({'error': f'Error exportando modelo: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + def export_single_model_csv_partitioned(self, request, model_name, fields, filters, total_records): + """Exporta un solo modelo CSV particionado a ZIP""" + try: + zip_buffer = io.BytesIO() + module = 'datastage' + + model = apps.get_model(module, model_name) + queryset = model.objects.filter(**filters).values(*fields) + + with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file: + from django.core.paginator import Paginator + paginator = Paginator(queryset, self.MAX_RECORDS_PER_FILE) + + for page_num in 
paginator.page_range: + page = paginator.page(page_num) + + csv_buffer = io.StringIO() + writer = csv.writer(csv_buffer) + writer.writerow(fields) + + for row in page.object_list: + row_values = [self.safe_excel_value(row[field]) for field in fields] + writer.writerow(row_values) + + # Agregar al ZIP + filename = f"{model_name}_part{page_num}.csv" + zip_file.writestr(filename, csv_buffer.getvalue()) + + zip_buffer.seek(0) + + zip_content = zip_buffer.getvalue() + + response = HttpResponse(zip_content, content_type='application/zip') + response['Content-Disposition'] = f'attachment; filename="{model_name}_particionado.zip"' + response['Content-Length'] = len(zip_content) + + return response + + except Exception as e: + return Response({'error': f'Error exportando modelo CSV: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + def get_related_keys_from_filters(self, global_filters, models_data, user): + """ + Obtiene patentes, pedimentos y datastages que cumplen EXACTAMENTE con TODOS los filtros globales + para usarlos como relación entre modelos + """ + + related_keys = { + 'patentes': set(), + 'pedimentos': set(), + 'datastage_ids': set() + } + + # Si no hay filtros globales, retornar vacío (no hay relación) + if not any(global_filters.values()): + return {} + + all_records_with_filters = [] + + # Buscar en TODOS los modelos que puedan tener los campos de filtro + for model_data in models_data: + model_name = model_data.get('model') + try: + model = apps.get_model('datastage', model_name) + model_fields = [f.name for f in model._meta.get_fields()] + + # Construir filtros EXACTOS con TODOS los campos disponibles + filters = {} + has_any_filter = False + + if 'organizacion' in model_fields and global_filters.get('organizacion'): + filters['organizacion'] = global_filters['organizacion'] + has_any_filter = True + + if 'patente' in model_fields and global_filters.get('patente'): + filters['patente'] = global_filters['patente'] + has_any_filter = True + + if 
'pedimento' in model_fields and global_filters.get('pedimento'): + filters['pedimento'] = global_filters['pedimento'] + has_any_filter = True + + if 'rfc' in model_fields and global_filters.get('rfc'): + filters['rfc'] = global_filters['rfc'] + has_any_filter = True + + if 'fecha_pago_real' in model_fields: + if global_filters.get('fecha_pago_desde'): + filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde'] + has_any_filter = True + + if global_filters.get('fecha_pago_hasta'): + filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta'] + has_any_filter = True + + if has_any_filter: + records = model.objects.filter(**filters).values('patente', 'pedimento', 'datastage_id') + record_count = records.count() + all_records_with_filters.extend(list(records)) + + except LookupError: + continue + + if not all_records_with_filters: + return {'patentes': set(), 'pedimentos': set(), 'datastage_ids': set()} + + for record in all_records_with_filters: + if record.get('patente'): + related_keys['patentes'].add(record['patente']) + if record.get('pedimento'): + related_keys['pedimentos'].add(record['pedimento']) + if record.get('datastage_id'): + related_keys['datastage_ids'].add(record['datastage_id']) + + related_keys = {k: list(v) for k, v in related_keys.items() if v} + + return related_keys + + def apply_global_filters_to_model(self, global_filters, model, user): + """ + Aplica filtros globales específicamente para modelos DataStage (modo simple) + """ + filters = {} + model_fields = [f.name for f in model._meta.get_fields()] + + if 'organizacion' in model_fields and global_filters.get('organizacion'): + filters['organizacion'] = global_filters['organizacion'] + + if 'patente' in model_fields and global_filters.get('patente'): + filters['patente'] = global_filters['patente'] + + if 'pedimento' in model_fields and global_filters.get('pedimento'): + filters['pedimento'] = global_filters['pedimento'] + + if 'rfc' in model_fields and 
def apply_related_filters(self, global_filters, model, related_keys, user):
    """
    Build ORM filter kwargs for *model* from the pre-computed related keys
    (strict related mode) or, when no related keys exist, directly from the
    global filters (direct mode).

    Args:
        global_filters: dict of raw filter values.
        model: Django model class whose fields gate which filters apply.
        related_keys: dict with optional 'patentes', 'pedimentos',
            'datastage_ids' collections (from get_related_keys_from_filters).
        user: request user (unused; kept for interface stability).

    Returns:
        dict: kwargs for ``model.objects.filter``. An EMPTY dict in related
        mode signals the model has none of the relation fields and should
        yield no records.
    """
    filters = {}
    model_fields = [f.name for f in model._meta.get_fields()]

    has_related_keys = any(related_keys.values())

    if has_related_keys:
        # STRICT related mode: use ONLY the pre-computed keys.
        # FIX: removed an unused local `from django.db.models import Q`
        # and a never-used `related_conditions = Q()` from the original.
        has_related_conditions = False

        if related_keys.get('patentes') and 'patente' in model_fields:
            filters['patente__in'] = related_keys['patentes']
            has_related_conditions = True

        if related_keys.get('pedimentos') and 'pedimento' in model_fields:
            filters['pedimento__in'] = related_keys['pedimentos']
            has_related_conditions = True

        if related_keys.get('datastage_ids') and 'datastage_id' in model_fields:
            filters['datastage_id__in'] = related_keys['datastage_ids']
            has_related_conditions = True

        # Model has none of the relation fields: an empty filter dict tells
        # the caller to fetch no records.
        if not has_related_conditions:
            return {}
    else:
        # DIRECT mode: no related keys; apply global filters only for the
        # fields this model actually has.
        if 'patente' in model_fields and global_filters.get('patente'):
            filters['patente'] = global_filters['patente']
        if 'pedimento' in model_fields and global_filters.get('pedimento'):
            filters['pedimento'] = global_filters['pedimento']
        if 'rfc' in model_fields and global_filters.get('rfc'):
            filters['rfc'] = global_filters['rfc']

    # Organisation and pay-date range apply in BOTH modes (the original set
    # 'organizacion' twice in direct mode with the same value; once suffices).
    if 'organizacion' in model_fields and global_filters.get('organizacion'):
        filters['organizacion'] = global_filters['organizacion']
    if 'fecha_pago_real' in model_fields:
        if global_filters.get('fecha_pago_desde'):
            filters['fecha_pago_real__gte'] = global_filters['fecha_pago_desde']
        if global_filters.get('fecha_pago_hasta'):
            filters['fecha_pago_real__lte'] = global_filters['fecha_pago_hasta']

    return filters
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def control_pedimento(request):
    """
    Trigger the async task that generates the 'control de pedimentos' CSV
    report and return HTTP 202 with tracking info.

    Query params:
        organizacion_id (required): organisation whose pedimentos to report.
        fecha_pago__gte / fecha_pago__lte (optional): pay-date range, passed
            through as the raw strings the client sent.
        pedimento_app (optional): pedimento-app filter.

    Returns:
        202 with report_id/status/created_at on success,
        400 when organizacion_id is missing.
    """
    org_id = request.query_params.get('organizacion_id')
    if not org_id:
        return Response({"error": "organizacion_id es requerido"}, status=400)

    # FIX: query_params.get already returns None for absent params, so the
    # original's `x if x else None` re-bindings were redundant; removed.
    filtros = {
        "pedimento_app": request.query_params.get('pedimento_app'),
        "organizacion_id": org_id,
        "fecha_pago__gte": request.query_params.get('fecha_pago__gte'),
        "fecha_pago__lte": request.query_params.get('fecha_pago__lte'),
    }

    # Persist the report row first so the client can poll its status.
    report = ReportDocument.objects.create(
        user=request.user,
        filters=filtros,
        status='pending',
        report_type='control_pedimento',
    )

    # Fire the Celery task asynchronously.
    generate_report_control_pedimento.delay(report.id)

    return Response({
        "report_id": report.id,
        "status": report.status,
        "created_at": report.created_at,
        "message": "Reporte en proceso de generación",
        "download_url": report.file.url if report.file else None,
    }, status=202)