"""
文件导入服务
"""

import json
import logging
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional, Tuple
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from sqlalchemy.exc import IntegrityError

from app.core.config import settings
from app.models.raw_orders import RawOrder, ProcessedFile
from app.utils.file_hash import calculate_file_sha1
from app.utils.excel_reader import ExcelReader
from app.utils.file_validator import FileValidator

logger = logging.getLogger(__name__)


class ImportService:
    """文件导入服务"""

    def __init__(self):
        self.excel_reader = ExcelReader()
        self.file_validator = FileValidator()
        
    async def scan_files(self) -> List[Path]:
        """
        扫描共享文件夹中的 Excel 文件
        只返回最新的"原始订单导出"开头的文件
        
        Returns:
            包含最新文件的列表（如果存在）
        """
        inbox_path = settings.shared_inbox_path
        logger.debug(f"Scanning directory: {inbox_path} (type: {type(inbox_path)})")
        
        if not inbox_path.exists():
            logger.warning(f"Shared inbox directory does not exist: {inbox_path}")
            return []
        
        # Find Excel files whose names start with "原始订单导出"
        matching_files = []
        for pattern in ['*.xls', '*.xlsx']:
            pattern_files = list(inbox_path.glob(pattern))
            logger.debug(f"Files matching {pattern}: {len(pattern_files)}")
            for file_path in pattern_files:
                # Check whether the file name starts with "原始订单导出"
                if file_path.name.startswith('原始订单导出'):
                    matching_files.append(file_path)
                    logger.debug(f"Found matching file: {file_path.name}")
        
        if not matching_files:
            logger.info(f"No files starting with '原始订单导出' found in {inbox_path}")
            return []
        
        # Select the file with the latest modification time
        latest_file = max(matching_files, key=lambda x: x.stat().st_mtime)
        
        logger.info(f"Found latest order export file: {latest_file.name} "
                   f"(modified: {datetime.fromtimestamp(latest_file.stat().st_mtime).strftime('%Y-%m-%d %H:%M:%S')})")
        
        # Return only the newest file
        return [latest_file]
    
    async def check_file_processed(self, db: AsyncSession, file_path: Path) -> Optional[ProcessedFile]:
        """
        检查文件是否已处理
        
        Args:
            db: 数据库会话
            file_path: 文件路径
            
        Returns:
            如果已处理返回 ProcessedFile 记录，否则返回 None
        """
        try:
            file_hash = calculate_file_sha1(file_path)
            result = await db.execute(
                select(ProcessedFile).where(ProcessedFile.file_hash == file_hash)
            )
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(f"Error checking processed file {file_path}: {e}")
            return None
    
    async def import_file(self, db: AsyncSession, file_path: Path, mode: str = 'incremental') -> Dict[str, Any]:
        """
        导入单个文件
        
        Args:
            db: 数据库会话
            file_path: 文件路径
            mode: 导入模式 ('incremental'=仅新增, 'replace'=替换, 'update'=更新)
            
        Returns:
            导入结果字典
        """
        start_time = datetime.now()
        file_hash = calculate_file_sha1(file_path)
        file_size = file_path.stat().st_size
        
        result = {
            'file_name': file_path.name,
            'file_path': str(file_path),
            'file_hash': file_hash,
            'file_size': file_size,
            'start_time': start_time,
            'status': 'processing',
            'rows_total': 0,
            'rows_imported': 0,
            'rows_failed': 0,
            'errors': []
        }
        
        try:
            # Dry-run mode: analyze only, skip the actual import
            if settings.DRY_RUN_ANALYZE_ONLY:
                logger.info(f"DRY_RUN mode: skipping actual import of {file_path.name}")
                result.update({
                    'status': 'skipped',
                    'message': 'Skipped due to DRY_RUN_ANALYZE_ONLY=true'
                })
                return result

            # File quality validation (if enabled)
            if settings.FILE_VALIDATION_ENABLED:
                logger.info(f"Validating file quality: {file_path.name}")
                validation_result = self.file_validator.validate_excel_file(file_path)

                if not validation_result.is_valid and settings.FILE_VALIDATION_STRICT:
                    logger.error(f"File validation failed for {file_path.name}")
                    logger.error(f"Validation errors: {validation_result.errors}")

                    result.update({
                        'status': 'rejected',
                        'message': 'File validation failed',
                        'validation_errors': validation_result.errors,
                        'validation_warnings': validation_result.warnings,
                        'validation_statistics': validation_result.statistics
                    })

                    # Record the failure in the ProcessedFile table
                    await self._record_failed_import(
                        db, file_path, file_hash, file_size,
                        {'validation_errors': validation_result.errors}
                    )

                    return result

                # Log any warnings but continue processing
                if validation_result.warnings:
                    logger.warning(f"File validation warnings for {file_path.name}: {validation_result.warnings}")
                    result['validation_warnings'] = validation_result.warnings

            # Read and parse the Excel file
            logger.info(f"Processing file: {file_path.name}")
            records = self.excel_reader.process_excel_file(file_path)
            result['rows_total'] = len(records)
            
            if not records:
                result.update({
                    'status': 'empty',
                    'message': 'No data rows found in file'
                })
                return result
            
            # Detect and flag merged orders, and compute per-item subtotals
            records = self._detect_merged_orders_and_calculate_subtotals(records)
            
            # Batch import
            imported_count, failed_count, errors = await self._batch_import_records(
                db, records, file_path.name, file_hash, mode
            )
            
            result.update({
                'rows_imported': imported_count,
                'rows_failed': failed_count,
                'errors': errors,
                'status': 'success' if failed_count == 0 else 'partial_success'
            })
            
            # Record the processing result
            await self._save_processed_file(db, result)
            
            end_time = datetime.now()
            duration = (end_time - start_time).total_seconds()
            logger.info(f"Import completed: {file_path.name} - "
                       f"{imported_count}/{len(records)} rows imported in {duration:.2f}s")
            
        except Exception as e:
            logger.error(f"Import failed for {file_path.name}: {e}")
            result.update({
                'status': 'error',
                'error_message': str(e),
                'errors': [str(e)]
            })
            
            # Record the failure
            try:
                await self._save_processed_file(db, result)
            except Exception as save_error:
                logger.error(f"Failed to save error result: {save_error}")
        
        return result
    
    async def _batch_import_records(self, 
                                   db: AsyncSession, 
                                   records: List[Dict[str, Any]], 
                                   file_name: str, 
                                   file_hash: str,
                                   mode: str = 'incremental') -> Tuple[int, int, List[str]]:
        """
        批量导入记录
        
        Args:
            db: 数据库会话
            records: 记录列表
            file_name: 文件名
            file_hash: 文件哈希
            mode: 导入模式 ('incremental'=仅新增, 'replace'=替换, 'update'=更新)
            
        Returns:
            (成功数量, 失败数量, 错误列表)
        """
        imported_count = 0
        failed_count = 0
        errors = []
        skipped_count = 0  # number of duplicate records skipped
        updated_count = 0  # number of records updated
        
        # Pre-check: find records that already exist
        existing_orders = await self._check_existing_orders(db, records)
        logger.info(f"Pre-check complete: {len(existing_orders)} of {len(records)} records already exist")

        # Extra debug detail
        if existing_orders and logger.isEnabledFor(logging.DEBUG):
            sample_keys = list(existing_orders)[:3]
            logger.debug(f"Sample existing keys: {sample_keys}")

        # Process in batches
        batch_size = settings.BATCH_SIZE
        for i in range(0, len(records), batch_size):
            batch = records[i:i + batch_size]
            
            try:
                # Build RawOrder instances
                raw_orders = []
                for record in batch:
                    # Check for duplicates
                    order_key = self._get_order_key(record)
                    if order_key in existing_orders:
                        if mode == 'incremental':
                            skipped_count += 1
                            if skipped_count <= 3 and logger.isEnabledFor(logging.DEBUG):
                                logger.debug(f"Skipping duplicate record: order_id={record.get('原始订单编号')}, key={order_key[:100]}")
                            continue  # skip the existing record
                        elif mode == 'update':
                            # Update the existing record (specific fields only)
                            try:
                                updated = await self._update_existing_order(db, record)
                                if updated:
                                    updated_count += 1
                                else:
                                    failed_count += 1
                            except Exception as update_error:
                                row_idx = record.get('row_idx', '?')
                                error_msg = f"Row {row_idx} 更新失败: {str(update_error)}"
                                errors.append(error_msg)
                                failed_count += 1
                                logger.debug(error_msg)
                            continue
                    
                    try:
                        raw_order = await self._create_raw_order(record, file_name, file_hash)
                        raw_orders.append(raw_order)
                    except Exception as create_error:
                        # Handle individual record creation errors
                        row_idx = record.get('row_idx', '?')
                        error_msg = f"Row {row_idx} creation failed: {str(create_error)}"
                        errors.append(error_msg)
                        failed_count += 1
                        logger.debug(error_msg)
                        continue
                
                if raw_orders:
                    # Bulk insert
                    db.add_all(raw_orders)
                    await db.commit()
                    
                    imported_count += len(raw_orders)
                    logger.debug(f"Imported batch {i//batch_size + 1}: {len(raw_orders)} records")
                
            except IntegrityError as e:
                await db.rollback()
                # Handle unique-constraint violations
                if "UNIQUE constraint failed" in str(e) or "uk_raw_order_dedup" in str(e):
                    logger.info(f"Batch {i//batch_size + 1} contains duplicates, switching to single import mode")
                    # Retry row by row to identify the specific duplicates
                    single_imported, single_failed, single_errors = await self._single_import_batch(
                        db, batch, file_name, file_hash, mode, existing_orders
                    )
                    imported_count += single_imported
                    failed_count += single_failed
                    errors.extend(single_errors)
                else:
                    error_msg = f"Database constraint error for batch {i}-{i+len(batch)-1}: {str(e)}"
                    logger.error(error_msg)
                    errors.append(error_msg)
                    failed_count += len(batch)
            except Exception as e:
                await db.rollback()
                error_msg = f"Batch import failed for records {i}-{i+len(batch)-1}: {str(e)}"
                logger.error(error_msg)
                
                # Retry row by row to identify the specific errors, passing the
                # same mode and pre-checked keys as the batch path
                single_imported, single_failed, single_errors = await self._single_import_batch(
                    db, batch, file_name, file_hash, mode, existing_orders
                )
                
                imported_count += single_imported
                failed_count += single_failed
                errors.extend(single_errors)
        
        if skipped_count > 0 or updated_count > 0:
            logger.info(f"Import finished: {imported_count} inserted, {updated_count} updated, "
                        f"{skipped_count} duplicates skipped, {failed_count} failed")

        # Updated records count toward the success total
        return imported_count + updated_count, failed_count, errors
    
    async def _single_import_batch(self, 
                                   db: AsyncSession, 
                                   batch: List[Dict[str, Any]], 
                                   file_name: str, 
                                   file_hash: str,
                                   mode: str = 'incremental',
                                   existing_orders: Optional[set] = None) -> Tuple[int, int, List[str]]:
        """
        逐条导入批次（用于错误恢复）
        
        Args:
            db: 数据库会话
            batch: 批次记录
            file_name: 文件名
            file_hash: 文件哈希
            
        Returns:
            (成功数量, 失败数量, 错误列表)
        """
        imported = 0
        failed = 0
        errors = []
        skipped = 0
        
        if existing_orders is None:
            existing_orders = set()
        
        for record in batch:
            # Check for duplicates
            order_key = self._get_order_key(record)
            if order_key in existing_orders:
                if mode == 'incremental':
                    skipped += 1
                    continue
                elif mode == 'update':
                    # Update the existing record
                    try:
                        updated = await self._update_existing_order(db, record)
                        if updated:
                            imported += 1  # count updates as successes
                        else:
                            failed += 1
                    except Exception as e:
                        await db.rollback()
                        row_idx = record.get('row_idx', '?')
                        error_msg = f"Row {row_idx}: 更新失败 - {str(e)}"
                        errors.append(error_msg)
                        failed += 1
                    continue
            
            try:
                raw_order = await self._create_raw_order(record, file_name, file_hash)
                db.add(raw_order)
                await db.commit()
                imported += 1
            except IntegrityError as e:
                await db.rollback()
                row_idx = record.get('row_idx', '?')
                order_id = record.get('原始订单编号', 'N/A')
                
                # Is this a duplicate-order violation?
                if "uk_raw_order_dedup" in str(e) or "UNIQUE constraint failed" in str(e):
                    # Duplicates are skipped, not recorded as errors
                    logger.debug(f"Duplicate order skipped: {order_id}")
                    skipped += 1
                else:
                    error_msg = f"Row {row_idx}: 数据库约束错误 - {str(e)}"
                    logger.error(error_msg)
                    errors.append(error_msg)
                    failed += 1
                
            except Exception as e:
                await db.rollback()
                row_idx = record.get('row_idx', '?')
                error_msg = f"Row {row_idx}: {str(e)}"
                errors.append(error_msg)
                failed += 1
                logger.debug(error_msg)
        
        return imported, failed, errors
    
    def _preprocess_for_dedup(self, record: Dict[str, Any]) -> Dict[str, Any]:
        """
        预处理记录以确保去重约束生效
        将NULL值替换为默认值，避免SQLite NULL!=NULL问题
        
        Args:
            record: 原始记录数据
            
        Returns:
            预处理后的记录数据
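
        Example (illustrative doctest-style sketch):

            >>> ImportService()._preprocess_for_dedup(
            ...     {'线上宝贝名称': None, '数量': ' '})['线上宝贝名称']
            ''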
        """
        # Work on a copy to avoid mutating the caller's data
        # (datetime is already imported at module level)
        processed_record = record.copy()
        
        # Default values for the de-duplication key fields
        dedup_field_defaults = {
            '线上宝贝名称': '',                          # empty string
            '线上销售属性': '',                          # empty string
            '原始订单编号': '',                          # empty string
            '付款时间': datetime(1900, 1, 1, 0, 0, 0),  # sentinel datetime
            '数量': 0                                    # default quantity
        }
        
        # Replace NULL values in the de-duplication fields
        for field, default_value in dedup_field_defaults.items():
            if field in processed_record:
                # Treat None, empty, and whitespace-only values uniformly
                if processed_record[field] is None or str(processed_record[field]).strip() == '':
                    processed_record[field] = default_value
                    logger.debug(f"Preprocessed field {field}: None/empty -> {default_value}")
        
        return processed_record
    
    def _detect_merged_orders_and_calculate_subtotals(self, records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        检测合并订单并计算商品小计
        
        合并订单特征：
        - 同一个原始订单编号出现多次
        - 每行代表不同的商品
        - 订单金额是所有商品的总金额
        
        Args:
            records: 订单记录列表
            
        Returns:
            更新后的记录列表，增加了商品小计和合并订单标识
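
        Example (illustrative doctest-style sketch):

            >>> rows = [
            ...     {'原始订单编号': 'T1', '数量': 1, '订单单价': 10, '订单金额': 30},
            ...     {'原始订单编号': 'T1', '数量': 2, '订单单价': 10, '订单金额': 30},
            ... ]
            >>> svc = ImportService()
            >>> out = svc._detect_merged_orders_and_calculate_subtotals(rows)
            >>> [(r['是否合并订单'], r['商品小计']) for r in out]
            [(True, 10.0), (True, 20.0)]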
        """
        from collections import defaultdict
        
        # Group records by order number
        order_groups = defaultdict(list)
        for idx, record in enumerate(records):
            order_id = record.get('原始订单编号')
            if order_id:
                order_groups[order_id].append((idx, record))
        
        # Process each order group
        for order_id, group in order_groups.items():
            is_merged = len(group) > 1  # multiple rows under one order number => merged order

            if is_merged:
                # Merged order: recompute each item's subtotal.
                # Get the order total (it should be identical on every row).
                total_amount = None
                for idx, record in group:
                    order_amount = record.get('订单金额')
                    if order_amount:
                        try:
                            total_amount = float(order_amount)
                            break
                        except (ValueError, TypeError):
                            pass
                
                # Compute each item's subtotal
                for idx, record in group:
                    # Flag as a merged order
                    records[idx]['是否合并订单'] = True

                    # Compute the item subtotal
                    quantity = record.get('数量')
                    unit_price = record.get('订单单价')
                    
                    if quantity and unit_price:
                        try:
                            qty = float(quantity)
                            price = float(unit_price)
                            item_subtotal = qty * price
                            records[idx]['商品小计'] = item_subtotal
                            logger.debug(f"合并订单 {order_id} - 商品小计: {item_subtotal} (数量: {qty} × 单价: {price})")
                        except (ValueError, TypeError) as e:
                            logger.warning(f"无法计算订单 {order_id} 的商品小计: {e}")
                            # 如果无法计算，使用订单金额除以商品数作为备选方案
                            if total_amount and len(group) > 0:
                                records[idx]['商品小计'] = total_amount / len(group)
                            else:
                                records[idx]['商品小计'] = None
                    else:
                        # Quantity or unit price missing; cannot compute the subtotal
                        if total_amount and len(group) > 0:
                            # Split the total evenly across the items
                            records[idx]['商品小计'] = total_amount / len(group)
                        else:
                            records[idx]['商品小计'] = None
            else:
                # Single-item order
                idx, record = group[0]
                records[idx]['是否合并订单'] = False

                # For single-item orders, subtotal = quantity × unit price
                quantity = record.get('数量')
                unit_price = record.get('订单单价')
                
                if quantity and unit_price:
                    try:
                        qty = float(quantity)
                        price = float(unit_price)
                        item_subtotal = qty * price
                        records[idx]['商品小计'] = item_subtotal
                    except (ValueError, TypeError):
                        # If the calculation fails, fall back to the order amount
                        order_amount = record.get('订单金额')
                        if order_amount:
                            try:
                                records[idx]['商品小计'] = float(order_amount)
                            except (ValueError, TypeError):
                                records[idx]['商品小计'] = None
                        else:
                            records[idx]['商品小计'] = None
                else:
                    # Use the order amount as the item subtotal
                    order_amount = record.get('订单金额')
                    if order_amount:
                        try:
                            records[idx]['商品小计'] = float(order_amount)
                        except (ValueError, TypeError):
                            records[idx]['商品小计'] = None
                    else:
                        records[idx]['商品小计'] = None
        
        logger.info(f"订单分析完成: 总订单数 {len(order_groups)}, 合并订单数 {sum(1 for g in order_groups.values() if len(g) > 1)}")
        
        return records

    async def _create_raw_order(self, record: Dict[str, Any], file_name: str, file_hash: str) -> RawOrder:
        """
        创建 RawOrder 实例
        
        Args:
            record: 记录数据
            file_name: 文件名
            file_hash: 文件哈希
            
        Returns:
            RawOrder 实例
        """
        # 预处理记录以确保去重约束生效
        record = self._preprocess_for_dedup(record)
        # 提取已知字段
        known_fields = {}
        extra_fields = {}
        
        # Column names defined on the RawOrder model
        raw_order_columns = {
            'row_idx', '原始订单编号', '网店名称', '交易状态', '付款时间', '下载时间', '拍下时间', 
            '交易完成时间', '买家旺旺号', '买家留言', '收货人姓名', '收货人手机号', '收货人身份证', 
            '收货地址', '线上宝贝名称', '线上销售属性', '线上商家编码', '商品编号', 'SKU编号', '图片', 
            '数量', '订单单价', '订单金额', '实付金额', '退款状态', '退款金额', '卖家备注', '卖家留言', 
            '发货时间', '物流公司', '物流单号', '商品小计', '是否合并订单'
        }
        
        for key, value in record.items():
            if key in raw_order_columns:
                # Known fields can contain datetime objects (they map to DateTime columns)
                known_fields[key] = value
            else:
                # Extra fields will be stored as JSON, so convert datetime objects to strings
                if isinstance(value, datetime):
                    extra_fields[key] = value.isoformat()
                elif value is None:
                    extra_fields[key] = None
                else:
                    extra_fields[key] = value
        
        # Build the instance
        raw_order = RawOrder(
            file_name=file_name,
            file_hash=file_hash,
            extra_fields=extra_fields if extra_fields else None,
            **known_fields
        )
        
        return raw_order
    
    async def _save_processed_file(self, db: AsyncSession, result: Dict[str, Any]):
        """
        保存文件处理记录
        
        Args:
            db: 数据库会话
            result: 处理结果
        """
        processed_file = ProcessedFile(
            file_name=result['file_name'],
            file_path=result['file_path'],
            file_hash=result['file_hash'],
            file_size=result['file_size'],
            rows_total=result['rows_total'],
            rows_imported=result['rows_imported'],
            rows_failed=result['rows_failed'],
            errors=result['errors'] if result['errors'] else None,
            status=result['status']
        )
        
        try:
            db.add(processed_file)
            await db.commit()
        except IntegrityError:
            # The file already exists; update its record
            await db.rollback()
            stmt = select(ProcessedFile).where(ProcessedFile.file_hash == result['file_hash'])
            existing = await db.execute(stmt)
            existing_file = existing.scalar_one_or_none()
            
            if existing_file:
                existing_file.rows_total = result['rows_total']
                existing_file.rows_imported = result['rows_imported']
                existing_file.rows_failed = result['rows_failed']
                existing_file.errors = result['errors'] if result['errors'] else None
                existing_file.status = result['status']
                existing_file.processed_at = datetime.now()
                
                await db.commit()

    async def _record_failed_import(self, db: AsyncSession, file_path: Path,
                                   file_hash: str, file_size: int,
                                   error_details: Dict[str, Any]):
        """
        记录失败的导入到ProcessedFile表

        Args:
            db: 数据库会话
            file_path: 文件路径
            file_hash: 文件哈希
            file_size: 文件大小
            error_details: 错误详情
        """
        processed_file = ProcessedFile(
            file_name=file_path.name,
            file_path=str(file_path),
            file_hash=file_hash,
            file_size=file_size,
            rows_total=0,
            rows_imported=0,
            rows_failed=0,
            errors=error_details,
            status='validation_failed'
        )

        try:
            db.add(processed_file)
            await db.commit()
        except IntegrityError:
            # If the file already exists, mark it as failed
            await db.rollback()
            stmt = select(ProcessedFile).where(ProcessedFile.file_hash == file_hash)
            existing = await db.execute(stmt)
            existing_file = existing.scalar_one_or_none()

            if existing_file:
                existing_file.errors = error_details
                existing_file.status = 'validation_failed'
                existing_file.processed_at = datetime.now()
                await db.commit()

    def _normalize_quantity(self, quantity: Any) -> str:
        """
        标准化数量字段为字符串格式的整数

        Args:
            quantity: 原始数量值

        Returns:
            标准化后的数量字符串
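
        Example (illustrative doctest-style sketch):

            >>> ImportService()._normalize_quantity(1.0)
            '1'
            >>> ImportService()._normalize_quantity('')
            '0'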
        """
        if quantity is None or quantity == '':
            return '0'

        try:
            # Convert via float then int to handle values such as 1.0
            qty = int(float(str(quantity)))
            return str(qty)
        except (ValueError, TypeError):
            return str(quantity)

    def _normalize_payment_time(self, payment_time: Any) -> str:
        """
        标准化付款时间格式

        Args:
            payment_time: 原始付款时间

        Returns:
            标准化后的付款时间字符串
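
        Example (illustrative doctest-style sketch):

            >>> ImportService()._normalize_payment_time('2025-10-25 12:35:55.000000')
            '2025-10-25 12:35:55'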
        """
        if payment_time is None or payment_time == '':
            return ''

        # Convert to string
        time_str = str(payment_time)

        # Strip the microsecond part (.000000)
        if '.' in time_str:
            time_str = time_str.split('.')[0]

        return time_str

    def _normalize_empty_value(self, value: Any) -> str:
        """
        标准化空值处理
        将 None 统一转换为空字符串，解决 None != '' 的问题

        Args:
            value: 原始值

        Returns:
            标准化后的字符串（None 转为空字符串）
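
        Example (illustrative doctest-style sketch):

            >>> ImportService()._normalize_empty_value(None)
            ''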
        """
        if value is None:
            return ''
        return str(value)

    async def _check_existing_orders(self, db: AsyncSession, records: List[Dict[str, Any]]) -> set:
        """
        检查哪些订单已经存在于数据库中

        Args:
            db: 数据库会话
            records: 待导入的记录列表

        Returns:
            已存在订单的key集合
        """
        from sqlalchemy import and_, or_

        existing_keys = set()

        # Query existing orders in batches
        batch_size = 500
        for i in range(0, len(records), batch_size):
            batch = records[i:i + batch_size]

            # Build the query conditions
            conditions = []
            for record in batch:
                # Normalize all field values (avoids None != '' mismatches)
                product_name = self._normalize_empty_value(record.get('线上宝贝名称'))
                sales_attr = self._normalize_empty_value(record.get('线上销售属性'))
                quantity = record.get('数量', 0)
                order_id = self._normalize_empty_value(record.get('原始订单编号'))

                # Normalize the payment time for prefix matching (the database
                # may store microseconds while the Excel value does not)
                payment_time = record.get('付款时间', '')
                normalized_time = self._normalize_payment_time(payment_time)

                condition = and_(
                    RawOrder.线上宝贝名称 == product_name,
                    RawOrder.线上销售属性 == sales_attr,
                    RawOrder.数量 == quantity,
                    RawOrder.原始订单编号 == order_id,
                    # Prefix match with LIKE to ignore the microsecond part
                    RawOrder.付款时间.like(f"{normalized_time}%") if normalized_time else RawOrder.付款时间.is_(None)
                )
                conditions.append(condition)

            if conditions:
                stmt = select(RawOrder.原始订单编号, RawOrder.线上宝贝名称,
                              RawOrder.线上销售属性, RawOrder.数量, RawOrder.付款时间).where(
                    or_(*conditions)
                )

                result = await db.execute(stmt)
                for row in result:
                    # Build the key from normalized values
                    key = self._get_order_key({
                        '原始订单编号': row[0],
                        '线上宝贝名称': row[1],
                        '线上销售属性': row[2],
                        '数量': row[3],
                        '付款时间': row[4]
                    })
                    existing_keys.add(key)

        return existing_keys

    def _get_order_key(self, record: Dict[str, Any]) -> str:
        """
        生成订单的唯一标识键

        Args:
            record: 订单记录

        Returns:
            订单唯一标识键
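
        Example (illustrative doctest-style sketch):

            >>> ImportService()._get_order_key({
            ...     '原始订单编号': 'T100', '线上宝贝名称': 'Widget',
            ...     '线上销售属性': 'red', '数量': 1.0,
            ...     '付款时间': '2025-10-25 12:35:55.000000'})
            'Widget|red|1|T100|2025-10-25 12:35:55'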
        """
        # Use the same field combination as the database unique constraint,
        # normalized so that None and '' compare as equal
        key_parts = [
            self._normalize_empty_value(record.get('线上宝贝名称')),
            self._normalize_empty_value(record.get('线上销售属性')),
            self._normalize_quantity(record.get('数量', 0)),  # normalized quantity
            self._normalize_empty_value(record.get('原始订单编号')),
            self._normalize_payment_time(record.get('付款时间', ''))  # normalized payment time
        ]
        return '|'.join(key_parts)
    
    async def _update_existing_order(self, db: AsyncSession, record: Dict[str, Any]) -> bool:
        """
        更新已存在订单的特定字段（交易状态、卖家备注、退款状态）
        
        Args:
            db: 数据库会话
            record: 包含更新数据的记录
            
        Returns:
            是否更新成功
        """
        from sqlalchemy import update, and_
        from sqlalchemy.sql import func
        
        try:
            # Normalize all field values (avoids None != '' mismatches)
            product_name = self._normalize_empty_value(record.get('线上宝贝名称'))
            sales_attr = self._normalize_empty_value(record.get('线上销售属性'))
            quantity = record.get('数量', 0)
            order_id = self._normalize_empty_value(record.get('原始订单编号'))

            # Normalize the payment time (strip microseconds; the database may store them while Excel does not)
            payment_time = record.get('付款时间', '')
            normalized_time = self._normalize_payment_time(payment_time)

            # Build the match conditions from the de-duplication fields.
            # The payment time uses a LIKE prefix match because the database may
            # store '2025-10-25 12:35:55.000000' while the Excel value is
            # '2025-10-25 12:35:55'.
            conditions = and_(
                RawOrder.线上宝贝名称 == product_name,
                RawOrder.线上销售属性 == sales_attr,
                RawOrder.数量 == quantity,
                RawOrder.原始订单编号 == order_id,
                RawOrder.付款时间.like(f"{normalized_time}%") if normalized_time else RawOrder.付款时间.is_(None)
            )
            
            # Fields to update (3 status fields + 2 computed fields)
            update_data = {}
            if '交易状态' in record:
                update_data['交易状态'] = record['交易状态']
            if '卖家备注' in record:
                update_data['卖家备注'] = record['卖家备注']
            if '退款状态' in record:
                update_data['退款状态'] = record['退款状态']
            # Also update the computed fields
            if '商品小计' in record:
                update_data['商品小计'] = record['商品小计']
            if '是否合并订单' in record:
                update_data['是否合并订单'] = record['是否合并订单']
            
            if not update_data:
                logger.debug(f"没有需要更新的字段: {record.get('原始订单编号', 'N/A')}")
                return False
            
            # Stamp the update time
            update_data['updated_at'] = func.now()
            
            # Execute the update
            stmt = update(RawOrder).where(conditions).values(**update_data)
            result = await db.execute(stmt)
            
            if result.rowcount > 0:
                await db.commit()
                logger.debug(f"成功更新订单 {record.get('原始订单编号', 'N/A')}: {list(update_data.keys())}")
                return True
            else:
                logger.warning(f"未找到需要更新的订单: {record.get('原始订单编号', 'N/A')}")
                return False
                
        except Exception as e:
            await db.rollback()
            logger.error(f"更新订单失败 {record.get('原始订单编号', 'N/A')}: {str(e)}")
            raise
    
    async def import_all_pending_files(self, db: AsyncSession, mode: str = 'update') -> List[Dict[str, Any]]:
        """
        导入最新的"原始订单导出"文件（如果尚未处理）
        
        Args:
            db: 数据库会话
            mode: 导入模式 ('incremental'=仅新增, 'update'=更新状态字段), 默认为'update'
            
        Returns:
            导入结果列表
        """
        # Scan for files (returns only the newest "原始订单导出" file)
        files = await self.scan_files()
        results = []
        
        if not files:
            logger.info("No '原始订单导出' files found for import")
            return results
        
        # Process the newest file
        file_path = files[0]  # scan_files returns a single newest file
        logger.info(f"Checking latest file for import: {file_path.name}")
        
        # Skip the file if it was already processed
        processed = await self.check_file_processed(db, file_path)
        if processed:
            logger.info(f"Latest file already processed: {file_path.name} "
                       f"(processed at {processed.processed_at.strftime('%Y-%m-%d %H:%M:%S')})")
            results.append({
                'file_name': file_path.name,
                'status': 'already_processed',
                'processed_at': processed.processed_at
            })
        else:
            # Import the new file
            logger.info(f"Starting import for new file: {file_path.name} with mode: {mode}")
            result = await self.import_file(db, file_path, mode)
            results.append(result)
            
            if result.get('status') in ('success', 'partial_success'):
                logger.info(f"Successfully imported {file_path.name}: "
                           f"{result.get('rows_imported', 0)} records imported, "
                           f"{result.get('rows_failed', 0)} failed")
            else:
                logger.warning(f"Import failed for {file_path.name}: {result.get('error_message', 'Unknown error')}")
        
        return results
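

if __name__ == "__main__":
    # Minimal manual-run sketch (illustrative only). The session factory
    # import below is an assumption; this project may expose it under a
    # different module or name.
    import asyncio

    from app.db.session import async_session  # assumed async session factory

    async def _demo() -> None:
        service = ImportService()
        async with async_session() as db:
            results = await service.import_all_pending_files(db, mode='update')
            for item in results:
                print(item.get('file_name'), item.get('status'))

    asyncio.run(_demo())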