"""
Image download service - stores product images locally
Batch-downloads the image URLs from the product master table to local disk to improve performance
"""

import asyncio
import aiohttp
import aiofiles
import hashlib
import os
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any
from urllib.parse import urlparse

from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, update
import logging

from app.core.config import settings
from app.models.products_master import ProductMaster, ImageDownloadLog

logger = logging.getLogger(__name__)


class ImageDownloadService:
    """图片下载服务 - 实现图片本地化存储"""
    
    def __init__(self, images_dir: Optional[str] = None):
        # Image storage directory
        self.images_dir = Path(images_dir or settings.IMAGES_DIR or
                              Path(__file__).parent.parent.parent.parent / "images")
        self.images_dir.mkdir(parents=True, exist_ok=True)
        
        # Download configuration (read from settings)
        self.max_concurrent = settings.IMAGE_DOWNLOAD_MAX_CONCURRENT  # maximum concurrent downloads
        self.batch_size = settings.IMAGE_DOWNLOAD_BATCH_SIZE  # number of products per batch
        self.max_size_mb = 10   # maximum file size (MB)
        self.timeout = 30       # timeout (seconds)
        self.max_retries = 3    # maximum retry count
        
        # Request headers - mimic a browser
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Referer': 'https://www.taobao.com/',
        }
        
        logger.info(f"图片下载服务初始化完成, 存储目录: {self.images_dir}")
        logger.info(f"批量下载配置: batch_size={self.batch_size}, max_concurrent={self.max_concurrent}")
    
    async def delayed_batch_download(self, db: AsyncSession, delay_seconds: int = 5, 
                                     use_online_first: bool = True) -> Dict[str, Any]:
        """
        Delayed batch image download - asynchronous download run after the product master table is generated
        
        Two-phase processing is supported:
        1. Use the online URLs immediately (if use_online_first=True)
        2. Download to local storage asynchronously in the background
        
        Args:
            db: database session
            delay_seconds: delay in seconds before the download starts
            use_online_first: whether to serve the online URLs first
            
        Returns:
            Download result statistics
        """
        # If online-first is enabled, first mark all products to use their online URLs
        if use_online_first:
            await self._mark_products_use_online(db)
            logger.info("已标记所有产品使用在线URL，将在后台下载到本地")
        
        # Delayed execution
        if delay_seconds > 0:
            logger.info(f"延迟 {delay_seconds} 秒后开始批量下载图片到本地...")
            await asyncio.sleep(delay_seconds)
        
        # Download in batches
        total_result = {
            'total_products': 0,
            'processed_products': 0,
            'total_images': 0,
            'downloaded_images': 0,
            'failed_images': 0,
            'batches_processed': 0,
            'use_online_first': use_online_first
        }
        
        while True:
            # Fetch one batch of products pending download
            batch_result = await self.download_product_images(
                db=db,
                limit=self.batch_size
            )
            
            if batch_result['total_products'] == 0:
                break  # no more products need downloading
            
            # Accumulate statistics
            total_result['total_products'] += batch_result['total_products']
            total_result['processed_products'] += batch_result['processed_products']
            total_result['total_images'] += batch_result['total_images']
            total_result['downloaded_images'] += batch_result['downloaded_images']
            total_result['failed_images'] += batch_result['failed_images']
            total_result['batches_processed'] += 1
            
            logger.info(f"批次 {total_result['batches_processed']} 完成: "
                       f"下载 {batch_result['downloaded_images']}/{batch_result['total_images']} 张图片")
            
            # Pause briefly between batches to avoid excessive resource usage
            await asyncio.sleep(1)
        
        logger.info(f"批量图片下载完成: "
                   f"共 {total_result['batches_processed']} 批次, "
                   f"{total_result['processed_products']} 个产品, "
                   f"{total_result['downloaded_images']} 张图片下载成功")
        
        return total_result
    
    async def download_product_images(self, db: AsyncSession, 
                                    limit: Optional[int] = None,
                                    product_id: Optional[int] = None) -> Dict[str, Any]:
        """
        Download product images - main entry point
        
        Args:
            db: database session
            limit: maximum number of products to process
            product_id: specific product ID (to download a single product)
            
        Returns:
            Download result statistics
        """
        # Fetch products whose images need downloading
        products = await self._get_products_for_download(db, limit, product_id)
        
        result = {
            'total_products': len(products),
            'processed_products': 0,
            'total_images': 0,
            'downloaded_images': 0,
            'failed_images': 0,
            'skipped_images': 0,
            'errors': []
        }
        
        if not products:
            logger.info("没有需要下载图片的产品")
            return result
        
        logger.info(f"开始下载 {len(products)} 个产品的图片")
        
        # Build download tasks
        download_tasks = []
        for product in products:
            tasks = await self._create_download_tasks_for_product(db, product)
            download_tasks.extend(tasks)
            result['total_images'] += len(tasks)
        
        if not download_tasks:
            logger.info("没有图片需要下载")
            return result
        
        logger.info(f"共 {len(download_tasks)} 个图片下载任务")
        
        # Execute the downloads
        download_results = await self._execute_downloads(download_tasks)
        
        # Process the download results
        await self._process_download_results(db, download_results)
        
        # Tally the results
        for task_result in download_results:
            if task_result['success']:
                result['downloaded_images'] += 1
            else:
                result['failed_images'] += 1
                result['errors'].append(task_result.get('error', 'Unknown error'))
        
        result['processed_products'] = len(products)
        
        logger.info(f"图片下载完成: "
                   f"处理产品 {result['processed_products']}, "
                   f"下载成功 {result['downloaded_images']}, "
                   f"下载失败 {result['failed_images']}")
        
        return result
    
    async def _get_products_for_download(self, db: AsyncSession, 
                                       limit: Optional[int] = None,
                                       product_id: Optional[int] = None) -> List[ProductMaster]:
        """Fetch products whose images need downloading"""
        query = select(ProductMaster)
        
        if product_id:
            # Download the specified product
            query = query.where(ProductMaster.id == product_id)
        else:
            # Download pending products (including the online_pending state)
            query = query.where(ProductMaster.image_download_status.in_(['pending', 'failed', 'online_pending']))
            query = query.where(ProductMaster.original_image_urls.is_not(None))
        
        query = query.order_by(ProductMaster.created_at)
        
        if limit:
            query = query.limit(limit)
        
        result = await db.execute(query)
        products = result.scalars().all()
        
        logger.info(f"找到 {len(products)} 个产品需要下载图片")
        return products
    
    async def _create_download_tasks_for_product(self, db: AsyncSession, 
                                               product: ProductMaster) -> List[Dict[str, Any]]:
        """为产品创建下载任务"""
        tasks = []
        
        try:
            # Parse the image URLs
            urls = json.loads(product.original_image_urls or '[]')
            if not urls:
                return tasks
            
            for i, url in enumerate(urls):
                if not url or not url.strip():
                    continue
                
                url = url.strip()
                
                # Check whether a download log already exists
                existing_log = await self._get_existing_download_log(db, product.id, url)
                if existing_log and existing_log.download_status == 'completed':
                    continue  # already downloaded successfully, skip
                
                # Generate the local filename
                local_filename = self._generate_local_filename(url, product, i)
                local_path = self.images_dir / local_filename
                
                # Build the task
                task = {
                    'product_id': product.id,
                    'sku_key': product.sku_key,
                    'product_name': product.线上宝贝名称 or '',
                    'sales_attr': product.线上销售属性 or '',
                    'url': url,
                    'local_path': local_path,
                    'local_filename': local_filename,
                    'image_index': i,
                    'log_id': existing_log.id if existing_log else None
                }
                
                tasks.append(task)
        
        except (json.JSONDecodeError, TypeError) as e:
            logger.error(f"解析产品 {product.id} 图片URL失败: {e}")
        
        return tasks
    
    async def _get_existing_download_log(self, db: AsyncSession, 
                                       product_id: int, url: str) -> Optional[ImageDownloadLog]:
        """获取现有下载日志"""
        result = await db.execute(
            select(ImageDownloadLog)
            .where(ImageDownloadLog.product_master_id == product_id)
            .where(ImageDownloadLog.original_url == url)
        )
        return result.scalar_one_or_none()
    
    def _generate_local_filename(self, url: str, product: ProductMaster, index: int) -> str:
        """生成本地文件名"""
        # 解析URL获取文件扩展名
        parsed_url = urlparse(url)
        path = parsed_url.path
        ext = Path(path).suffix.lower()
        if not ext or ext not in ['.jpg', '.jpeg', '.png', '.gif', '.webp']:
            ext = '.jpg'  # default extension
        
        # Build a unique filename from the URL and product info
        url_hash = hashlib.md5(url.encode()).hexdigest()[:12]
        
        # Sanitize the product name and attributes
        safe_name = self._clean_filename(product.线上宝贝名称 or '')[:30]
        safe_attr = self._clean_filename(product.线上销售属性 or '')[:20]
        safe_brand = self._clean_filename(product.品牌 or '')[:15]
        
        # Compose the filename: hash_brand_name_attribute_index.ext
        filename_parts = [url_hash]
        if safe_brand:
            filename_parts.append(safe_brand)
        if safe_name:
            filename_parts.append(safe_name)
        if safe_attr:
            filename_parts.append(safe_attr)
        filename_parts.append(f"img{index}")
        
        filename = "_".join(filename_parts) + ext
        
        # Keep the filename length reasonable
        if len(filename) > 200:
            filename = f"{url_hash}_img{index}{ext}"
        
        return filename
    
    def _clean_filename(self, text: str) -> str:
        """清理文件名中的特殊字符"""
        if not text:
            return ""
        
        # Keep only word characters, CJK characters, whitespace, and hyphens
        import re
        cleaned = re.sub(r'[^\w\u4e00-\u9fff\s\-]', '', text)
        # Replace whitespace with underscores
        cleaned = re.sub(r'\s+', '_', cleaned)
        # Collapse repeated underscores
        cleaned = re.sub(r'_+', '_', cleaned).strip('_')
        
        return cleaned
    
    async def _execute_downloads(self, tasks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """执行下载任务"""
        semaphore = asyncio.Semaphore(self.max_concurrent)
        
        # Create the HTTP connector (connection pooling with a per-host limit)
        connector = aiohttp.TCPConnector(
            limit=self.max_concurrent * 2,
            limit_per_host=3,
            keepalive_timeout=30
        )
        
        timeout = aiohttp.ClientTimeout(total=60, connect=10)
        
        async with aiohttp.ClientSession(
            connector=connector,
            timeout=timeout,
            headers=self.headers
        ) as session:
            
            # Create the concurrent download coroutines
            download_coroutines = [
                self._download_single_image(session, semaphore, task)
                for task in tasks
            ]
            
            # Run all download tasks; return_exceptions=True keeps one failure from cancelling the rest
            results = await asyncio.gather(*download_coroutines, return_exceptions=True)
            
            # Convert raised exceptions into failure results
            processed_results = []
            for i, result in enumerate(results):
                if isinstance(result, Exception):
                    processed_results.append({
                        'success': False,
                        'error': str(result),
                        'task': tasks[i]
                    })
                else:
                    processed_results.append(result)
            
            return processed_results
    
    async def _download_single_image(self, session: aiohttp.ClientSession,
                                   semaphore: asyncio.Semaphore,
                                   task: Dict[str, Any]) -> Dict[str, Any]:
        """下载单个图片"""
        async with semaphore:
            return await self._do_download_image(session, task)
    
    async def _do_download_image(self, session: aiohttp.ClientSession,
                               task: Dict[str, Any]) -> Dict[str, Any]:
        """执行图片下载"""
        url = task['url']
        local_path = task['local_path']
        
        # Skip the download if the file already exists and has a reasonable size
        if local_path.exists():
            file_size = local_path.stat().st_size
            if file_size > 1024:  # larger than 1KB
                logger.debug(f"File already exists, skipping download: {local_path.name}")
                return {
                    'success': True,
                    'task': task,
                    'status': 'exists',
                    'file_size': file_size,
                    'local_path': str(local_path)
                }
        
        start_time = datetime.now()
        
        try:
            # Download the image
            async with session.get(url) as response:
                if response.status != 200:
                    return {
                        'success': False,
                        'task': task,
                        'error': f'HTTP {response.status}: {response.reason}',
                    }
                
                # Check the file size
                content_length = response.headers.get('content-length')
                if content_length:
                    size_mb = int(content_length) / (1024 * 1024)
                    if size_mb > self.max_size_mb:
                        return {
                            'success': False,
                            'task': task,
                            'error': f'File too large: {size_mb:.1f}MB > {self.max_size_mb}MB'
                        }
                
                # Stream the response body to disk in 8KB chunks
                async with aiofiles.open(local_path, 'wb') as f:
                    async for chunk in response.content.iter_chunked(8192):
                        await f.write(chunk)
                
                # Collect file info
                file_size = local_path.stat().st_size
                download_time = (datetime.now() - start_time).total_seconds()
                
                logger.debug(f"下载成功: {local_path.name} "
                           f"({file_size:,} bytes, {download_time:.2f}s)")
                
                return {
                    'success': True,
                    'task': task,
                    'status': 'downloaded',
                    'file_size': file_size,
                    'download_time': download_time,
                    'local_path': str(local_path)
                }
        
        except asyncio.TimeoutError:
            return {
                'success': False,
                'task': task,
                'error': 'Download timed out'
            }
        except Exception as e:
            return {
                'success': False,
                'task': task,
                'error': f'Download error: {str(e)}'
            }
    
    async def _process_download_results(self, db: AsyncSession, 
                                      results: List[Dict[str, Any]]):
        """处理下载结果，更新数据库"""
        product_updates = {}  # 产品级别的更新
        
        for result in results:
            task = result['task']
            product_id = task['product_id']
            
            # Update or create the download log entry
            await self._update_download_log(db, result)
            
            # Collect product-level update info
            if product_id not in product_updates:
                product_updates[product_id] = {
                    'local_paths': [],
                    'success_count': 0,
                    'total_count': 0
                }
            
            product_updates[product_id]['total_count'] += 1
            
            if result['success']:
                product_updates[product_id]['success_count'] += 1
                if 'local_path' in result:
                    product_updates[product_id]['local_paths'].append(result['local_path'])
        
        # Update the product master table
        for product_id, update_info in product_updates.items():
            await self._update_product_image_status(db, product_id, update_info)
        
        await db.commit()
    
    async def _update_download_log(self, db: AsyncSession, result: Dict[str, Any]):
        """更新下载日志"""
        task = result['task']
        
        if task['log_id']:
            # Update the existing log entry
            update_data = {
                'download_status': 'completed' if result['success'] else 'failed',
                'download_completed_at': datetime.now() if result['success'] else None,
                'error_message': result.get('error'),
                'file_size': result.get('file_size'),
                'local_file_path': result.get('local_path')
            }
            
            await db.execute(
                update(ImageDownloadLog)
                .where(ImageDownloadLog.id == task['log_id'])
                .values(**update_data)
            )
        else:
            # Create a new log entry
            log_entry = ImageDownloadLog(
                product_master_id=task['product_id'],
                sku_key=task['sku_key'],
                original_url=task['url'],
                local_file_path=result.get('local_path'),
                file_size=result.get('file_size'),
                file_name=task['local_filename'],
                download_status='completed' if result['success'] else 'failed',
                error_message=result.get('error'),
                download_started_at=datetime.now(),
                download_completed_at=datetime.now() if result['success'] else None
            )
            
            db.add(log_entry)
    
    async def _update_product_image_status(self, db: AsyncSession, 
                                         product_id: int, update_info: Dict[str, Any]):
        """更新产品图片状态"""
        # 判断整体状态
        if update_info['success_count'] == update_info['total_count']:
            status = 'completed'
        elif update_info['success_count'] > 0:
            status = 'partial'
        else:
            status = 'failed'
        
        # Update the product master table
        update_data = {
            'image_download_status': status,
            'updated_at': datetime.now()
        }
        
        # Set the local image paths
        if update_info['local_paths']:
            update_data['local_image_paths'] = json.dumps(update_info['local_paths'], ensure_ascii=False)
            update_data['main_image_path'] = update_info['local_paths'][0]  # first image becomes the main image
        
        await db.execute(
            update(ProductMaster)
            .where(ProductMaster.id == product_id)
            .values(**update_data)
        )
    
    async def _mark_products_use_online(self, db: AsyncSession) -> int:
        """
        Mark all pending products to use their online URLs
        so the frontend can display images immediately while downloads continue in the background
        
        Returns:
            Number of products marked
        """
        # Find products that have online URLs but have not been downloaded locally yet
        query = select(ProductMaster).where(
            ProductMaster.image_download_status.in_(['pending', 'failed']),
            ProductMaster.original_image_urls.is_not(None)
        )
        
        result = await db.execute(query)
        products = result.scalars().all()
        
        marked_count = 0
        for product in products:
            try:
                # Parse the online URLs
                urls = json.loads(product.original_image_urls or '[]')
                if urls and urls[0]:
                    # Temporarily set the main image to the first online URL
                    await db.execute(
                        update(ProductMaster)
                        .where(ProductMaster.id == product.id)
                        .values(
                            main_image_path=urls[0],  # temporarily use the online URL
                            image_download_status='online_pending',  # new status: online, pending local download
                            updated_at=datetime.now()
                        )
                    )
                    marked_count += 1
            except (json.JSONDecodeError, TypeError) as e:
                logger.error(f"处理产品 {product.id} 的图片URL失败: {e}")
                continue
        
        await db.commit()
        logger.info(f"已标记 {marked_count} 个产品使用在线URL")
        return marked_count
    
    def get_local_image_url(self, local_path: str) -> str:
        """生成本地图片的URL"""
        if not local_path:
            return ""
        
        # Path relative to the images directory, served as static files
        relative_path = Path(local_path).name
        return f"/api/v1/static/images/{relative_path}"
    
    async def download_single_image(self, url: str, product_id: int) -> Optional[str]:
        """
        Download a single image and return its local path
        
        Args:
            url: image URL
            product_id: product ID
            
        Returns:
            Local file path string, or None on failure
        """
        try:
            # Generate the local filename
            url_hash = hashlib.md5(url.encode()).hexdigest()[:8]
            ext = self._get_extension_from_url(url)
            filename = f"{product_id}_{url_hash}{ext}"
            local_path = self.images_dir / filename
            
            # Return immediately if the file already exists and has a reasonable size
            if local_path.exists():
                file_size = local_path.stat().st_size
                if file_size > 1024:  # larger than 1KB
                    logger.info(f"Image already exists: {filename}")
                    return f"images/{filename}"
            
            # Build the download task
            task = {
                'url': url,
                'local_path': local_path,
                'product_id': product_id
            }
            
            # Execute the download
            connector = aiohttp.TCPConnector()
            async with aiohttp.ClientSession(
                connector=connector,
                timeout=aiohttp.ClientTimeout(total=self.timeout),
                headers=self.headers
            ) as session:
                result = await self._do_download_image(session, task)
                
                if result['success']:
                    logger.info(f"图片下载成功: {filename}")
                    return f"images/{filename}"
                else:
                    logger.error(f"图片下载失败: {result.get('error')}")
                    return None
                    
        except Exception as e:
            logger.error(f"下载单张图片失败: {e}")
            return None
    
    def _get_extension_from_url(self, url: str) -> str:
        """从URL获取文件扩展名"""
        # 解析URL路径
        path = urlparse(url).path
        
        # Extract the extension from the path
        if '.' in path:
            ext = path.split('.')[-1].lower()
            # Validate the extension
            if ext in ['jpg', 'jpeg', 'png', 'gif', 'webp', 'bmp']:
                return f".{ext}"
        
        # Default to jpg
        return ".jpg"
