import Photos
import Vision
import CoreLocation
import CoreImage
import UIKit

class ClassifyPhoto {

    struct PhotoSizeInfo {
        var totalSize: Int64 = 0
        var count: Int = 0
    }

    struct ClassifiedPhotos {
        var screenshots: [PHAsset] = []
        var locations: [String: [PHAsset]] = [:]   // grouped by place
        var people: [String: [PHAsset]] = [:]      // grouped by person
        var similarPhotos: [[PHAsset]] = []        // groups of similar photos
        var blurryPhotos: [PHAsset] = []           // blurry photos

        // Size information per category
        var screenshotsSize = PhotoSizeInfo()
        var locationsSize = PhotoSizeInfo()
        var peopleSize = PhotoSizeInfo()
        var similarPhotosSize = PhotoSizeInfo()
        var blurryPhotosSize = PhotoSizeInfo()
    }

    // Location cache (declared for reverse-geocoding results; not wired up yet)
    private var locationCache: [String: String] = [:]

    func classifyPhotos(
        assets: PHFetchResult<PHAsset>,
        progressHandler: @escaping (String, Float) -> Void,
        completion: @escaping (ClassifiedPhotos) -> Void
    ) {
        // Do all the work on a background queue
        DispatchQueue.global(qos: .userInitiated).async {
            var result = ClassifiedPhotos()
            let group = DispatchGroup()

            DispatchQueue.main.async {
                progressHandler("Loading photos...", 0.0)
            }

            // Blur detection (the first 30% of reported progress)
            group.enter()
            DispatchQueue.main.async {
                progressHandler("Detecting blurry photos...", 0.0)
            }
            self.detectBlurryPhotos(from: assets) { blurryPhotos in
                result.blurryPhotos = blurryPhotos
                DispatchQueue.main.async {
                    progressHandler("Blur detection finished", 0.3)
                }
                group.leave()
            }

            // 1. Detect screenshots (reported at 30%)
            group.enter()
            self.fetchScreenshots(from: assets) { screenshots in
                result.screenshots = screenshots
                DispatchQueue.main.async {
                    progressHandler("Detecting screenshots...", 0.3)
                }
                group.leave()
            }

            // 2. Detect similar photos (mapped into the 30%-90% range)
            group.enter()
            self.detectSimilarPhotos(
                assets: assets,
                progressHandler: { stage, progress in
                    // Map the detector's 0-1 progress into 30%-90% of the total
                    let mappedProgress = 0.3 + (progress * 0.6)
                    DispatchQueue.main.async {
                        progressHandler(stage, mappedProgress)
                    }
                }
            ) { similarPhotos in
                result.similarPhotos = similarPhotos
                group.leave()
            }

            // 3. Classify by location (reported at 80%)
            group.enter()
            self.classifyByLocation(assets: assets) { locationGroups in
                result.locations = locationGroups
                DispatchQueue.main.async {
                    progressHandler("Classifying by location...", 0.8)
                }
                group.leave()
            }
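
            // Concurrency shape: every task above and below enters the group
            // before it starts and leaves inside its completion handler, so the
            // group.notify further down fires exactly once, after all five tasks
            // have reported back, regardless of finishing order.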
            // 4. Classify by people (reported at 100%)
            group.enter()
            self.classifyByPeople(assets: assets) { peopleGroups in
                result.people = peopleGroups
                DispatchQueue.main.async {
                    progressHandler("Classifying by people...", 1.0)
                }
                group.leave()
            }

            // Once every classification has finished, compute per-category sizes
            group.notify(queue: .main) {
                let sizeGroup = DispatchGroup()

                // Screenshots
                sizeGroup.enter()
                self.calculateAssetsSize(result.screenshots) { sizeInfo in
                    result.screenshotsSize = sizeInfo
                    sizeGroup.leave()
                }

                // Location groups
                sizeGroup.enter()
                let locationAssets = Array(result.locations.values.flatMap { $0 })
                self.calculateAssetsSize(locationAssets) { sizeInfo in
                    result.locationsSize = sizeInfo
                    sizeGroup.leave()
                }

                // People groups
                sizeGroup.enter()
                let peopleAssets = Array(result.people.values.flatMap { $0 })
                self.calculateAssetsSize(peopleAssets) { sizeInfo in
                    result.peopleSize = sizeInfo
                    sizeGroup.leave()
                }

                // Similar-photo groups
                sizeGroup.enter()
                let similarAssets = Array(result.similarPhotos.flatMap { $0 })
                self.calculateAssetsSize(similarAssets) { sizeInfo in
                    result.similarPhotosSize = sizeInfo
                    sizeGroup.leave()
                }

                // Blurry photos
                sizeGroup.enter()
                self.calculateAssetsSize(result.blurryPhotos) { sizeInfo in
                    result.blurryPhotosSize = sizeInfo
                    sizeGroup.leave()
                }

                // Call back once every size has been computed
                sizeGroup.notify(queue: .main) {
                    progressHandler("Classification finished", 1.0)
                    completion(result)
                }
            }
        }
    }

    private func detectSimilarPhotos(
        assets: PHFetchResult<PHAsset>,
        progressHandler: @escaping (String, Float) -> Void,
        completion: @escaping ([[PHAsset]]) -> Void
    ) {
        var similarGroups: [[PHAsset]] = []
        let group = DispatchGroup()

        if #available(iOS 13.0, *) {
            var imageFeatures: [(asset: PHAsset, feature: VNFeaturePrintObservation)] = []

            // Serial queue that owns imageFeatures; the semaphore caps concurrent image requests
            let processingQueue = DispatchQueue(label: "com.app.similarPhotos", qos: .userInitiated)
            let semaphore = DispatchSemaphore(value: 5)
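
            // VNGenerateImageFeaturePrintRequest (iOS 13+) yields one compact
            // VNFeaturePrintObservation per image, and computeDistance(_:to:)
            // fills in a Float distance where smaller values mean more visually
            // similar images. The similarity cutoff used below is a tuning
            // heuristic, not a constant defined by the Vision framework.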
            // 1. Extract a feature print for every image
            let totalAssets = assets.count
            var processedAssets = 0

            progressHandler("Loading photos...", 0.0)

            for i in 0..<totalAssets {
                let asset = assets[i]
                group.enter()
                semaphore.wait()

                let options = PHImageRequestOptions()
                options.isSynchronous = false
                options.deliveryMode = .fastFormat
                options.resizeMode = .fast

                PHImageManager.default().requestImage(
                    for: asset,
                    targetSize: CGSize(width: 224, height: 224),   // small thumbnails are enough for feature prints
                    contentMode: .aspectFit,
                    options: options
                ) { image, _ in
                    defer {
                        semaphore.signal()
                        group.leave()
                    }
                    guard let cgImage = image?.cgImage else { return }

                    let request = VNGenerateImageFeaturePrintRequest()
                    let handler = VNImageRequestHandler(cgImage: cgImage)
                    do {
                        try handler.perform([request])
                        if let feature = request.results?.first as? VNFeaturePrintObservation {
                            processingQueue.async {
                                imageFeatures.append((asset: asset, feature: feature))
                                processedAssets += 1
                                let progress = Float(processedAssets) / Float(totalAssets)
                                progressHandler("Extracting features...", progress * 0.6)
                            }
                        }
                    } catch {
                        print("Feature extraction failed: \(error)")
                    }
                }
            }

            // 2. Compare feature prints pairwise and group similar photos
            group.notify(queue: processingQueue) {
                var processedIndices = Set<Int>()
                let similarityThreshold: Float = 0.7   // heuristic cutoff; tune against real libraries
                let totalComparisons = imageFeatures.count * (imageFeatures.count - 1) / 2
                var processedComparisons = 0

                for i in 0..<imageFeatures.count where !processedIndices.contains(i) {
                    var similarGroup = [imageFeatures[i].asset]
                    processedIndices.insert(i)

                    for j in (i + 1)..<imageFeatures.count where !processedIndices.contains(j) {
                        do {
                            var distance = Float(0)
                            try imageFeatures[i].feature.computeDistance(&distance, to: imageFeatures[j].feature)
                            // Treat 1 - distance as a similarity score
                            if 1 - distance >= similarityThreshold {
                                similarGroup.append(imageFeatures[j].asset)
                                processedIndices.insert(j)
                            }

                            // Report comparison progress (the last 40% of this stage)
                            processedComparisons += 1
                            let compareProgress = Float(processedComparisons) / Float(totalComparisons)
                            progressHandler("Comparing similarity...", 0.6 + compareProgress * 0.4)
                        } catch {
                            print("Similarity computation failed: \(error)")
                        }
                    }

                    if similarGroup.count > 1 {
                        similarGroups.append(similarGroup)
                    }
                }

                // Sort groups by photo count, descending
                similarGroups.sort { $0.count > $1.count }

                DispatchQueue.main.async {
                    completion(similarGroups)
                }
            }
        } else {
            // Feature prints are unavailable before iOS 13; report no similar photos
            DispatchQueue.main.async {
                completion([])
            }
        }
    }

    private func classifyByLocation(assets: PHFetchResult<PHAsset>, completion: @escaping ([String: [PHAsset]]) -> Void) {
        var locationGroups: [String: [PHAsset]] = [:]
        let group = DispatchGroup()
        let geocodeQueue = DispatchQueue(label: "com.app.geocoding")
        let semaphore = DispatchSemaphore(value: 10)   // limit concurrent geocoding requests

        assets.enumerateObjects { asset, _, _ in
            if let location = asset.location {
                group.enter()
                semaphore.wait()

                geocodeQueue.async {
                    let geocoder = CLGeocoder()
                    geocoder.reverseGeocodeLocation(location) { placemarks, _ in
                        defer {
                            semaphore.signal()
                            group.leave()
                        }
                        if let placemark = placemarks?.first {
                            let locationName = self.formatLocationName(placemark)
                            DispatchQueue.main.async {
                                locationGroups[locationName, default: []].append(asset)
                            }
                        }
                    }
                }
            }
        }

        // Call back once all geocoding has finished
        group.notify(queue: .main) {
            completion(locationGroups)
        }
    }

    // Format a place name (city preferred, falling back to the administrative area)
    private func formatLocationName(_ placemark: CLPlacemark) -> String {
        if let city = placemark.locality {
            return city
        } else if let area = placemark.administrativeArea {
            return area
        }
        return "Other"
    }

    // Classify by people
    private func classifyByPeople(assets: PHFetchResult<PHAsset>, completion: @escaping ([String: [PHAsset]]) -> Void) {
        var peopleGroups: [String: [PHAsset]] = [:]
        let group = DispatchGroup()

        // Photos in which at least one face was detected
        var facesArray: [PHAsset] = []

        assets.enumerateObjects { asset, _, _ in
            group.enter()

            // Request a thumbnail for face detection
            let options = PHImageRequestOptions()
            options.isSynchronous = false
            options.deliveryMode = .fastFormat

            PHImageManager.default().requestImage(
                for: asset,
                targetSize: CGSize(width: 500, height: 500),   // a smaller size keeps detection fast
                contentMode: .aspectFit,
                options: options
            ) { image, _ in
                guard let image = image, let ciImage = CIImage(image: image) else {
                    group.leave()
                    return
                }

                // Detect faces with the Vision framework
                let request = VNDetectFaceRectanglesRequest()
                let handler = VNImageRequestHandler(ciImage: ciImage)
                do {
                    try handler.perform([request])
                    if let results = request.results, !results.isEmpty {
                        // At least one face found; record the asset
                        DispatchQueue.main.async {
                            facesArray.append(asset)
                        }
                    }
                } catch {
                    print("Face detection failed: \(error)")
                }
                group.leave()
            }
        }

        // Report the result once every photo has been checked
        group.notify(queue: .main) {
            if !facesArray.isEmpty {
                peopleGroups["Photos with faces"] = facesArray
            }
            completion(peopleGroups)
        }
    }

    // Collect screenshots from the system smart album. Note that the passed-in
    // fetch result is not filtered here; the smart album is treated as authoritative.
    private func fetchScreenshots(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
        var screenshots: [PHAsset] = []

        // Fetch the system "Screenshots" smart album
        let screenshotAlbums = PHAssetCollection.fetchAssetCollections(
            with: .smartAlbum,
            subtype: .smartAlbumScreenshots,
            options: nil
        )

        // Gather every asset in the album
        screenshotAlbums.enumerateObjects { collection, _, _ in
            let fetchOptions = PHFetchOptions()
            let screenshotAssets = PHAsset.fetchAssets(in: collection, options: fetchOptions)
            screenshotAssets.enumerateObjects { asset, _, _ in
                screenshots.append(asset)
            }
        }

        completion(screenshots)
    }
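
    // The blur check below is a cheap spatial-domain heuristic rather than a true
    // Laplacian-variance measure: each thumbnail is rendered into an 8-bit
    // grayscale buffer, horizontal and vertical differences are sampled on a
    // sparse grid, and the averaged edge strength is compared against a fixed
    // threshold. Low overall edge strength is read as "blurry".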
    private func detectBlurryPhotos(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
        var blurryPhotos: [PHAsset] = []
        let group = DispatchGroup()
        let processingQueue = DispatchQueue(label: "com.app.blurryDetection", attributes: .concurrent)
        let resultQueue = DispatchQueue(label: "com.app.blurryResult")   // serializes access to the results
        let semaphore = DispatchSemaphore(value: 8)   // higher concurrency; the blur check is cheap

        // Progress tracking
        var processedCount = 0
        let totalCount = assets.count

        for i in 0..<totalCount {
            let asset = assets[i]
            group.enter()
            semaphore.wait()

            processingQueue.async {
                let options = PHImageRequestOptions()
                options.isSynchronous = false
                options.deliveryMode = .fastFormat
                options.resizeMode = .fast

                PHImageManager.default().requestImage(
                    for: asset,
                    targetSize: CGSize(width: 300, height: 300),   // a small thumbnail is enough for the heuristic
                    contentMode: .aspectFit,
                    options: options
                ) { image, _ in
                    defer {
                        semaphore.signal()
                        group.leave()
                    }
                    guard let cgImage = image?.cgImage else { return }

                    let isBlurry = self.isImageBlurry(cgImage)
                    resultQueue.async {
                        processedCount += 1
                        if isBlurry {
                            blurryPhotos.append(asset)
                        }
                    }
                }
            }
        }

        // Notify on resultQueue so every pending append is visible before reporting
        group.notify(queue: resultQueue) {
            print("Blur check finished: \(processedCount)/\(totalCount) processed, \(blurryPhotos.count) blurry")
            let result = blurryPhotos
            DispatchQueue.main.async {
                completion(result)
            }
        }
    }

    // Simplified Laplacian-style blur detection on a grayscale rendering
    private func isImageBlurry(_ image: CGImage) -> Bool {
        let width = image.width
        let height = image.height
        let stride = 2   // sample every other pixel for speed

        // Images too small to sample are treated as not blurry
        guard width > (2 * stride), height > (2 * stride) else {
            return false
        }

        var buffer = [UInt8](repeating: 0, count: width * height)
        let colorSpace = CGColorSpaceCreateDeviceGray()
        guard let context = CGContext(
            data: &buffer,
            width: width,
            height: height,
            bitsPerComponent: 8,
            bytesPerRow: width,
            space: colorSpace,
            bitmapInfo: CGImageAlphaInfo.none.rawValue
        ) else {
            return false
        }
        context.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))

        // Simplified edge detection: accumulate the strongest horizontal or
        // vertical difference around each sampled pixel
        var score: Double = 0
        for y in stride..<(height - stride) where y % stride == 0 {
            for x in stride..<(width - stride) where x % stride == 0 {
                let left = Int(buffer[y * width + (x - stride)])
                let right = Int(buffer[y * width + (x + stride)])
                let top = Int(buffer[(y - stride) * width + x])
                let bottom = Int(buffer[(y + stride) * width + x])

                let dx = abs(left - right)
                let dy = abs(top - bottom)
                score += Double(max(dx, dy))
            }
        }

        // Normalize by the number of sampled pixels
        let normalizedScore = score / Double((width * height) / (stride * stride))

        // Empirical threshold; may need adjustment against real photos
        let threshold = 20.0
        return normalizedScore < threshold
    }
}

extension ClassifyPhoto {
    // Read a single asset's on-disk size. "fileSize" is a private PHAssetResource
    // key accessed via KVC; it works in practice but is not documented API.
    func getAssetSize(_ asset: PHAsset, completion: @escaping (Int64) -> Void) {
        let resources = PHAssetResource.assetResources(for: asset)
        if let resource = resources.first,
           let fileSize = resource.value(forKey: "fileSize") as? CLong {
            completion(Int64(fileSize))
        } else {
            completion(0)
        }
    }

    // Compute the total size of a group of assets
    func calculateAssetsSize(_ assets: [PHAsset], completion: @escaping (PhotoSizeInfo) -> Void) {
        let group = DispatchGroup()
        var totalSize: Int64 = 0

        for asset in assets {
            group.enter()
            getAssetSize(asset) { size in
                totalSize += size
                group.leave()
            }
        }

        group.notify(queue: .main) {
            completion(PhotoSizeInfo(totalSize: totalSize, count: assets.count))
        }
    }
}
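
// A minimal usage sketch, not part of the original class: it assumes the app has
// NSPhotoLibraryUsageDescription set in Info.plist, requests read access, fetches
// every image, and runs the classifier. The runClassification name is illustrative.
func runClassification() {
    PHPhotoLibrary.requestAuthorization { status in
        guard status == .authorized else { return }

        let options = PHFetchOptions()
        options.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
        let allPhotos = PHAsset.fetchAssets(with: .image, options: options)

        ClassifyPhoto().classifyPhotos(
            assets: allPhotos,
            progressHandler: { stage, progress in
                print("\(stage): \(Int(progress * 100))%")
            },
            completion: { result in
                print("Screenshots: \(result.screenshots.count), blurry: \(result.blurryPhotos.count)")
                print("Similar groups: \(result.similarPhotos.count), places: \(result.locations.count)")
            }
        )
    }
}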