import Photos
import Vision
import CoreLocation
import UIKit

class PhotoClassifier {

    struct ClassifiedPhotos {
        var screenshots: [PHAsset] = []
        var locations: [String: [PHAsset]] = [:]   // grouped by location
        var people: [String: [PHAsset]] = [:]      // grouped by person
        var similarPhotos: [[PHAsset]] = []        // groups of similar photos
    }

    func classifyPhotos(
        assets: PHFetchResult<PHAsset>,
        progressHandler: @escaping (String, Float) -> Void,
        completion: @escaping (ClassifiedPhotos) -> Void
    ) {
        // Do the heavy lifting on a background queue
        DispatchQueue.global(qos: .userInitiated).async {
            var result = ClassifiedPhotos()
            let group = DispatchGroup()

            // Kick off
            DispatchQueue.main.async {
                progressHandler("Loading photos...", 0.0)
            }

            // 1. Detect screenshots (20% of overall progress)
            group.enter()
            self.fetchScreenshots(from: assets) { screenshots in
                result.screenshots = screenshots
                DispatchQueue.main.async {
                    progressHandler("Detecting screenshots...", 0.2)
                }
                group.leave()
            }

            // 2. Detect similar photos (remaining 80% of overall progress)
            group.enter()
            self.detectSimilarPhotos(
                assets: assets,
                progressHandler: { stage, progress in
                    // Map the similar-photo progress onto the 20%-100% range
                    let mappedProgress = 0.2 + (progress * 0.8)
                    DispatchQueue.main.async {
                        progressHandler(stage, mappedProgress)
                    }
                }
            ) { similarPhotos in
                result.similarPhotos = similarPhotos
                group.leave()
            }

            // Wait for every task to finish
            group.notify(queue: .main) {
                progressHandler("Classification complete", 1.0)
                completion(result)
            }
        }
    }

    private func detectSimilarPhotos(
        assets: PHFetchResult<PHAsset>,
        progressHandler: @escaping (String, Float) -> Void,
        completion: @escaping ([[PHAsset]]) -> Void
    ) {
        var similarGroups: [[PHAsset]] = []
        let group = DispatchGroup()
        var imageFeatures: [(asset: PHAsset, feature: VNFeaturePrintObservation)] = []

        // Serial processing queue; the semaphore limits in-flight work to 5 items
        let processingQueue = DispatchQueue(label: "com.app.similarPhotos", qos: .userInitiated)
        let semaphore = DispatchSemaphore(value: 5)

        // 1. Extract a feature print for every photo (first 60% of this stage's progress)
        let totalAssets = assets.count
        var processedAssets = 0
        progressHandler("Loading photos...", 0.0)

        for i in 0..<totalAssets {
            let asset = assets.object(at: i)
            group.enter()
            semaphore.wait()
            processingQueue.async {
                defer {
                    processedAssets += 1
                    let progress = Float(processedAssets) / Float(totalAssets)
                    progressHandler("Analyzing photos...", progress * 0.6)
                    semaphore.signal()
                    group.leave()
                }

                // Request a small thumbnail synchronously for feature extraction
                let options = PHImageRequestOptions()
                options.isSynchronous = true
                options.deliveryMode = .fastFormat
                options.resizeMode = .fast

                var thumbnail: UIImage?
                PHImageManager.default().requestImage(
                    for: asset,
                    targetSize: CGSize(width: 300, height: 300),
                    contentMode: .aspectFit,
                    options: options
                ) { image, _ in
                    thumbnail = image
                }
                guard let cgImage = thumbnail?.cgImage else { return }

                // Generate a Vision feature print describing the image content
                let request = VNGenerateImageFeaturePrintRequest()
                let handler = VNImageRequestHandler(cgImage: cgImage)
                do {
                    try handler.perform([request])
                    if let feature = request.results?.first as? VNFeaturePrintObservation {
                        imageFeatures.append((asset: asset, feature: feature))
                    }
                } catch {
                    print("Feature extraction failed: \(error)")
                }
            }
        }

        // 2. Once every feature print is ready, compare them pairwise (remaining 40%)
        group.notify(queue: processingQueue) {
            let similarityThreshold: Float = 0.7   // assumed cutoff; tune for your own library
            let totalComparisons = max(1, imageFeatures.count * (imageFeatures.count - 1) / 2)
            var processedComparisons = 0
            var processedIndices = Set<Int>()

            for i in 0..<imageFeatures.count {
                if processedIndices.contains(i) { continue }
                var similarGroup = [imageFeatures[i].asset]

                for j in (i + 1)..<imageFeatures.count {
                    if processedIndices.contains(j) { continue }
                    do {
                        // Feature-print distance: smaller means more alike
                        var distance: Float = 0
                        try imageFeatures[i].feature.computeDistance(&distance, to: imageFeatures[j].feature)
                        let similarity = 1 - distance

                        if similarity >= similarityThreshold {
                            similarGroup.append(imageFeatures[j].asset)
                            processedIndices.insert(j)
                        }

                        // Update comparison progress
                        processedComparisons += 1
                        let compareProgress = Float(processedComparisons) / Float(totalComparisons)
                        progressHandler("Comparing similarity...", 0.6 + compareProgress * 0.4)
                    } catch {
                        print("Similarity computation failed: \(error)")
                    }
                }

                if similarGroup.count > 1 {
                    similarGroups.append(similarGroup)
                }
            }

            // Sort the groups by photo count, descending
            similarGroups.sort { $0.count > $1.count }

            DispatchQueue.main.async {
                completion(similarGroups)
            }
        }
    }

    // Group by location
    private func classifyByLocation(assets: PHFetchResult<PHAsset>, completion: @escaping ([String: [PHAsset]]) -> Void) {
        var locationGroups: [String: [PHAsset]] = [:]
        let group = DispatchGroup()
        let geocodeQueue = DispatchQueue(label: "com.app.geocoding")
        let semaphore = DispatchSemaphore(value: 10)   // limit concurrent geocoding requests

        assets.enumerateObjects { asset, _, _ in
            if let location = asset.location {
                group.enter()
                semaphore.wait()
                geocodeQueue.async {
                    let geocoder = CLGeocoder()
                    geocoder.reverseGeocodeLocation(location) { placemarks, error in
                        defer {
                            semaphore.signal()
                            group.leave()
                        }
                        if let placemark = placemarks?.first {
                            let locationName = self.formatLocationName(placemark)
                            DispatchQueue.main.async {
                                if locationGroups[locationName] == nil {
                                    locationGroups[locationName] = []
                                }
                                locationGroups[locationName]?.append(asset)
                            }
                        }
                    }
                }
            }
        }

        // Call back once every geocoding request has finished
        group.notify(queue: .main) {
            completion(locationGroups)
        }
    }

    // Format the location name (city only)
    private func formatLocationName(_ placemark: CLPlacemark) -> String {
        if let city = placemark.locality {
            return city
        }
        return "Other"
    }

    // Group by people
    private func classifyByPeople(assets: PHFetchResult<PHAsset>, completion: @escaping ([String: [PHAsset]]) -> Void) {
        var peopleGroups: [String: [PHAsset]] = [:]
        let group = DispatchGroup()
        // Collect the photos in which a face was detected
        var facesArray: [PHAsset] = []

        // Walk through every photo
        assets.enumerateObjects { asset, _, _ in
            group.enter()

            // Fetch a thumbnail of the photo for face detection
            let options = PHImageRequestOptions()
            options.isSynchronous = false
            options.deliveryMode = .fastFormat

            PHImageManager.default().requestImage(
                for: asset,
                targetSize: CGSize(width: 500, height: 500),   // smaller size for better performance
                contentMode: .aspectFit,
                options: options
            ) { image, _ in
                guard let image = image else {
                    group.leave()
                    return
                }

                // Detect faces with the Vision framework
                guard let ciImage = CIImage(image: image) else {
                    group.leave()
                    return
                }

                let request = VNDetectFaceRectanglesRequest()
                let handler = VNImageRequestHandler(ciImage: ciImage)
                do {
                    try handler.perform([request])
                    if let results = request.results, !results.isEmpty {
                        // A face was found; remember this asset
                        DispatchQueue.main.async {
                            facesArray.append(asset)
                        }
                    }
                } catch {
                    print("Face detection failed: \(error)")
                }
                group.leave()
            }
        }

        // Report the result once every detection has finished
        group.notify(queue: .main) {
            if !facesArray.isEmpty {
                peopleGroups["Photos with faces"] = facesArray
            }
            completion(peopleGroups)
        }
    }

    // Identify screenshots
    private func fetchScreenshots(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
        var screenshots: [PHAsset] = []

        // Fetch the system "Screenshots" smart album
        let screenshotAlbums = PHAssetCollection.fetchAssetCollections(
            with: .smartAlbum,
            subtype: .smartAlbumScreenshots,
            options: nil
        )

        // Pull every screenshot out of that album
        screenshotAlbums.enumerateObjects { collection, _, _ in
            let fetchOptions = PHFetchOptions()
            let screenshotAssets = PHAsset.fetchAssets(in: collection, options: fetchOptions)
            screenshotAssets.enumerateObjects { asset, _, _ in
                screenshots.append(asset)
            }
        }

        completion(screenshots)
    }
}
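
// A minimal usage sketch, not part of the class above: fetch all images from the
// library and run the classifier, printing progress to the console. It assumes photo
// library authorization has already been granted elsewhere; the function name
// runPhotoClassificationExample is illustrative only.
func runPhotoClassificationExample() {
    let fetchOptions = PHFetchOptions()
    fetchOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
    let allPhotos = PHAsset.fetchAssets(with: .image, options: fetchOptions)

    let classifier = PhotoClassifier()
    classifier.classifyPhotos(
        assets: allPhotos,
        progressHandler: { stage, progress in
            // Delivered on the main queue, so it is safe to drive UI from here
            print("\(stage) \(Int(progress * 100))%")
        },
        completion: { classified in
            print("Screenshots: \(classified.screenshots.count)")
            print("Similar photo groups: \(classified.similarPhotos.count)")
        }
    )
}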