import Photos
import Vision
import UIKit
import CoreLocation

class ClassifyPhoto {

    struct PhotoSizeInfo {
        var totalSize: Int64 = 0
        var count: Int = 0
    }

    struct ClassifiedPhotos {
        var screenshots: [PHAsset] = []
        var locations: [String: [PHAsset]] = [:]   // grouped by place
        var people: [String: [PHAsset]] = [:]      // grouped by person
        var similarPhotos: [[PHAsset]] = []        // groups of similar photos
        var blurryPhotos: [PHAsset] = []           // blurry photos

        // Size information for each category
        var screenshotsSize = PhotoSizeInfo()
        var locationsSize = PhotoSizeInfo()
        var peopleSize = PhotoSizeInfo()
        var similarPhotosSize = PhotoSizeInfo()
        var blurryPhotosSize = PhotoSizeInfo()
    }

    // Cache for reverse-geocoded location names
    private var locationCache: [String: String] = [:]

    func classifyPhotos(
        assets: PHFetchResult<PHAsset>,
        progressHandler: @escaping (String, Float) -> Void,
        completion: @escaping (ClassifiedPhotos) -> Void
    ) {
        // Do all the work on a background queue
        DispatchQueue.global(qos: .userInitiated).async {
            var result = ClassifiedPhotos()
            let group = DispatchGroup()

            DispatchQueue.main.async {
                progressHandler("Loading photos...", 0.0)
            }

            // Detect blurry photos first (roughly the first 30% of overall progress)
            group.enter()
            DispatchQueue.main.async {
                progressHandler("Detecting blurry photos...", 0.0)
            }
            self.detectBlurryPhotos(from: assets) { blurryPhotos in
                result.blurryPhotos = blurryPhotos
                DispatchQueue.main.async {
                    progressHandler("Blur detection finished", 0.3)
                }
                group.leave()
            }

            // 1. Detect screenshots (fast; reported at the 30% mark)
            group.enter()
            self.fetchScreenshots(from: assets) { screenshots in
                result.screenshots = screenshots
                DispatchQueue.main.async {
                    progressHandler("Detecting screenshots...", 0.3)
                }
                group.leave()
            }

            // 2. Detect similar photos (mapped onto the 30%-90% range)
            group.enter()
            self.detectSimilarPhotos(
                assets: assets,
                progressHandler: { stage, progress in
                    // Map the similar-photo detection progress into 30%-90%
                    let mappedProgress = 0.3 + (progress * 0.6)
                    DispatchQueue.main.async {
                        progressHandler(stage, mappedProgress)
                    }
                }
            ) { similarPhotos in
                result.similarPhotos = similarPhotos
                group.leave()
            }

            // 3. Classify by location (currently disabled)
            // group.enter()
            // self.classifyByLocation(assets: assets) { locationGroups in
            //     result.locations = locationGroups
            //     DispatchQueue.main.async {
            //         progressHandler("Classifying by location...", 0.8)
            //     }
            //     group.leave()
            // }
            // 4. Classify by people (final stage, reported at 100%)
            group.enter()
            self.classifyByPeople(assets: assets) { peopleGroups in
                result.people = peopleGroups
                DispatchQueue.main.async {
                    progressHandler("Classifying by people...", 1.0)
                }
                group.leave()
            }

            // Once all classification has finished, compute category sizes
            group.notify(queue: .main) {
                let sizeGroup = DispatchGroup()

                // Blurry photos
                sizeGroup.enter()
                self.calculateAssetsSize(result.blurryPhotos) { sizeInfo in
                    result.blurryPhotosSize = sizeInfo
                    sizeGroup.leave()
                }

                // Similar photos
                sizeGroup.enter()
                let similarAssets = Array(result.similarPhotos.flatMap { $0 })
                self.calculateAssetsSize(similarAssets) { sizeInfo in
                    result.similarPhotosSize = sizeInfo
                    sizeGroup.leave()
                }

                // Screenshots
                sizeGroup.enter()
                self.calculateAssetsSize(result.screenshots) { sizeInfo in
                    result.screenshotsSize = sizeInfo
                    sizeGroup.leave()
                }

                // Location groups (disabled together with location classification)
                // sizeGroup.enter()
                // let locationAssets = Array(result.locations.values.flatMap { $0 })
                // self.calculateAssetsSize(locationAssets) { sizeInfo in
                //     result.locationsSize = sizeInfo
                //     sizeGroup.leave()
                // }

                // People groups
                sizeGroup.enter()
                let peopleAssets = Array(result.people.values.flatMap { $0 })
                self.calculateAssetsSize(peopleAssets) { sizeInfo in
                    result.peopleSize = sizeInfo
                    sizeGroup.leave()
                }

                // Call back once every size has been computed
                sizeGroup.notify(queue: .main) {
                    progressHandler("Classification finished", 1.0)
                    completion(result)
                }
            }
        }
    }

    // Helper that tries to release memory between heavy stages
    private func cleanupMemory() {
        // Drop any cached URL responses
        URLCache.shared.removeAllCachedResponses()

        // Drain an autorelease pool
        autoreleasepool {
            let _ = [String](repeating: "temp", count: 1)
        }

        #if os(iOS)
        // Trigger a simulated memory warning (private API; not App Store safe)
        UIApplication.shared.perform(Selector(("_performMemoryWarning")))
        #endif
    }

    func detectSimilarPhotos(
        assets: PHFetchResult<PHAsset>,
        progressHandler: @escaping (String, Float) -> Void,
        completion: @escaping ([[PHAsset]]) -> Void
    ) {
        guard #available(iOS 13.0, *) else {
            // VNFeaturePrintObservation requires iOS 13; report no similar photos
            DispatchQueue.main.async { completion([]) }
            return
        }

        var similarGroups: [[PHAsset]] = []
        let group = DispatchGroup()
        var imageFeatures: [(asset: PHAsset, feature: VNFeaturePrintObservation)] = []

        // Serial processing queue, with a semaphore to bound in-flight work
        let processingQueue = DispatchQueue(label: "com.app.similarPhotos", qos: .userInitiated)
        let semaphore = DispatchSemaphore(value: 4) // allow some concurrency for throughput

        // 1. Extract a feature print for every image (first 60% of this stage)
        let totalAssets = assets.count
        var processedAssets = 0

        DispatchQueue.main.async {
            progressHandler("Loading photos...", 0.0)
        }

        for i in 0..<totalAssets {
            group.enter()
            semaphore.wait()

            let asset = assets[i]
            processingQueue.async {
                autoreleasepool {
                    let options = PHImageRequestOptions()
                    options.isSynchronous = true
                    options.deliveryMode = .fastFormat
                    options.resizeMode = .fast

                    PHImageManager.default().requestImage(
                        for: asset,
                        targetSize: CGSize(width: 224, height: 224),
                        contentMode: .aspectFit,
                        options: options
                    ) { image, _ in
                        defer {
                            processedAssets += 1
                            let progress = Float(processedAssets) / Float(max(totalAssets, 1))
                            DispatchQueue.main.async {
                                progressHandler("Extracting image features...", progress * 0.6)
                            }
                            semaphore.signal()
                            group.leave()
                        }

                        guard let cgImage = image?.cgImage else { return }

                        let request = VNGenerateImageFeaturePrintRequest()
                        let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
                        do {
                            try handler.perform([request])
                            if let feature = request.results?.first as? VNFeaturePrintObservation {
                                imageFeatures.append((asset: asset, feature: feature))
                            }
                        } catch {
                            print("Feature extraction failed: \(error)")
                        }
                    }
                }
            }
        }

        // Wait for feature extraction to finish before comparing
        group.wait()

        // 2. Compare feature prints pairwise and group similar images
        let processingGroup = DispatchGroup()
        let resultsQueue = DispatchQueue(label: "com.app.similarResults")
        let processedIndices = Atomic<Set<Int>>(Set())
        let groupResults = Atomic<[Int: [PHAsset]]>([:])
        let similarityThreshold: Float = 0.75 // heuristic; tune for your data set

        // Process in batches so progress can be reported incrementally;
        // guard the Float conversion against division by zero
        let batchSize = min(50, imageFeatures.count)
        let batchCount = Float(imageFeatures.count) / Float(batchSize)
        let batches = batchCount.isFinite ? Int(ceil(batchCount)) : 1
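        // Grouping strategy: each ungrouped image i is compared against every
        // later image j; matches are folded into i's group and marked as
        // processed so they are not reused as seeds. This is O(n^2) in the
        // number of feature prints, which is why the comparison is batched
        // and throttled by the semaphore above.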
        for batchIndex in 0..<batches {
            let start = batchIndex * batchSize
            let end = min(start + batchSize, imageFeatures.count)

            for i in start..<end {
                // Skip images that already belong to a group
                if processedIndices.value.contains(i) { continue }

                processingGroup.enter()
                semaphore.wait()

                processingQueue.async {
                    var similarAssets: [PHAsset] = [imageFeatures[i].asset]

                    for j in (i + 1)..<imageFeatures.count {
                        if processedIndices.value.contains(j) { continue }
                        do {
                            var distance: Float = 0
                            try imageFeatures[i].feature.computeDistance(&distance, to: imageFeatures[j].feature)
                            // Feature-print distance is 0 for identical images
                            let similarity = 1 - distance
                            if similarity >= similarityThreshold {
                                similarAssets.append(imageFeatures[j].asset)
                                processedIndices.mutate { $0.insert(j) }
                            }
                        } catch {
                            print("Similarity computation failed: \(error)")
                        }
                    }

                    // Keep only groups that contain more than one image
                    if similarAssets.count > 1 {
                        resultsQueue.async {
                            groupResults.mutate { $0[i] = similarAssets }
                        }
                    }

                    // Update progress, with safety checks against invalid values
                    if imageFeatures.count > 0 {
                        let processedCount = Float(processedIndices.value.count)
                        let totalCount = Float(imageFeatures.count)
                        var progress: Float = 0
                        if processedCount.isFinite && totalCount.isFinite && totalCount > 0 {
                            progress = processedCount / totalCount
                            progress = max(0, min(1, progress)) // clamp to [0, 1]
                        }
                        DispatchQueue.main.async {
                            progressHandler("Comparing similarity...", 0.6 + progress * 0.4)
                        }
                    }

                    semaphore.signal()
                    processingGroup.leave()
                }
            }
        }

        processingGroup.wait()

        // Collect the groups and sort them by photo count, largest first
        similarGroups = Array(groupResults.value.values)
        similarGroups.sort { $0.count > $1.count }

        DispatchQueue.main.async {
            completion(similarGroups)
        }
    }

    func classifyByLocation(assets: PHFetchResult<PHAsset>, completion: @escaping ([String: [PHAsset]]) -> Void) {
        var locationGroups: [String: [PHAsset]] = [:]
        let group = DispatchGroup()
        let geocodeQueue = DispatchQueue(label: "com.app.geocoding")
        let semaphore = DispatchSemaphore(value: 10) // limit concurrent geocoding requests

        assets.enumerateObjects { asset, _, _ in
            if let location = asset.location {
                group.enter()
                semaphore.wait()

                geocodeQueue.async {
                    let geocoder = CLGeocoder()
                    geocoder.reverseGeocodeLocation(location) { placemarks, error in
                        defer {
                            semaphore.signal()
                            group.leave()
                        }

                        if let placemark = placemarks?.first {
                            let locationName = self.formatLocationName(placemark)
                            DispatchQueue.main.async {
                                locationGroups[locationName, default: []].append(asset)
                            }
                        }
                    }
                }
            }
        }

        // Call back once every geocoding request has completed
        group.notify(queue: .main) {
            completion(locationGroups)
        }
    }

    // Format a placemark as a location name (city preferred)
    func formatLocationName(_ placemark: CLPlacemark) -> String {
        if let city = placemark.locality {
            return city
        } else if let area = placemark.administrativeArea {
            return area
        }
        return "Other"
    }

    func classifyByPeople(assets: PHFetchResult<PHAsset>, completion: @escaping ([String: [PHAsset]]) -> Void) {
        var peopleGroups: [String: [PHAsset]] = [:]
        peopleGroups["Photos with faces"] = []

        // Always complete on the main queue
        let mainCompletion: ([String: [PHAsset]]) -> Void = { result in
            DispatchQueue.main.async {
                completion(result)
            }
        }

        // Cap the number of photos processed to avoid memory pressure
        let totalCount = min(500, assets.count)
        if totalCount == 0 {
            mainCompletion(peopleGroups)
            return
        }

        // Dedicated queues for processing and for collecting results
        let processingQueue = DispatchQueue(label: "com.app.peopleDetection", qos: .userInitiated, attributes: .concurrent)
        let resultQueue = DispatchQueue(label: "com.app.peopleResult", qos: .userInitiated)

        // Use an NSLock to guard the shared results dictionary
        let resultLock = NSLock()

        // Progress tracking
        let processedCount = Atomic<Int>(0)

        // Process in batches
        let batchSize = 20
        let batches = Int(ceil(Float(totalCount) / Float(batchSize)))

        // One group to wait for all batches
        let group = DispatchGroup()

        // A single shared Vision face-detection request
        let faceDetectionRequest = VNDetectFaceRectanglesRequest()

        // Keep self alive until all batches have finished
        var strongSelf: AnyObject? = self

        for batchIndex in 0..<batches {
            group.enter()
            processingQueue.async {
                autoreleasepool {
                    let start = batchIndex * batchSize
                    let end = min(start + batchSize, totalCount)

                    let batchGroup = DispatchGroup()
                    for index in start..<end {
                        let asset = assets[index]
                        batchGroup.enter()

                        let options = PHImageRequestOptions()
                        options.isSynchronous = false
                        options.deliveryMode = .fastFormat

                        // A small thumbnail is enough for face detection
                        PHImageManager.default().requestImage(
                            for: asset,
                            targetSize: CGSize(width: 128, height: 128),
                            contentMode: .aspectFit,
                            options: options
                        ) { image, _ in
                            guard let image = image else {
                                batchGroup.leave()
                                return
                            }

                            self.detectFace(in: image, request: faceDetectionRequest) { hasFace in
                                if hasFace {
                                    resultLock.lock()
                                    peopleGroups["Photos with faces"]?.append(asset)
                                    resultLock.unlock()
                                }
                                processedCount.mutate { $0 += 1 }
                                batchGroup.leave()
                            }
                        }
                    }
                    batchGroup.wait()
                    group.leave()
                }
            }
        }

        group.notify(queue: resultQueue) {
            strongSelf = nil // release the retained reference
            mainCompletion(peopleGroups)
        }
    }
    // Run face detection on a thumbnail and report whether any face was found
    private func detectFace(
        in image: UIImage,
        request: VNDetectFaceRectanglesRequest,
        completion: @escaping (Bool) -> Void
    ) {
        guard let cgImage = image.cgImage else {
            completion(false)
            return
        }

        // Perform the detection on a background thread
        DispatchQueue.global(qos: .userInitiated).async {
            autoreleasepool {
                let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
                do {
                    try handler.perform([request])
                    let hasFace = request.results?.isEmpty == false
                    completion(hasFace)
                } catch {
                    print("Face detection failed: \(error)")
                    completion(false)
                }
            }
        }
    }

    // Detect screenshots via the system "Screenshots" smart album
    func fetchScreenshots(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
        var screenshots: [PHAsset] = []

        // Fetch the system smart album that collects screenshots
        let screenshotAlbums = PHAssetCollection.fetchAssetCollections(
            with: .smartAlbum,
            subtype: .smartAlbumScreenshots,
            options: nil
        )

        // Collect every asset in the screenshots album
        screenshotAlbums.enumerateObjects { collection, _, _ in
            let fetchOptions = PHFetchOptions()
            let screenshotAssets = PHAsset.fetchAssets(in: collection, options: fetchOptions)
            screenshotAssets.enumerateObjects { asset, _, _ in
                screenshots.append(asset)
            }
        }

        completion(screenshots)
    }
    func detectBlurryPhotos(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
        var blurryPhotos: [PHAsset] = []
        let group = DispatchGroup()
        let processingQueue = DispatchQueue(label: "com.app.blurryDetection", attributes: .concurrent)
        let resultQueue = DispatchQueue(label: "com.app.blurryResult")
        let semaphore = DispatchSemaphore(value: 8) // bound the number of concurrent requests

        // Progress tracking
        var processedCount = 0
        let totalCount = assets.count

        // Process in batches
        let batchSize = 50
        let batches = Int(ceil(Float(totalCount) / Float(batchSize)))

        for batchIndex in 0..<batches {
            group.enter()
            processingQueue.async {
                autoreleasepool {
                    let start = batchIndex * batchSize
                    let end = min(start + batchSize, totalCount)

                    for index in start..<end {
                        semaphore.wait()
                        let asset = assets[index]

                        let options = PHImageRequestOptions()
                        options.isSynchronous = true
                        options.deliveryMode = .fastFormat
                        options.resizeMode = .fast

                        // A small thumbnail is enough to estimate sharpness
                        PHImageManager.default().requestImage(
                            for: asset,
                            targetSize: CGSize(width: 256, height: 256),
                            contentMode: .aspectFit,
                            options: options
                        ) { image, _ in
                            defer { semaphore.signal() }
                            guard let image = image else { return }

                            if self.isImageBlurry(image) {
                                resultQueue.async {
                                    blurryPhotos.append(asset)
                                }
                            }
                        }

                        resultQueue.async {
                            processedCount += 1
                        }
                    }
                    group.leave()
                }
            }
        }

        // Hand the results back once every batch has finished
        group.notify(queue: resultQueue) {
            DispatchQueue.main.async {
                completion(blurryPhotos)
            }
        }
    }

    func isImageBlurry(_ image: UIImage) -> Bool {
        guard let cgImage = image.cgImage else { return false }

        let width = cgImage.width
        let height = cgImage.height
        let pixelStride = 4 // named pixelStride to avoid shadowing Swift's stride function

        // Reject images that are too small to sample
        guard width > (2 * pixelStride), height > (2 * pixelStride) else {
            return false
        }

        // Render the image into a grayscale buffer for fast pixel access
        var buffer = [UInt8](repeating: 0, count: width * height)
        let colorSpace = CGColorSpaceCreateDeviceGray()
        guard let context = CGContext(
            data: &buffer,
            width: width,
            height: height,
            bitsPerComponent: 8,
            bytesPerRow: width,
            space: colorSpace,
            bitmapInfo: CGImageAlphaInfo.none.rawValue
        ) else {
            return false
        }

        context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))

        // Simplified Laplacian-style edge score
        var score: Double = 0
        var sampledPixels = 0

        // Sample only a coarse grid of the image
        let sampleRows = 10
        let sampleCols = 10
        let rowStep = max(1, height / sampleRows)
        let colStep = max(1, width / sampleCols)

        for y in Swift.stride(from: pixelStride, to: height - pixelStride, by: rowStep) {
            for x in Swift.stride(from: pixelStride, to: width - pixelStride, by: colStep) {
                let left = Int(buffer[y * width + (x - pixelStride)])
                let right = Int(buffer[y * width + (x + pixelStride)])
                let top = Int(buffer[(y - pixelStride) * width + x])
                let bottom = Int(buffer[(y + pixelStride) * width + x])

                // Simplified edge detection: strongest axis-aligned gradient
                let dx = abs(left - right)
                let dy = abs(top - bottom)
                score += Double(max(dx, dy))
                sampledPixels += 1
            }
        }

        // Avoid dividing by zero
        guard sampledPixels > 0 else { return false }

        // Normalize the score by the number of sampled pixels
        let normalizedScore = score / Double(sampledPixels)

        // Images whose average gradient falls below the threshold count as blurry
        let threshold = 15.0
        return normalizedScore < threshold
    }
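    // Worked example of the blur score above: a hard edge sampled across a
    // left/right pair (0 vs 255) contributes 255 to the score, while a flat
    // or softly graded region contributes close to 0. Averaged over the
    // roughly 100 sampled grid points, a sharp photo typically lands well
    // above the threshold of 15 and a defocused one below it. The threshold
    // is heuristic and may need tuning per device and thumbnail size.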
}

extension ClassifyPhoto {
    // Fetch the on-disk size of a single asset
    func getAssetSize(_ asset: PHAsset, completion: @escaping (Int64) -> Void) {
        DispatchQueue.global(qos: .background).async {
            let resources = PHAssetResource.assetResources(for: asset)
            if let resource = resources.first {
                var size: Int64 = 0
                // "fileSize" is an undocumented key, but it lets us read the
                // size without downloading the asset data
                if let fileSize = resource.value(forKey: "fileSize") as? CLong {
                    size = Int64(fileSize)
                }
                DispatchQueue.main.async {
                    completion(size)
                }
            } else {
                DispatchQueue.main.async {
                    completion(0)
                }
            }
        }
    }

    // Compute the total size of a group of assets
    func calculateAssetsSize(_ assets: [PHAsset], completion: @escaping (PhotoSizeInfo) -> Void) {
        print("Calculating total size of photo group")
        let group = DispatchGroup()
        var totalSize: Int64 = 0

        for asset in assets {
            group.enter()
            getAssetSize(asset) { size in
                totalSize += size
                group.leave()
            }
        }

        group.notify(queue: .main) {
            completion(PhotoSizeInfo(totalSize: totalSize, count: assets.count))
        }
    }
}

extension ClassifyPhoto {
    // Convert Display P3 images to sRGB before handing them to Vision
    private func processImageWithSafeColorSpace(_ image: UIImage) -> UIImage? {
        autoreleasepool {
            guard let cgImage = image.cgImage else { return image }

            // Only convert when the image actually uses the Display P3 color space
            if let colorSpace = cgImage.colorSpace, colorSpace.name == CGColorSpace.displayP3 {
                let sRGBColorSpace = CGColorSpaceCreateDeviceRGB()
                if let context = CGContext(
                    data: nil,
                    width: cgImage.width,
                    height: cgImage.height,
                    bitsPerComponent: 8,
                    bytesPerRow: 0,
                    space: sRGBColorSpace,
                    bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue
                ) {
                    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: cgImage.width, height: cgImage.height))
                    if let convertedImage = context.makeImage() {
                        return UIImage(cgImage: convertedImage, scale: image.scale, orientation: image.imageOrientation)
                    }
                }
            }
            return image
        }
    }

    // Image request wrapper that applies the color-space conversion above
    private func requestImageWithSafeProcessing(
        for asset: PHAsset,
        targetSize: CGSize,
        contentMode: PHImageContentMode,
        options: PHImageRequestOptions?,
        completion: @escaping (UIImage?) -> Void
    ) {
        PHImageManager.default().requestImage(
            for: asset,
            targetSize: targetSize,
            contentMode: contentMode,
            options: options
        ) { image, info in
            guard let image = image else {
                completion(nil)
                return
            }

            // Convert possible P3 images off the main thread, then call back on main
            DispatchQueue.global(qos: .userInitiated).async {
                let processedImage = self.processImageWithSafeColorSpace(image)
                DispatchQueue.main.async {
                    completion(processedImage)
                }
            }
        }
    }
}

// A minimal lock-protected wrapper for values shared across queues
class Atomic<T> {
    private var value_: T
    private let lock = NSLock()

    init(_ value: T) {
        self.value_ = value
    }

    var value: T {
        lock.lock()
        defer { lock.unlock() }
        return value_
    }

    func mutate(_ mutation: (inout T) -> Void) {
        lock.lock()
        defer { lock.unlock() }
        mutation(&value_)
    }
}
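// Usage sketch: a minimal example of driving the classifier. It assumes
// photo-library authorization has already been granted; the function name
// is illustrative only.
func exampleClassifyAllPhotos() {
    let classifier = ClassifyPhoto()
    let allPhotos = PHAsset.fetchAssets(with: .image, options: nil)

    classifier.classifyPhotos(
        assets: allPhotos,
        progressHandler: { stage, progress in
            print("\(stage): \(Int(progress * 100))%")
        },
        completion: { result in
            print("Screenshots: \(result.screenshots.count)")
            print("Blurry photos: \(result.blurryPhotos.count)")
            print("Similar groups: \(result.similarPhotos.count)")
        }
    )
}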