@@ -415,108 +415,301 @@ class ClassifyPhoto {
    }

    func classifyByPeople(assets: PHFetchResult<PHAsset>,
-                          completion: @escaping ([String: [PHAsset]]) -> Void) {
+                          completion: @escaping ([String: [PHAsset]]) -> Void) {
+        // Result dictionary
        var peopleGroups: [String: [PHAsset]] = [:]
-        let group = DispatchGroup()
+        peopleGroups["包含人脸的照片"] = []
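+        // The key is created up front so the worker closures below only ever append to it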
+
+        // All shared state (results, progress, completion flag) is guarded by this lock
+        let resultLock = NSLock()
+
+        // Deliver the result on the main queue, and at most once
+        // (the 30-second timeout below and the normal completion path can race)
+        var didComplete = false
+        let mainCompletion: ([String: [PHAsset]]) -> Void = { result in
+            resultLock.lock()
+            let alreadyCompleted = didComplete
+            didComplete = true
+            resultLock.unlock()
+            guard !alreadyCompleted else { return }
+            DispatchQueue.main.async {
+                completion(result)
+            }
+        }
+
+        // Cap the number of photos processed to avoid memory pressure
+        let totalCount = min(500, assets.count)
+        if totalCount == 0 {
+            mainCompletion(peopleGroups)
+            return
+        }

-        // 创建专用队列和信号量控制并发
-        let processingQueue = DispatchQueue(label: "com.app.peopleDetection", attributes: .concurrent)
-        let resultQueue = DispatchQueue(label: "com.app.peopleResult")
-        let semaphore = DispatchSemaphore(value: 4) // 限制并发数
+        // Dedicated concurrent queue for thumbnail loading and face detection
+        let processingQueue = DispatchQueue(label: "com.app.peopleDetection", qos: .userInitiated, attributes: .concurrent)

        // 创建进度追踪
-        var processedCount = 0
-        let totalCount = assets.count
+        var processedCount = 0 // guarded by resultLock (Atomic<T> is not a standard library type)

        // 分批处理,每批处理一部分数据
-        let batchSize = 50
-        let batches = Int(ceil(Float(assets.count) / Float(batchSize)))
+        let batchSize = 20
+        let batches = Int(ceil(Float(totalCount) / Float(batchSize)))
+
+        // Group used to wait for all batches to finish
+        let group = DispatchGroup()
+
+        // Keep a strong reference so the instance stays alive until the work finishes
+        var strongSelf: AnyObject? = self

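+        // Processing strategy: at most 500 photos, handled in batches of 20; batches are
+        // staggered by 0.3 s and each batch issues at most 2 image requests at a time.
+        // Shared state is protected by resultLock, and the 30-second timeout below
+        // returns partial results if detection takes too long.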
        for batchIndex in 0..<batches {
            let startIndex = batchIndex * batchSize
-            let endIndex = min(startIndex + batchSize, assets.count)
+            let endIndex = min(startIndex + batchSize, totalCount)

-            // 使用自动释放池减少内存占用
-            autoreleasepool {
+            // Enter the outer group once per batch
+            group.enter()
+
+            // Stagger the batches slightly to spread out the load
+            DispatchQueue.global(qos: .userInitiated).asyncAfter(deadline: .now() + Double(batchIndex) * 0.3) { [weak self] in
+                guard let self = self else {
+                    group.leave()
+                    return
+                }
+
+                // Per-batch group to track this batch's requests
+                let batchGroup = DispatchGroup()
+                // Limit concurrency within the batch
+                let batchSemaphore = DispatchSemaphore(value: 2)
+
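+                // Every photo enters batchGroup and takes a semaphore slot before its
+                // request is issued; the request callback's defer block releases both.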
                for i in startIndex..<endIndex {
-                    let asset = assets[i]
-                    group.enter()
-                    semaphore.wait()
-
-                    // 降低处理图片的分辨率
-                    let options = PHImageRequestOptions()
-                    options.deliveryMode = .fastFormat
-                    options.isSynchronous = false
-                    options.resizeMode = .fast
+                    batchGroup.enter()
+                    batchSemaphore.wait()

                    processingQueue.async {
                        // 使用自动释放池减少内存占用
                        autoreleasepool {
-                            let result = PHImageManager.default().requestImage(
+                            let asset = assets[i]
+
+                            // Request a small thumbnail to keep memory usage down
+                            let options = PHImageRequestOptions()
+                            options.deliveryMode = .highQualityFormat // one non-degraded callback (.fastFormat usually returns only a degraded image)
+                            options.isSynchronous = false
+                            options.resizeMode = .fast
+                            options.isNetworkAccessAllowed = false
+
+                            PHImageManager.default().requestImage(
                                for: asset,
-                                targetSize: CGSize(width: 128, height: 128), // 降低分辨率
+                                targetSize: CGSize(width: 120, height: 120),
                                contentMode: .aspectFit,
                                options: options
-                            ) { image, _ in
+                            ) { image, info in
                                defer {
-                                    semaphore.signal()
+                                    batchSemaphore.signal()
+                                    batchGroup.leave()
                                }

-                            guard let image = image else {
-                                group.leave()
+                                // Skip degraded placeholder results
+                                if let degraded = info?[PHImageResultIsDegradedKey] as? Bool, degraded {
                                    return
                                }

-                            // 使用 Vision 框架检测人脸
-                            guard let ciImage = CIImage(image: image) else {
-                                group.leave()
+                                guard let image = image, let cgImage = image.cgImage else {
                                    return
                                }

-                            let request = VNDetectFaceRectanglesRequest()
-                            let handler = VNImageRequestHandler(ciImage: ciImage, options: [:])
+                                // Run a lightweight face-rectangle detection on the thumbnail;
+                                // a fresh request per image avoids sharing mutable Vision state across threads
+                                let request = VNDetectFaceRectanglesRequest()
+                                let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])

                                do {
-                                try handler.perform([request])
-                                if let results = request.results, !results.isEmpty {
+                                    try handler.perform([request])
+                                    if let results = request.results, !results.isEmpty {
                                        // 检测到人脸,添加到数组
-                                    resultQueue.async {
-                                        if peopleGroups["包含人脸的照片"] == nil {
-                                            peopleGroups["包含人脸的照片"] = []
-                                        }
-                                        peopleGroups["包含人脸的照片"]?.append(asset)
-                                    }
+                                        resultLock.lock()
+                                        peopleGroups["包含人脸的照片"]?.append(asset)
+                                        resultLock.unlock()
                                    }
                                } catch {
                                    print("人脸检测失败: \(error)")
                                }

                                // 更新进度
-                            resultQueue.async {
-                                processedCount += 1
-                                let progress = Float(processedCount) / Float(totalCount)
-                                DispatchQueue.main.async {
-                                    print("人脸检测进度: \(Int(progress * 100))%")
-                                }
-                            }
-
-                            group.leave()
+                                resultLock.lock()
+                                processedCount += 1
+                                let finishedCount = processedCount
+                                resultLock.unlock()
+                                if finishedCount % 50 == 0 || finishedCount == totalCount {
+                                    print("人脸检测进度: \(Int(Float(finishedCount) / Float(totalCount) * 100))%")
+                                }
                            }
                        }
                    }
                }
+
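+                // batchGroup.wait() blocks this background-queue thread until the whole
+                // batch has drained; the 0.3 s stagger above keeps the number of
+                // simultaneously blocked threads small.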
+                // Wait until every photo in this batch has been handled
+                batchGroup.wait()
+
+                // Free memory after each batch
+                self.cleanupMemory()
+
+                // This batch is done
+                group.leave()
            }
-
-            // 每批处理完后清理内存
-            cleanupMemory()
        }

+        // Timeout guard: if detection takes too long, deliver whatever has been found so far
+        let timeoutWorkItem = DispatchWorkItem {
+            print("人脸检测超时,返回当前结果")
+            resultLock.lock()
+            let partialResult = peopleGroups
+            resultLock.unlock()
+            mainCompletion(partialResult)
+            strongSelf = nil
+        }
+
+        // Time out after 30 seconds
+        DispatchQueue.global().asyncAfter(deadline: .now() + 30, execute: timeoutWorkItem)
+
        // 等待所有检测完成后更新结果
        group.notify(queue: .main) {
-            completion(peopleGroups)
+            // Cancel the timeout
+            timeoutWorkItem.cancel()
+
+            // Final memory cleanup
+            self.cleanupMemory()
+
+            // Deliver the result (mainCompletion ignores a second call if the timeout already fired)
+            mainCompletion(peopleGroups)
+
+            // Release the strong reference
+            strongSelf = nil
        }
    }

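+    // Standalone single-image variant of the batched detection above; it is not
+    // referenced by classifyByPeople, but can be called directly for one image.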
+    // Optimized face-detection method
+    private func optimizedFaceDetection(in image: UIImage, request: VNDetectFaceRectanglesRequest, completion: @escaping (Bool) -> Void) {
+        guard let cgImage = image.cgImage else {
+            completion(false)
+            return
+        }
+
+        // Perform the detection on a background queue
+        DispatchQueue.global(qos: .userInitiated).async {
+            autoreleasepool {
+                let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
+
+                do {
+                    try handler.perform([request])
+                    let hasFace = request.results?.isEmpty == false
+                    completion(hasFace)
+                } catch {
+                    print("人脸检测失败: \(error)")
+                    completion(false)
+                }
+            }
+        }
+    }
+
+// // Memory-cleanup helper (add it if the class does not already have one)
+// private func cleanupMemory() {
+//     // Force a cleanup pass
+//     autoreleasepool {
+//         // Prompt the system to reclaim memory
+//         UIApplication.shared.performSelector(onMainThread: #selector(UIApplication.beginIgnoringInteractionEvents), with: nil, waitUntilDone: true)
+//         UIApplication.shared.performSelector(onMainThread: #selector(UIApplication.endIgnoringInteractionEvents), with: nil, waitUntilDone: true)
+//     }
+// }
+
    // 按人物分类
    // func classifyByPeople(assets: PHFetchResult<PHAsset>,
    //                       completion: @escaping ([String: [PHAsset]]) -> Void) {
@@ -749,10 +942,10 @@ class ClassifyPhoto {
        // 使用更小的采样区域
        let width = cgImage.width
        let height = cgImage.height
-        let stride = 4 // 增加步长,减少处理像素数
+        let pixelStride = 4 // renamed so the constant no longer shadows Swift's stride(from:to:by:) function

        // 提前检查图像尺寸是否合法
-        guard width > (2 * stride), height > (2 * stride) else {
+        guard width > (2 * pixelStride), height > (2 * pixelStride) else {
            return false
        }
@@ -779,16 +972,21 @@ class ClassifyPhoto {
        var sampledPixels = 0

        // 只采样图像的一部分区域
-        let sampleRows = min(10, height / stride)
-        let sampleCols = min(10, width / stride)
+        let sampleRows = 10
+        let sampleCols = 10

-        for y in stride(from: stride, to: height - stride, by: stride * sampleRows / 10) {
-            for x in stride(from: stride, to: width - stride, by: stride * sampleCols / 10) {
+        // Compute the sampling step sizes
+        let rowStep = max(1, height / sampleRows)
+        let colStep = max(1, width / sampleCols)
+
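+        // With sampleRows == sampleCols == 10 this visits roughly 100 pixels; for each one,
+        // |left - right| and |top - bottom| approximate the local gradient, and a low
+        // average gradient across the samples is treated as a sign of a blurry image.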
+        // Use Swift's standard stride function explicitly
+        for y in Swift.stride(from: pixelStride, to: height - pixelStride, by: rowStep) {
+            for x in Swift.stride(from: pixelStride, to: width - pixelStride, by: colStep) {
                let current = Int(buffer[y * width + x])
-                let left = Int(buffer[y * width + (x - stride)])
-                let right = Int(buffer[y * width + (x + stride)])
-                let top = Int(buffer[(y - stride) * width + x])
-                let bottom = Int(buffer[(y + stride) * width + x])
+                let left = Int(buffer[y * width + (x - pixelStride)])
+                let right = Int(buffer[y * width + (x + pixelStride)])
+                let top = Int(buffer[(y - pixelStride) * width + x])
+                let bottom = Int(buffer[(y + pixelStride) * width + x])

                // 简化的边缘检测
                let dx = abs(left - right)
@@ -808,6 +1006,8 @@ class ClassifyPhoto {
        let threshold = 15.0
        return normalizedScore < threshold
    }

    // func detectBlurryPhotos(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
    //     var blurryPhotos: [PHAsset] = []