- import Photos
- import Vision
- import UIKit // UIImage, UIApplication
- import CoreLocation // CLGeocoder, CLPlacemark
- class ClassifyPhoto {
-
- struct PhotoSizeInfo {
- var totalSize: Int64 = 0
- var count: Int = 0
- }
- struct ClassifiedPhotos {
- var screenshots: [PHAsset] = []
- var locations: [String: [PHAsset]] = [:] // grouped by location
- var people: [String: [PHAsset]] = [:] // grouped by person
- var similarPhotos: [[PHAsset]] = [] // groups of similar photos
- var blurryPhotos: [PHAsset] = [] // blurry photos
-
- // Size information for each category
- var screenshotsSize: PhotoSizeInfo = PhotoSizeInfo()
- var locationsSize: PhotoSizeInfo = PhotoSizeInfo()
- var peopleSize: PhotoSizeInfo = PhotoSizeInfo()
- var similarPhotosSize: PhotoSizeInfo = PhotoSizeInfo()
- var blurryPhotosSize: PhotoSizeInfo = PhotoSizeInfo()
- }
-
- // Cache for reverse-geocoded location names
- private var locationCache: [String: String] = [:]
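- /// Runs all classification passes (blur, screenshots, similar photos, faces) off the main
- /// thread, then computes per-category sizes before invoking `completion` on the main queue.
- /// `progressHandler` receives a human-readable stage description and a value in 0...1.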
- func classifyPhotos(
- assets: PHFetchResult<PHAsset>,
- progressHandler: @escaping (String, Float) -> Void,
- completion: @escaping (ClassifiedPhotos) -> Void
- ) {
-
- // Do the heavy work on a background queue
- DispatchQueue.global(qos: .userInitiated).async {
- var result = ClassifiedPhotos()
- let group = DispatchGroup()
-
- // Report the initial stage
- DispatchQueue.main.async {
- progressHandler("Loading photos...", 0.0)
- }
-
- // Run blur detection first (reported as the first 30% of overall progress)
- group.enter()
- progressHandler("Detecting blurry photos...", 0.0)
- self.detectBlurryPhotos(from: assets) { blurryPhotos in
- result.blurryPhotos = blurryPhotos
- progressHandler("Blur detection finished", 0.3)
- group.leave()
- }
-
- // 1. Detect screenshots (quick; progress stays at 30%)
- group.enter()
- self.fetchScreenshots(from: assets) { screenshots in
- result.screenshots = screenshots
- DispatchQueue.main.async {
- progressHandler("Detecting screenshots...", 0.3)
- }
- group.leave()
- }
-
- // 2. Detect similar photos (the bulk of the work)
- group.enter()
- self.detectSimilarPhotos(
- assets: assets,
- progressHandler: { stage, progress in
- // Map similarity-detection progress into the 30% to 90% range
- let mappedProgress = 0.3 + (progress * 0.6)
- DispatchQueue.main.async {
- progressHandler(stage, mappedProgress)
- }
- }
- ) { similarPhotos in
- result.similarPhotos = similarPhotos
- group.leave()
- }
-
- // 3. Group by location (currently disabled)
- // group.enter()
- // self.classifyByLocation(assets: assets) { locationGroups in
- // result.locations = locationGroups
- // DispatchQueue.main.async {
- // progressHandler("Grouping by location...", 0.8)
- // }
- // group.leave()
- // }
-
- // 4. Group photos that contain faces
- group.enter()
- self.classifyByPeople(assets: assets) { peopleGroups in
- result.people = peopleGroups
- DispatchQueue.main.async {
- progressHandler("Grouping by people...", 1.0)
- }
- group.leave()
- }
-
- // // Blur detection (superseded by the pass at the top of this method)
- // group.enter()
- // self.detectBlurryPhotos(from: assets) { blurryPhotos in
- // result.blurryPhotos = blurryPhotos
- // DispatchQueue.main.async {
- // progressHandler("Detecting blurry photos...", 1.0)
- // }
- // group.leave()
- // }
-
- // Once every pass has finished, compute the total size of each category
- group.notify(queue: .main) {
- let sizeGroup = DispatchGroup()
-
- // Size of the blurry photos
- sizeGroup.enter()
- self.calculateAssetsSize(result.blurryPhotos) { sizeInfo in
- result.blurryPhotosSize = sizeInfo
- sizeGroup.leave()
- }
-
-
- // Size of the similar-photo groups
- sizeGroup.enter()
- let similarAssets = Array(result.similarPhotos.flatMap { $0 })
- self.calculateAssetsSize(similarAssets) { sizeInfo in
- result.similarPhotosSize = sizeInfo
- sizeGroup.leave()
- }
-
- // Size of the screenshots
- sizeGroup.enter()
- self.calculateAssetsSize(result.screenshots) { sizeInfo in
- result.screenshotsSize = sizeInfo
- sizeGroup.leave()
- }
-
- // // Size of the location groups (disabled along with the location pass above)
- // sizeGroup.enter()
- // let locationAssets = Array(result.locations.values.flatMap { $0 })
- // self.calculateAssetsSize(locationAssets) { sizeInfo in
- // result.locationsSize = sizeInfo
- // sizeGroup.leave()
- // }
-
- // Size of the people photos
- sizeGroup.enter()
- let peopleAssets = Array(result.people.values.flatMap { $0 })
- self.calculateAssetsSize(peopleAssets) { sizeInfo in
- result.peopleSize = sizeInfo
- sizeGroup.leave()
- }
-
- // Report completion once all size calculations are done
- sizeGroup.notify(queue: .main) {
- progressHandler("Classification complete", 1.0)
- completion(result)
- }
- }
- }
- }
-
- // Best-effort memory cleanup between batches
- private func cleanupMemory() {
- // Drop cached URL responses
- URLCache.shared.removeAllCachedResponses()
-
- // Drain any autoreleased objects from the previous batch
- // (Swift uses ARC, so there is no garbage collector to trigger here)
- autoreleasepool {
- let _ = [String](repeating: "temp", count: 1)
- }
-
- #if DEBUG && os(iOS)
- // Simulate a low-memory warning. This is a private selector, so keep it out of release builds.
- UIApplication.shared.perform(Selector(("_performMemoryWarning")))
- #endif
- }
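- /// Extracts a `VNFeaturePrintObservation` for each asset, then groups assets whose
- /// pairwise feature distance maps to a similarity of at least 0.7. Groups are returned
- /// sorted by size, largest first. On systems earlier than iOS 13 the completion receives
- /// an empty array, since Vision feature prints are unavailable there.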
- func detectSimilarPhotos(
- assets: PHFetchResult<PHAsset>,
- progressHandler: @escaping (String, Float) -> Void,
- completion: @escaping ([[PHAsset]]) -> Void
- ) {
- var similarGroups: [[PHAsset]] = []
- let group = DispatchGroup()
-
- if #available(iOS 13.0, *) {
- var imageFeatures: [(asset: PHAsset, feature: VNFeaturePrintObservation)] = []
-
- // Serial queue for collecting features; the semaphore limits concurrent image requests
- let processingQueue = DispatchQueue(label: "com.app.similarPhotos", qos: .userInitiated)
- let semaphore = DispatchSemaphore(value: 4) // allow a few requests in flight at once
-
- // 1. Extract a feature print for every photo
- let totalAssets = assets.count
- var processedAssets = 0
-
- progressHandler("Loading photos...", 0.0)
-
- for i in 0..<assets.count {
- let asset = assets[i]
- group.enter()
- semaphore.wait()
-
- let options = PHImageRequestOptions()
- options.deliveryMode = .fastFormat // fast, lower-quality delivery
- options.isSynchronous = false
- options.resizeMode = .fast
-
- DispatchQueue.global(qos: .background).async {
- PHImageManager.default().requestImage(
- for: asset,
- targetSize: CGSize(width: 128, height: 128), // a small thumbnail keeps feature extraction cheap
- contentMode: .aspectFit,
- options: options
- ) { image, _ in
- defer {
- semaphore.signal()
- }
-
- guard let image = image,
- let cgImage = image.cgImage else {
- group.leave()
- return
- }
-
- processingQueue.async {
- do {
- let requestHandler = VNImageRequestHandler(cgImage: cgImage, options: [:])
- let request = VNGenerateImageFeaturePrintRequest()
- try requestHandler.perform([request])
-
- if let result = request.results?.first as? VNFeaturePrintObservation {
- imageFeatures.append((asset, result))
-
- // Update feature-extraction progress (first 60% of this stage)
- processedAssets += 1
- let progress = Float(processedAssets) / Float(totalAssets)
- progressHandler("Extracting features...", progress * 0.6)
- }
- } catch {
- print("Feature extraction failed: \(error)")
- }
- group.leave()
- }
- }
- }
- }
-
- group.notify(queue: processingQueue) {
- progressHandler("Comparing similarity...", 0.6)
-
- // Minimum similarity (1 - feature distance) for two photos to be grouped together
- let similarityThreshold: Float = 0.7
- var similarGroups: [[PHAsset]] = []
-
- // Compare pairs in parallel to speed things up
- let processingGroup = DispatchGroup()
- let processingQueue = DispatchQueue(label: "com.yourapp.similarity.processing", attributes: .concurrent)
- let semaphore = DispatchSemaphore(value: 4) // keep the number of concurrent comparisons small
-
- // Thread-safe containers for intermediate results
- let processedIndices = Atomic<Set<Int>>(Set<Int>())
- let groupResults = Atomic<[Int: [PHAsset]]>([:])
-
- // Work in batches so only part of the data is in flight at once
- let batchSize = min(50, imageFeatures.count)
- // Guard against a division that could produce a non-finite value
- let batchCount = Float(imageFeatures.count) / Float(batchSize)
- let batches = batchCount.isFinite ? Int(ceil(batchCount)) : 1
-
- for batchIndex in 0..<batches {
- let startIndex = batchIndex * batchSize
- let endIndex = min(startIndex + batchSize, imageFeatures.count)
-
- for i in startIndex..<endIndex {
- // Skip indices that already belong to a group
- if processedIndices.value.contains(i) { continue }
-
- semaphore.wait()
- processingGroup.enter()
-
- processingQueue.async {
- // Re-check: another worker may have claimed this index while we were waiting
- if processedIndices.value.contains(i) {
- semaphore.signal()
- processingGroup.leave()
- return
- }
-
- var similarAssets: [PHAsset] = [imageFeatures[i].asset]
- processedIndices.mutate { $0.insert(i) }
-
- for j in (i + 1)..<imageFeatures.count {
- // Skip candidates that already belong to a group
- if processedIndices.value.contains(j) { continue }
-
- do {
- var distance: Float = 0
- try imageFeatures[i].feature.computeDistance(&distance, to: imageFeatures[j].feature)
-
- // Skip invalid distance values
- if distance.isNaN || distance.isInfinite {
- print("Warning: invalid distance value detected")
- continue
- }
-
- // Clamp the distance into the expected 0...1 range
- distance = max(0, min(1, distance))
-
- let similarity = 1 - distance
- if similarity >= similarityThreshold {
- similarAssets.append(imageFeatures[j].asset)
- processedIndices.mutate { $0.insert(j) }
- }
- } catch {
- print("Similarity computation failed: \(error)")
- }
- }
-
- // Keep only groups with more than one photo. Write into the Atomic container
- // directly so the result is visible before processingGroup.wait() returns below.
- if similarAssets.count > 1 {
- groupResults.mutate { $0[i] = similarAssets }
- }
-
- // Update progress, guarding against invalid values
- if imageFeatures.count > 0 {
- let processedCount = Float(processedIndices.value.count)
- let totalCount = Float(imageFeatures.count)
-
- // Make sure the progress value is finite and within 0...1
- var progress: Float = 0
- if processedCount.isFinite && totalCount.isFinite && totalCount > 0 {
- progress = processedCount / totalCount
- progress = max(0, min(1, progress))
- }
-
- DispatchQueue.main.async {
- progressHandler("Comparing similarity...", 0.6 + progress * 0.4)
- }
- }
-
- semaphore.signal()
- processingGroup.leave()
- }
- }
- }
-
- processingGroup.wait()
-
- // Collect the surviving groups
- similarGroups = Array(groupResults.value.values)
-
- // Sort by photo count, largest group first
- similarGroups.sort { $0.count > $1.count }
-
- DispatchQueue.main.async {
- completion(similarGroups)
- }
- }
- } else {
- // Feature prints require iOS 13; report no similar photos on older systems
- completion([])
- }
- }
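- /// Reverse-geocodes each asset's location and groups assets by city name (falling back
- /// to the administrative area). CLGeocoder is rate-limited, so the semaphore below keeps
- /// the number of in-flight requests bounded; expect this to be slow for large libraries.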
- func classifyByLocation(assets: PHFetchResult<PHAsset>,
- completion: @escaping ([String: [PHAsset]]) -> Void) {
- var locationGroups: [String: [PHAsset]] = [:]
- let group = DispatchGroup()
- let geocodeQueue = DispatchQueue(label: "com.app.geocoding")
- let semaphore = DispatchSemaphore(value: 10) // limit concurrent geocoding requests
- assets.enumerateObjects { asset, _, _ in
- if let location = asset.location {
- group.enter()
- semaphore.wait()
-
- geocodeQueue.async {
- let geocoder = CLGeocoder()
- geocoder.reverseGeocodeLocation(location) { placemarks, error in
- defer {
- semaphore.signal()
- group.leave()
- }
-
- if let placemark = placemarks?.first {
- let locationName = self.formatLocationName(placemark)
- DispatchQueue.main.async {
- if locationGroups[locationName] == nil {
- locationGroups[locationName] = []
- }
- locationGroups[locationName]?.append(asset)
- }
- }
- }
- }
- }
- }
- // Invoke the callback once every geocoding request has finished
- group.notify(queue: .main) {
- completion(locationGroups)
- }
- }
- // Formats a location name (prefers the city, then the administrative area)
- func formatLocationName(_ placemark: CLPlacemark) -> String {
- if let city = placemark.locality {
- return city
- } else if let area = placemark.administrativeArea {
- return area
- }
- return "Other"
- }
-
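- /// Runs Vision face detection on a small thumbnail of every asset and collects all
- /// photos that contain at least one face under a single "Photos with faces" group.
- /// Work is batched and throttled with a semaphore to keep memory usage bounded.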
- func classifyByPeople(assets: PHFetchResult<PHAsset>,
- completion: @escaping ([String: [PHAsset]]) -> Void) {
- var peopleGroups: [String: [PHAsset]] = [:]
- let group = DispatchGroup()
-
- // Dedicated queues plus a semaphore to bound concurrency
- let processingQueue = DispatchQueue(label: "com.app.peopleDetection", attributes: .concurrent)
- let resultQueue = DispatchQueue(label: "com.app.peopleResult")
- let semaphore = DispatchSemaphore(value: 4) // cap concurrent image requests
-
- // Progress tracking
- var processedCount = 0
- let totalCount = assets.count
-
- // Process the assets in batches
- let batchSize = 50
- let batches = Int(ceil(Float(assets.count) / Float(batchSize)))
-
- for batchIndex in 0..<batches {
- let startIndex = batchIndex * batchSize
- let endIndex = min(startIndex + batchSize, assets.count)
-
- // An autorelease pool keeps thumbnail allocations from piling up
- autoreleasepool {
- for i in startIndex..<endIndex {
- let asset = assets[i]
- group.enter()
- semaphore.wait()
-
- // Request a low-resolution thumbnail for detection
- let options = PHImageRequestOptions()
- options.deliveryMode = .fastFormat
- options.isSynchronous = false
- options.resizeMode = .fast
-
- processingQueue.async {
- // An autorelease pool keeps per-image allocations from accumulating
- autoreleasepool {
- _ = PHImageManager.default().requestImage(
- for: asset,
- targetSize: CGSize(width: 128, height: 128), // a small thumbnail is enough for face detection
- contentMode: .aspectFit,
- options: options
- ) { image, _ in
- defer {
- semaphore.signal()
- }
-
- guard let image = image else {
- group.leave()
- return
- }
-
- // Detect faces with the Vision framework
- guard let ciImage = CIImage(image: image) else {
- group.leave()
- return
- }
-
- let request = VNDetectFaceRectanglesRequest()
- let handler = VNImageRequestHandler(ciImage: ciImage, options: [:])
-
- do {
- try handler.perform([request])
- if let results = request.results, !results.isEmpty {
- // At least one face found; record the asset on the serial result queue
- resultQueue.async {
- peopleGroups["Photos with faces", default: []].append(asset)
- }
- }
- } catch {
- print("Face detection failed: \(error)")
- }
-
- // Update progress
- resultQueue.async {
- processedCount += 1
- let progress = Float(processedCount) / Float(totalCount)
- DispatchQueue.main.async {
- print("Face detection progress: \(Int(progress * 100))%")
- }
- }
-
- group.leave()
- }
- }
- }
- }
- }
-
- // Try to release memory after each batch
- cleanupMemory()
- }
-
- // Hop through resultQueue so all pending appends land before peopleGroups is read,
- // then deliver the result on the main queue
- group.notify(queue: resultQueue) {
- let finalGroups = peopleGroups
- DispatchQueue.main.async {
- completion(finalGroups)
- }
- }
- }
- // Group by people (older implementation, kept below for reference)
- // func classifyByPeople(assets: PHFetchResult<PHAsset>,
- // completion: @escaping ([String: [PHAsset]]) -> Void) {
- // var peopleGroups: [String: [PHAsset]] = [:]
- // let group = DispatchGroup()
- //
- // DispatchQueue.global(qos: .background).async {
- // // Collect photos in which a face was detected
- // var facesArray: [PHAsset] = []
- //
- // // Iterate over every photo
- // assets.enumerateObjects { asset, _, _ in
- // group.enter()
- //
- // // Request a small thumbnail for face detection
- // let options = PHImageRequestOptions()
- // options.isSynchronous = false
- // options.deliveryMode = .fastFormat
- //
- // PHImageManager.default().requestImage(
- // for: asset,
- // targetSize: CGSize(width: 128, height: 128), // a small size keeps this fast
- // contentMode: .aspectFit,
- // options: options
- // ) { image, _ in
- // guard let image = image else {
- // group.leave()
- // return
- // }
- //
- // // Detect faces with the Vision framework
- // guard let ciImage = CIImage(image: image) else {
- // group.leave()
- // return
- // }
- //
- // let request = VNDetectFaceRectanglesRequest()
- // let handler = VNImageRequestHandler(ciImage: ciImage)
- //
- // do {
- // try handler.perform([request])
- // if let results = request.results, !results.isEmpty {
- // // Face found; record the asset
- // DispatchQueue.main.async {
- // facesArray.append(asset)
- // }
- // }
- // } catch {
- // print("Face detection failed: \(error)")
- // }
- //
- // group.leave()
- // }
- // }
- //
- // // Deliver the result once every detection has finished
- // group.notify(queue: .main) {
- // if !facesArray.isEmpty {
- // peopleGroups["Photos with faces"] = facesArray
- // }
- // completion(peopleGroups)
- // }
- // }
- // }
- // Collect screenshots
- func fetchScreenshots(from assets: PHFetchResult<PHAsset>,
- completion: @escaping ([PHAsset]) -> Void) {
- var screenshots: [PHAsset] = []
- // Fetch the system Screenshots smart album
- let screenshotAlbums = PHAssetCollection.fetchAssetCollections(
- with: .smartAlbum,
- subtype: .smartAlbumScreenshots,
- options: nil
- )
- // Pull every asset out of the screenshots album
- screenshotAlbums.enumerateObjects { collection, _, _ in
- let fetchOptions = PHFetchOptions()
- let screenshotAssets = PHAsset.fetchAssets(in: collection, options: fetchOptions)
- screenshotAssets.enumerateObjects { asset, _, _ in
- screenshots.append(asset)
- }
- }
- completion(screenshots)
- }
-
- // Alternative helper that takes a PHFetchResult<PHAsset> (unused)
- // private func detectScreenshots(assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
- // let processingQueue = DispatchQueue(label: "com.yourapp.screenshots.processing", attributes: .concurrent)
- // let resultQueue = DispatchQueue(label: "com.yourapp.screenshots.results")
- // let group = DispatchGroup()
- // let semaphore = DispatchSemaphore(value: 4) // cap concurrency
- //
- // let screenshots = Atomic<[PHAsset]>([])
- //
- // // Process in batches
- // let totalCount = assets.count
- // let batchSize = 50
- // let batches = Int(ceil(Float(totalCount) / Float(batchSize)))
- //
- // for batchIndex in 0..<batches {
- // let startIndex = batchIndex * batchSize
- // let endIndex = min(startIndex + batchSize, totalCount)
- //
- // processingQueue.async {
- // autoreleasepool {
- // for i in startIndex..<endIndex {
- // semaphore.wait()
- // group.enter()
- //
- // let asset = assets.object(at: i)
- //
- // // Logic to decide whether the asset is a screenshot
- // // ...
- //
- // // Placeholder heuristic based on aspect ratio
- // let isScreenshot = asset.pixelWidth == asset.pixelHeight * 16 / 9 ||
- // asset.pixelHeight == asset.pixelWidth * 16 / 9
- //
- // if isScreenshot {
- // resultQueue.async {
- // screenshots.mutate { $0.append(asset) }
- // }
- // }
- //
- // semaphore.signal()
- // group.leave()
- // }
- // }
- // }
- // }
- //
- // group.notify(queue: .main) {
- // completion(screenshots.value)
- // }
- // }
-
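- /// Flags photos that look blurry by running `fastBlurCheck` on a 64x64 thumbnail of each
- /// asset. Detection is batched and throttled; the result is delivered on the main queue.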
- func detectBlurryPhotos(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
- var blurryPhotos: [PHAsset] = []
- let group = DispatchGroup()
- let processingQueue = DispatchQueue(label: "com.app.blurryDetection", attributes: .concurrent)
- let resultQueue = DispatchQueue(label: "com.app.blurryResult")
- let semaphore = DispatchSemaphore(value: 8) // higher concurrency; the per-image work is cheap
-
- // Progress tracking
- var processedCount = 0
- let totalCount = assets.count
-
- // Process the assets in batches
- let batchSize = 50
- let batches = Int(ceil(Float(assets.count) / Float(batchSize)))
-
- for batchIndex in 0..<batches {
- let startIndex = batchIndex * batchSize
- let endIndex = min(startIndex + batchSize, assets.count)
-
- autoreleasepool {
- for i in startIndex..<endIndex {
- let asset = assets[i]
- group.enter()
- semaphore.wait()
-
- let options = PHImageRequestOptions()
- options.deliveryMode = .fastFormat // fast, lower-quality delivery
- options.isSynchronous = false
- options.resizeMode = .fast
-
- // A very small thumbnail is enough for the blur heuristic
- PHImageManager.default().requestImage(
- for: asset,
- targetSize: CGSize(width: 64, height: 64), // 64x64 keeps the check cheap
- contentMode: .aspectFit,
- options: options
- ) { image, _ in
- defer {
- semaphore.signal()
- }
-
- guard let image = image else {
- group.leave()
- return
- }
-
- processingQueue.async {
- // Cheap blur heuristic (see fastBlurCheck below)
- let isBlurry = self.fastBlurCheck(image)
-
- if isBlurry {
- resultQueue.async {
- blurryPhotos.append(asset)
- }
- }
-
- // Update progress
- resultQueue.async {
- processedCount += 1
- let progress = Float(processedCount) / Float(totalCount)
- if processedCount % 100 == 0 || processedCount == totalCount {
- DispatchQueue.main.async {
- print("Blur detection progress: \(Int(progress * 100))%")
- }
- }
- }
-
- group.leave()
- }
- }
- }
- }
-
- // Try to release memory after each batch
- cleanupMemory()
- }
-
- // Hop through resultQueue so pending appends land before blurryPhotos is read,
- // then deliver the result on the main queue
- group.notify(queue: resultQueue) {
- let found = blurryPhotos
- DispatchQueue.main.async {
- completion(found)
- }
- }
- }
- // Cheap blur heuristic: samples a sparse pixel grid and measures local contrast
- private func fastBlurCheck(_ image: UIImage) -> Bool {
- guard let cgImage = image.cgImage else { return false }
-
- // Sample on a coarse grid to keep the pixel count low
- let width = cgImage.width
- let height = cgImage.height
- let step = 4 // grid spacing; larger values sample fewer pixels
-
- // Bail out on images that are too small to sample
- guard width > (2 * step), height > (2 * step) else {
- return false
- }
-
- // Render the image into an 8-bit grayscale buffer
- var buffer = [UInt8](repeating: 0, count: width * height)
-
- let colorSpace = CGColorSpaceCreateDeviceGray()
- guard let context = CGContext(
- data: &buffer,
- width: width,
- height: height,
- bitsPerComponent: 8,
- bytesPerRow: width,
- space: colorSpace,
- bitmapInfo: CGImageAlphaInfo.none.rawValue
- ) else {
- return false
- }
-
- context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
-
- // Simplified edge-strength measure (a cheap stand-in for a Laplacian filter)
- var score: Double = 0
- var sampledPixels = 0
-
- // Only sample a sparse grid of the image
- let sampleRows = min(10, height / step)
- let sampleCols = min(10, width / step)
- let yStep = max(1, step * sampleRows / 10)
- let xStep = max(1, step * sampleCols / 10)
-
- for y in stride(from: step, to: height - step, by: yStep) {
- for x in stride(from: step, to: width - step, by: xStep) {
- let left = Int(buffer[y * width + (x - step)])
- let right = Int(buffer[y * width + (x + step)])
- let top = Int(buffer[(y - step) * width + x])
- let bottom = Int(buffer[(y + step) * width + x])
-
- // Large horizontal/vertical differences indicate sharp edges
- let dx = abs(left - right)
- let dy = abs(top - bottom)
- score += Double(max(dx, dy))
- sampledPixels += 1
- }
- }
-
- // Avoid dividing by zero
- guard sampledPixels > 0 else { return false }
-
- // Normalize by the number of sampled pixels
- let normalizedScore = score / Double(sampledPixels)
-
- // Images below this edge-strength threshold are treated as blurry
- let threshold = 15.0
- return normalizedScore < threshold
- }
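- // Note: the grid spacing and the 15.0 threshold above are empirical values; they may
- // need retuning if the thumbnail size passed to fastBlurCheck changes.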
-
- // func detectBlurryPhotos(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
- // var blurryPhotos: [PHAsset] = []
- // let group = DispatchGroup()
- // let processingQueue = DispatchQueue(label: "com.app.blurryDetection", attributes: .concurrent)
- // let resultQueue = DispatchQueue(label: "com.app.blurryResult")
- // let semaphore = DispatchSemaphore(value: 5) // cap concurrency
- //
- // // Progress tracking
- // var processedCount = 0
- // let totalCount = assets.count
- //
- // for i in 0..<assets.count {
- // let asset = assets[i]
- // group.enter()
- // semaphore.wait()
- //
- // let options = PHImageRequestOptions()
- // options.deliveryMode = .fastFormat // fast, lower-quality delivery
- // options.isSynchronous = false
- // options.resizeMode = .fast
- //
- // // Request a low-resolution thumbnail
- // PHImageManager.default().requestImage(
- // for: asset,
- // targetSize: CGSize(width: 128, height: 128), // downscaled thumbnail
- // contentMode: .aspectFit,
- // options: options
- // ) { image, _ in
- // defer {
- // semaphore.signal()
- // }
- //
- // guard let image = image,
- // let cgImage = image.cgImage else {
- // group.leave()
- // return
- // }
- //
- // processingQueue.async {
- // // Quick blur check
- // let isBlurry = self.quickBlurCheck(cgImage)
- //
- // if isBlurry {
- // resultQueue.async {
- // blurryPhotos.append(asset)
- // }
- // }
- //
- // // Update progress
- // resultQueue.async {
- // processedCount += 1
- // let progress = Float(processedCount) / Float(totalCount)
- // DispatchQueue.main.async {
- // print("Blur detection progress: \(Int(progress * 100))%")
- // }
- // }
- //
- // group.leave()
- // }
- // }
- // }
- //
- // group.notify(queue: .main) {
- // completion(blurryPhotos)
- // }
- // }
- //
- // // Quick blur-check helper
- // private func quickBlurCheck(_ image: CGImage) -> Bool {
- //
- // let width = image.width
- // let height = image.height
- // let stride = 2 // skip pixels to speed things up
- //
- // // Bail out early on images that are too small to sample
- // guard width > (2 * stride), height > (2 * stride) else {
- // return false // treat tiny images as not blurry
- // }
- //
- // var buffer = [UInt8](repeating: 0, count: width * height)
- //
- // let colorSpace = CGColorSpaceCreateDeviceGray()
- // guard let context = CGContext(
- // data: &buffer,
- // width: width,
- // height: height,
- // bitsPerComponent: 8,
- // bytesPerRow: width,
- // space: colorSpace,
- // bitmapInfo: CGImageAlphaInfo.none.rawValue
- // ) else {
- // return false
- // }
- //
- // context.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
- //
- // // Simplified Laplacian-style edge measure
- // var score: Double = 0
- //
- // for y in stride..<(height-stride) where y % stride == 0 {
- // for x in stride..<(width-stride) where x % stride == 0 {
- // let current = Int(buffer[y * width + x])
- // let left = Int(buffer[y * width + (x - stride)])
- // let right = Int(buffer[y * width + (x + stride)])
- // let top = Int(buffer[(y - stride) * width + x])
- // let bottom = Int(buffer[(y + stride) * width + x])
- //
- // // Simplified edge detection
- // let dx = abs(left - right)
- // let dy = abs(top - bottom)
- // score += Double(max(dx, dy))
- // }
- // }
- //
- // // Normalize the score
- // let normalizedScore = score / Double((width * height) / (stride * stride))
- //
- // // Threshold (may need tuning against real photos)
- // let threshold = 20.0
- // return normalizedScore < threshold
- // }
- }
- extension ClassifyPhoto {
-
- // Helper that reads the on-disk size of a single asset
- func getAssetSize(_ asset: PHAsset, completion: @escaping (Int64) -> Void) {
- DispatchQueue.global(qos: .background).async {
- let resources = PHAssetResource.assetResources(for: asset)
- if let resource = resources.first {
- var size: Int64 = 0
- // "fileSize" is an undocumented key on PHAssetResource; fall back to 0 if it is missing
- if let fileSize = resource.value(forKey: "fileSize") as? CLong {
- size = Int64(fileSize)
- }
- DispatchQueue.main.async {
- completion(size)
- }
- } else {
- DispatchQueue.main.async {
- completion(0)
- }
- }
- }
- }
- // Computes the combined size of a group of assets
- func calculateAssetsSize(_ assets: [PHAsset], completion: @escaping (PhotoSizeInfo) -> Void) {
- print("Calculating total size of photo group")
- let group = DispatchGroup()
- var totalSize: Int64 = 0
-
- for asset in assets {
- group.enter()
- getAssetSize(asset) { size in
- totalSize += size
- group.leave()
- }
- }
-
- group.notify(queue: .main) {
- completion(PhotoSizeInfo(totalSize: totalSize, count: assets.count))
- }
- }
- }
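- // Example (hypothetical call site): formatting a PhotoSizeInfo for display, where `result`
- // stands in for a ClassifiedPhotos value returned by classifyPhotos.
- // let info = result.screenshotsSize
- // let text = ByteCountFormatter.string(fromByteCount: info.totalSize, countStyle: .file)
- // print("\(info.count) screenshots, \(text)")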
- extension ClassifyPhoto {
-
- // Helper that converts Display P3 images to a device RGB color space before further processing
- private func processImageWithSafeColorSpace(_ image: UIImage) -> UIImage? {
- autoreleasepool {
- guard let cgImage = image.cgImage else { return image }
-
- // Check whether the image uses the Display P3 color space
- if let colorSpace = cgImage.colorSpace,
- (colorSpace.name as String?) == CGColorSpace.displayP3 as String {
-
- // Redraw into a device RGB color space
- let sRGBColorSpace = CGColorSpaceCreateDeviceRGB()
- if let context = CGContext(
- data: nil,
- width: cgImage.width,
- height: cgImage.height,
- bitsPerComponent: 8,
- bytesPerRow: 0,
- space: sRGBColorSpace,
- bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue
- ) {
- context.draw(cgImage, in: CGRect(x: 0, y: 0, width: cgImage.width, height: cgImage.height))
- if let convertedImage = context.makeImage() {
- return UIImage(cgImage: convertedImage, scale: image.scale, orientation: image.imageOrientation)
- }
- }
- }
-
- return image
- }
- }
- // Image-request wrapper that applies the color-space conversion above
- private func requestImageWithSafeProcessing(
- for asset: PHAsset,
- targetSize: CGSize,
- contentMode: PHImageContentMode,
- options: PHImageRequestOptions?,
- completion: @escaping (UIImage?) -> Void
- ) {
- PHImageManager.default().requestImage(
- for: asset,
- targetSize: targetSize,
- contentMode: contentMode,
- options: options
- ) { image, info in
- guard let image = image else {
- completion(nil)
- return
- }
-
- // Convert possible P3 images off the main thread before returning them
- DispatchQueue.global(qos: .userInitiated).async {
- let processedImage = self.processImageWithSafeColorSpace(image)
- DispatchQueue.main.async {
- completion(processedImage)
- }
- }
- }
- }
- }
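- /// Minimal lock-based wrapper that serializes reads and mutations of a value shared across
- /// queues. `value` returns a snapshot; use `mutate` for read-modify-write updates.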
- class Atomic<T> {
- private var value_: T
- private let lock = NSLock()
-
- init(_ value: T) {
- self.value_ = value
- }
-
- var value: T {
- lock.lock()
- defer { lock.unlock() }
- return value_
- }
-
- func mutate(_ mutation: (inout T) -> Void) {
- lock.lock()
- defer { lock.unlock() }
- mutation(&value_)
- }
- }
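- // Example: a hypothetical call site showing how ClassifyPhoto might be driven. The function
- // name and fetch options here are illustrative, not part of the classifier itself; photo
- // library authorization is assumed to have been granted already.
- func runPhotoCleanupScan() {
- // Fetch all image assets, newest first
- let fetchOptions = PHFetchOptions()
- fetchOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
- let allPhotos = PHAsset.fetchAssets(with: .image, options: fetchOptions)
-
- let classifier = ClassifyPhoto()
- classifier.classifyPhotos(
- assets: allPhotos,
- progressHandler: { stage, progress in
- // Stage descriptions arrive on the main queue with a 0...1 progress value
- print("\(stage) (\(Int(progress * 100))%)")
- },
- completion: { result in
- // Print a simple summary of what was found
- print("Blurry: \(result.blurryPhotos.count), screenshots: \(result.screenshots.count)")
- print("Similar groups: \(result.similarPhotos.count), photos with faces: \(result.people.values.map { $0.count }.reduce(0, +))")
- }
- )
- }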