// ClassifyPhoto.swift
import CoreImage
import CoreLocation
import Foundation
import Photos
import UIKit
import Vision
  3. class ClassifyPhoto {
  4. struct PhotoSizeInfo {
  5. var totalSize: Int64 = 0
  6. var count: Int = 0
  7. }
  8. struct ClassifiedPhotos {
  9. var screenshots: [PHAsset] = []
  10. var locations: [String: [PHAsset]] = [:] // 按地点分组
  11. var people: [String: [PHAsset]] = [:] // 按人物分组
  12. var similarPhotos: [[PHAsset]] = [] // 存储相似照片组
  13. var blurryPhotos: [PHAsset] = [] // 添加模糊照片数组
  14. // 添加容量信息
  15. var screenshotsSize: PhotoSizeInfo = PhotoSizeInfo()
  16. var locationsSize: PhotoSizeInfo = PhotoSizeInfo()
  17. var peopleSize: PhotoSizeInfo = PhotoSizeInfo()
  18. var similarPhotosSize: PhotoSizeInfo = PhotoSizeInfo()
  19. var blurryPhotosSize: PhotoSizeInfo = PhotoSizeInfo() // 添加模糊照片容量信息
  20. }
  21. // 添加位置缓存
  22. private var locationCache: [String: String] = [:]
  23. func classifyPhotos(
  24. assets: PHFetchResult<PHAsset>,
  25. progressHandler: @escaping (String, Float) -> Void,
  26. completion: @escaping (ClassifiedPhotos) -> Void
  27. ) {
  28. // 在后台队列处理
  29. DispatchQueue.global(qos: .userInitiated).async {
  30. var result = ClassifiedPhotos()
  31. let group = DispatchGroup()
  32. // 开始处理
  33. DispatchQueue.main.async {
  34. progressHandler("正在加载照片...", 0.0)
  35. }
  36. // 先处理模糊照片检测(占进度的 30%)
  37. group.enter()
  38. progressHandler("正在检测模糊照片...", 0.0)
  39. self.detectBlurryPhotos(from: assets) { blurryPhotos in
  40. result.blurryPhotos = blurryPhotos
  41. progressHandler("模糊照片检测完成", 0.3)
  42. group.leave()
  43. }
  44. // 1. 检测截图 (占总进度的 20%)
  45. group.enter()
  46. self.fetchScreenshots(from: assets) { screenshots in
  47. result.screenshots = screenshots
  48. DispatchQueue.main.async {
  49. progressHandler("正在检测截图...", 0.3)
  50. }
  51. group.leave()
  52. }
  53. // 2. 检测相似照片 (占总进度的 80%)
  54. group.enter()
  55. self.detectSimilarPhotos(
  56. assets: assets,
  57. progressHandler: { stage, progress in
  58. // 将相似照片检测的进度映射到 20%-100% 的范围
  59. let mappedProgress = 0.3 + (progress * 0.6)
  60. DispatchQueue.main.async {
  61. progressHandler(stage, mappedProgress)
  62. }
  63. }
  64. ) { similarPhotos in
  65. result.similarPhotos = similarPhotos
  66. group.leave()
  67. }
  68. // 3. 按地点分类 (占总进度的 20%)
  69. group.enter()
  70. self.classifyByLocation(assets: assets) { locationGroups in
  71. result.locations = locationGroups
  72. DispatchQueue.main.async {
  73. progressHandler("正在按地点分类...", 0.8)
  74. }
  75. group.leave()
  76. }
  77. // 4. 按人物分类 (占总进度的 20%)
  78. group.enter()
  79. self.classifyByPeople(assets: assets) { peopleGroups in
  80. result.people = peopleGroups
  81. DispatchQueue.main.async {
  82. progressHandler("正在按人物分类...", 1.0)
  83. }
  84. group.leave()
  85. }
  86. // // 添加模糊照片检测
  87. // group.enter()
  88. // self.detectBlurryPhotos(from: assets) { blurryPhotos in
  89. // result.blurryPhotos = blurryPhotos
  90. // DispatchQueue.main.async {
  91. // progressHandler("正在检测模糊照片...", 1.0)
  92. // }
  93. // group.leave()
  94. // }
  95. // 在所有分类完成后计算大小
  96. group.notify(queue: .main) {
  97. let sizeGroup = DispatchGroup()
  98. // 计算截图大小
  99. sizeGroup.enter()
  100. self.calculateAssetsSize(result.screenshots) { sizeInfo in
  101. result.screenshotsSize = sizeInfo
  102. sizeGroup.leave()
  103. }
  104. // 计算地点照片大小
  105. sizeGroup.enter()
  106. let locationAssets = Array(result.locations.values.flatMap { $0 })
  107. self.calculateAssetsSize(locationAssets) { sizeInfo in
  108. result.locationsSize = sizeInfo
  109. sizeGroup.leave()
  110. }
  111. // 计算人物照片大小
  112. sizeGroup.enter()
  113. let peopleAssets = Array(result.people.values.flatMap { $0 })
  114. self.calculateAssetsSize(peopleAssets) { sizeInfo in
  115. result.peopleSize = sizeInfo
  116. sizeGroup.leave()
  117. }
  118. // 计算相似照片大小
  119. sizeGroup.enter()
  120. let similarAssets = Array(result.similarPhotos.flatMap { $0 })
  121. self.calculateAssetsSize(similarAssets) { sizeInfo in
  122. result.similarPhotosSize = sizeInfo
  123. sizeGroup.leave()
  124. }
  125. // 计算模糊照片大小
  126. sizeGroup.enter()
  127. self.calculateAssetsSize(result.blurryPhotos) { sizeInfo in
  128. result.blurryPhotosSize = sizeInfo
  129. sizeGroup.leave()
  130. }
  131. // 所有大小计算完成后回调
  132. sizeGroup.notify(queue: .main) {
  133. progressHandler("分类完成", 1.0)
  134. completion(result)
  135. }
  136. }
  137. // // 等待所有处理完成
  138. // group.notify(queue: .main) {
  139. // progressHandler("分类完成", 1.0)
  140. // completion(result)
  141. // }
  142. }
  143. }
  144. private func detectSimilarPhotos(
  145. assets: PHFetchResult<PHAsset>,
  146. progressHandler: @escaping (String, Float) -> Void,
  147. completion: @escaping ([[PHAsset]]) -> Void
  148. ) {
  149. var similarGroups: [[PHAsset]] = []
  150. let group = DispatchGroup()
  151. if #available(iOS 13.0, *) {
  152. var imageFeatures: [(asset: PHAsset, feature: VNFeaturePrintObservation)] = []
  153. // 创建处理队列
  154. let processingQueue = DispatchQueue(label: "com.app.similarPhotos", qos: .userInitiated)
  155. let semaphore = DispatchSemaphore(value: 5)
  156. // 1. 提取所有图片的特征
  157. let totalAssets = assets.count
  158. var processedAssets = 0
  159. progressHandler("正在加载照片...", 0.0)
  160. for i in 0..<assets.count {
  161. let asset = assets[i]
  162. group.enter()
  163. semaphore.wait()
  164. let options = PHImageRequestOptions()
  165. options.deliveryMode = .highQualityFormat
  166. options.isSynchronous = false
  167. options.resizeMode = .exact
  168. PHImageManager.default().requestImage(
  169. for: asset,
  170. targetSize: CGSize(width: 256, height: 256),
  171. contentMode: .aspectFit,
  172. options: options
  173. ) { image, _ in
  174. defer {
  175. semaphore.signal()
  176. }
  177. guard let image = image,
  178. let cgImage = image.cgImage else {
  179. group.leave()
  180. return
  181. }
  182. processingQueue.async {
  183. do {
  184. let requestHandler = VNImageRequestHandler(cgImage: cgImage, options: [:])
  185. let request = VNGenerateImageFeaturePrintRequest()
  186. try requestHandler.perform([request])
  187. if let result = request.results?.first as? VNFeaturePrintObservation {
  188. imageFeatures.append((asset, result))
  189. // 更新特征提取进度
  190. processedAssets += 1
  191. let progress = Float(processedAssets) / Float(totalAssets)
  192. progressHandler("正在提取特征...", progress * 0.6)
  193. }
  194. } catch {
  195. print("特征提取失败: \(error)")
  196. }
  197. group.leave()
  198. }
  199. }
  200. }
  201. // 2. 比较特征相似度并分组
  202. group.notify(queue: processingQueue) {
  203. progressHandler("正在比较相似度...", 0.6)
  204. // 近似度
  205. let similarityThreshold: Float = 0.7
  206. var processedComparisons = 0
  207. let totalComparisons = (imageFeatures.count * (imageFeatures.count - 1)) / 2
  208. var processedIndices = Set<Int>()
  209. for i in 0..<imageFeatures.count {
  210. if processedIndices.contains(i) { continue }
  211. var similarGroup: [PHAsset] = [imageFeatures[i].asset]
  212. processedIndices.insert(i)
  213. for j in (i + 1)..<imageFeatures.count {
  214. if processedIndices.contains(j) { continue }
  215. do {
  216. var distance: Float = 0
  217. try imageFeatures[i].feature.computeDistance(&distance, to: imageFeatures[j].feature)
  218. let similarity = 1 - distance
  219. if similarity >= similarityThreshold {
  220. similarGroup.append(imageFeatures[j].asset)
  221. processedIndices.insert(j)
  222. }
  223. // 更新比较进度
  224. processedComparisons += 1
  225. let compareProgress = Float(processedComparisons) / Float(totalComparisons)
  226. progressHandler("正在比较相似度...", 0.6 + compareProgress * 0.4)
  227. } catch {
  228. print("相似度计算失败: \(error)")
  229. }
  230. }
  231. if similarGroup.count > 1 {
  232. similarGroups.append(similarGroup)
  233. }
  234. }
  235. // 按照照片数量降序排序
  236. similarGroups.sort { $0.count > $1.count }
  237. DispatchQueue.main.async {
  238. completion(similarGroups)
  239. }
  240. }
  241. }
  242. }
  243. private func classifyByLocation(assets: PHFetchResult<PHAsset>,
  244. completion: @escaping ([String: [PHAsset]]) -> Void) {
  245. var locationGroups: [String: [PHAsset]] = [:]
  246. let group = DispatchGroup()
  247. let geocodeQueue = DispatchQueue(label: "com.app.geocoding")
  248. let semaphore = DispatchSemaphore(value: 10) // 限制并发请求数
  249. assets.enumerateObjects { asset, _, _ in
  250. if let location = asset.location {
  251. group.enter()
  252. semaphore.wait()
  253. geocodeQueue.async {
  254. let geocoder = CLGeocoder()
  255. geocoder.reverseGeocodeLocation(location) { placemarks, error in
  256. defer {
  257. semaphore.signal()
  258. group.leave()
  259. }
  260. if let placemark = placemarks?.first {
  261. let locationName = self.formatLocationName(placemark)
  262. DispatchQueue.main.async {
  263. if locationGroups[locationName] == nil {
  264. locationGroups[locationName] = []
  265. }
  266. locationGroups[locationName]?.append(asset)
  267. }
  268. }
  269. }
  270. }
  271. }
  272. }
  273. // 等待所有地理编码完成后回调
  274. group.notify(queue: .main) {
  275. completion(locationGroups)
  276. }
  277. }
  278. // 格式化地点名称(只返回城市名)
  279. private func formatLocationName(_ placemark: CLPlacemark) -> String {
  280. if let city = placemark.locality {
  281. return city
  282. } else if let area = placemark.administrativeArea {
  283. return area
  284. }
  285. return "其他"
  286. }
  287. // 按人物分类
  288. private func classifyByPeople(assets: PHFetchResult<PHAsset>,
  289. completion: @escaping ([String: [PHAsset]]) -> Void) {
  290. var peopleGroups: [String: [PHAsset]] = [:]
  291. let group = DispatchGroup()
  292. // 创建一个数组来存储检测到人脸的照片
  293. var facesArray: [PHAsset] = []
  294. // 遍历所有照片
  295. assets.enumerateObjects { asset, _, _ in
  296. group.enter()
  297. // 获取照片的缩略图进行人脸检测
  298. let options = PHImageRequestOptions()
  299. options.isSynchronous = false
  300. options.deliveryMode = .fastFormat
  301. PHImageManager.default().requestImage(
  302. for: asset,
  303. targetSize: CGSize(width: 500, height: 500), // 使用较小的尺寸提高性能
  304. contentMode: .aspectFit,
  305. options: options
  306. ) { image, _ in
  307. guard let image = image else {
  308. group.leave()
  309. return
  310. }
  311. // 使用 Vision 框架检测人脸
  312. guard let ciImage = CIImage(image: image) else {
  313. group.leave()
  314. return
  315. }
  316. let request = VNDetectFaceRectanglesRequest()
  317. let handler = VNImageRequestHandler(ciImage: ciImage)
  318. do {
  319. try handler.perform([request])
  320. if let results = request.results, !results.isEmpty {
  321. // 检测到人脸,添加到数组
  322. DispatchQueue.main.async {
  323. facesArray.append(asset)
  324. }
  325. }
  326. } catch {
  327. print("人脸检测失败: \(error)")
  328. }
  329. group.leave()
  330. }
  331. }
  332. // 等待所有检测完成后更新结果
  333. group.notify(queue: .main) {
  334. if !facesArray.isEmpty {
  335. peopleGroups["包含人脸的照片"] = facesArray
  336. }
  337. completion(peopleGroups)
  338. }
  339. }
  340. // 识别截图
  341. private func fetchScreenshots(from assets: PHFetchResult<PHAsset>,
  342. completion: @escaping ([PHAsset]) -> Void) {
  343. var screenshots: [PHAsset] = []
  344. // 获取系统的截图智能相册
  345. let screenshotAlbums = PHAssetCollection.fetchAssetCollections(
  346. with: .smartAlbum,
  347. subtype: .smartAlbumScreenshots,
  348. options: nil
  349. )
  350. // 从截图相册中获取所有截图
  351. screenshotAlbums.enumerateObjects { collection, _, _ in
  352. let fetchOptions = PHFetchOptions()
  353. let screenshotAssets = PHAsset.fetchAssets(in: collection, options: fetchOptions)
  354. screenshotAssets.enumerateObjects { asset, _, _ in
  355. screenshots.append(asset)
  356. }
  357. }
  358. completion(screenshots)
  359. }
  360. private func detectBlurryPhotos(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
  361. var blurryPhotos: [PHAsset] = []
  362. let group = DispatchGroup()
  363. let processingQueue = DispatchQueue(label: "com.app.blurryDetection", attributes: .concurrent)
  364. let resultQueue = DispatchQueue(label: "com.app.blurryResult")
  365. let semaphore = DispatchSemaphore(value: 8) // 增加并发数
  366. // 创建进度追踪
  367. var processedCount = 0
  368. let totalCount = assets.count
  369. for i in 0..<assets.count {
  370. let asset = assets[i]
  371. group.enter()
  372. semaphore.wait()
  373. let options = PHImageRequestOptions()
  374. options.deliveryMode = .fastFormat // 使用快速模式
  375. options.isSynchronous = false
  376. options.resizeMode = .fast
  377. // 降低处理图片的分辨率
  378. PHImageManager.default().requestImage(
  379. for: asset,
  380. targetSize: CGSize(width: 300, height: 300), // 降低分辨率
  381. contentMode: .aspectFit,
  382. options: options
  383. ) { image, _ in
  384. defer {
  385. semaphore.signal()
  386. }
  387. guard let image = image,
  388. let cgImage = image.cgImage else {
  389. group.leave()
  390. return
  391. }
  392. processingQueue.async {
  393. // 快速模糊检测
  394. let isBlurry = self.quickBlurCheck(cgImage)
  395. if isBlurry {
  396. resultQueue.async {
  397. blurryPhotos.append(asset)
  398. }
  399. }
  400. // 更新进度
  401. resultQueue.async {
  402. processedCount += 1
  403. let progress = Float(processedCount) / Float(totalCount)
  404. DispatchQueue.main.async {
  405. print("模糊检测进度: \(Int(progress * 100))%")
  406. }
  407. }
  408. group.leave()
  409. }
  410. }
  411. }
  412. group.notify(queue: .main) {
  413. completion(blurryPhotos)
  414. }
  415. }
  416. // 快速模糊检测方法
  417. private func quickBlurCheck(_ image: CGImage) -> Bool {
  418. let width = image.width
  419. let height = image.height
  420. let stride = 2 // 跳过一些像素以加快速度
  421. // 提前检查图像尺寸是否合法
  422. guard width > (2 * stride), height > (2 * stride) else {
  423. return false // 小尺寸图像直接判定为模糊或清晰
  424. }
  425. var buffer = [UInt8](repeating: 0, count: width * height)
  426. let colorSpace = CGColorSpaceCreateDeviceGray()
  427. guard let context = CGContext(
  428. data: &buffer,
  429. width: width,
  430. height: height,
  431. bitsPerComponent: 8,
  432. bytesPerRow: width,
  433. space: colorSpace,
  434. bitmapInfo: CGImageAlphaInfo.none.rawValue
  435. ) else {
  436. return false
  437. }
  438. context.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
  439. // 使用简化的拉普拉斯算子
  440. var score: Double = 0
  441. for y in stride..<(height-stride) where y % stride == 0 {
  442. for x in stride..<(width-stride) where x % stride == 0 {
  443. let current = Int(buffer[y * width + x])
  444. let left = Int(buffer[y * width + (x - stride)])
  445. let right = Int(buffer[y * width + (x + stride)])
  446. let top = Int(buffer[(y - stride) * width + x])
  447. let bottom = Int(buffer[(y + stride) * width + x])
  448. // 简化的边缘检测
  449. let dx = abs(left - right)
  450. let dy = abs(top - bottom)
  451. score += Double(max(dx, dy))
  452. }
  453. }
  454. // 归一化分数
  455. let normalizedScore = score / Double((width * height) / (stride * stride))
  456. // 调整阈值(可能需要根据实际效果调整)
  457. let threshold = 20.0
  458. return normalizedScore < threshold
  459. }
  460. }
  461. extension ClassifyPhoto {
  462. // 获取资源大小的辅助方法
  463. func getAssetSize(_ asset: PHAsset, completion: @escaping (Int64) -> Void) {
  464. let resources = PHAssetResource.assetResources(for: asset)
  465. if let resource = resources.first {
  466. var size: Int64 = 0
  467. if let unsignedInt64 = resource.value(forKey: "fileSize") as? CLong {
  468. size = Int64(unsignedInt64)
  469. }
  470. completion(size)
  471. } else {
  472. completion(0)
  473. }
  474. }
  475. // 计算资产组的总大小
  476. func calculateAssetsSize(_ assets: [PHAsset], completion: @escaping (PhotoSizeInfo) -> Void) {
  477. let group = DispatchGroup()
  478. var totalSize: Int64 = 0
  479. for asset in assets {
  480. group.enter()
  481. getAssetSize(asset) { size in
  482. totalSize += size
  483. group.leave()
  484. }
  485. }
  486. group.notify(queue: .main) {
  487. completion(PhotoSizeInfo(totalSize: totalSize, count: assets.count))
  488. }
  489. }
  490. }