import Photos
import Vision
import CoreLocation
import CoreImage
import UIKit

class ClassifyPhoto {

    struct PhotoSizeInfo {
        var totalSize: Int64 = 0
        var count: Int = 0
    }

    struct ClassifiedPhotos {
        var screenshots: [PHAsset] = []
        var locations: [String: [PHAsset]] = [:]  // Grouped by location
        var people: [String: [PHAsset]] = [:]     // Grouped by person
        var similarPhotos: [[PHAsset]] = []       // Groups of similar photos
        var blurryPhotos: [PHAsset] = []          // Blurry photos

        // Size information for each category
        var screenshotsSize: PhotoSizeInfo = PhotoSizeInfo()
        var locationsSize: PhotoSizeInfo = PhotoSizeInfo()
        var peopleSize: PhotoSizeInfo = PhotoSizeInfo()
        var similarPhotosSize: PhotoSizeInfo = PhotoSizeInfo()
        var blurryPhotosSize: PhotoSizeInfo = PhotoSizeInfo()
    }

    // Cache for reverse-geocoded location names
    private var locationCache: [String: String] = [:]

    func classifyPhotos(
        assets: PHFetchResult<PHAsset>,
        progressHandler: @escaping (String, Float) -> Void,
        completion: @escaping (ClassifiedPhotos) -> Void
    ) {
        // Run the whole pipeline on a background queue
        DispatchQueue.global(qos: .userInitiated).async {
            var result = ClassifiedPhotos()
            let group = DispatchGroup()

            // Starting up
            DispatchQueue.main.async {
                progressHandler("正在加载照片...", 0.0)
            }

            // Blurry-photo detection (reported as the first 30% of overall progress)
            group.enter()
            DispatchQueue.main.async {
                progressHandler("正在检测模糊照片...", 0.0)
            }
            self.detectBlurryPhotos(from: assets) { blurryPhotos in
                result.blurryPhotos = blurryPhotos
                DispatchQueue.main.async {
                    progressHandler("模糊照片检测完成", 0.3)
                }
                group.leave()
            }

            // 1. Screenshot detection
            group.enter()
            self.fetchScreenshots(from: assets) { screenshots in
                result.screenshots = screenshots
                DispatchQueue.main.async {
                    progressHandler("正在检测截图...", 0.3)
                }
                group.leave()
            }

            // 2. Similar-photo detection (its progress is mapped into 30% to 90%)
            group.enter()
            self.detectSimilarPhotos(
                assets: assets,
                progressHandler: { stage, progress in
                    // Map similarity-detection progress into the 30% to 90% range
                    let mappedProgress = 0.3 + (progress * 0.6)
                    DispatchQueue.main.async {
                        progressHandler(stage, mappedProgress)
                    }
                }
            ) { similarPhotos in
                result.similarPhotos = similarPhotos
                group.leave()
            }

            // 3. Group by location
            group.enter()
            self.classifyByLocation(assets: assets) { locationGroups in
                result.locations = locationGroups
                DispatchQueue.main.async {
                    progressHandler("正在按地点分类...", 0.8)
                }
                group.leave()
            }

            // 4. Group by people
            group.enter()
            self.classifyByPeople(assets: assets) { peopleGroups in
                result.people = peopleGroups
                DispatchQueue.main.async {
                    progressHandler("正在按人物分类...", 1.0)
                }
                group.leave()
            }

            // Once every classification has finished, compute the total size of each group
            group.notify(queue: .main) {
                let sizeGroup = DispatchGroup()

                // Screenshots
                sizeGroup.enter()
                self.calculateAssetsSize(result.screenshots) { sizeInfo in
                    result.screenshotsSize = sizeInfo
                    sizeGroup.leave()
                }

                // Location groups
                sizeGroup.enter()
                let locationAssets = Array(result.locations.values.flatMap { $0 })
                self.calculateAssetsSize(locationAssets) { sizeInfo in
                    result.locationsSize = sizeInfo
                    sizeGroup.leave()
                }

                // People groups
                sizeGroup.enter()
                let peopleAssets = Array(result.people.values.flatMap { $0 })
                self.calculateAssetsSize(peopleAssets) { sizeInfo in
                    result.peopleSize = sizeInfo
                    sizeGroup.leave()
                }

                // Similar-photo groups
                sizeGroup.enter()
                let similarAssets = Array(result.similarPhotos.flatMap { $0 })
                self.calculateAssetsSize(similarAssets) { sizeInfo in
                    result.similarPhotosSize = sizeInfo
                    sizeGroup.leave()
                }

                // Blurry photos
                sizeGroup.enter()
                self.calculateAssetsSize(result.blurryPhotos) { sizeInfo in
                    result.blurryPhotosSize = sizeInfo
                    sizeGroup.leave()
                }

                // Call back once every size calculation has completed
                sizeGroup.notify(queue: .main) {
                    progressHandler("分类完成", 1.0)
                    completion(result)
                }
            }
        }
    }

    private func detectSimilarPhotos(
        assets: PHFetchResult<PHAsset>,
        progressHandler: @escaping (String, Float) -> Void,
        completion: @escaping ([[PHAsset]]) -> Void
    ) {
        var similarGroups: [[PHAsset]] = []
        let group = DispatchGroup()

        if #available(iOS 13.0, *) {
            var imageFeatures: [(asset: PHAsset, feature: VNFeaturePrintObservation)] = []

            // Serial queue that owns imageFeatures; a semaphore limits concurrent image requests
            let processingQueue = DispatchQueue(label: "com.app.similarPhotos", qos: .userInitiated)
            let semaphore = DispatchSemaphore(value: 5)

            // 1. Extract a feature print for every photo
            let totalAssets = assets.count
            var processedAssets = 0

            progressHandler("正在加载照片...", 0.0)

            for i in 0..<assets.count {
                let asset = assets[i]
                group.enter()
                semaphore.wait()

                let options = PHImageRequestOptions()
                options.deliveryMode = .highQualityFormat
                options.isSynchronous = false
                options.resizeMode = .exact

                PHImageManager.default().requestImage(
                    for: asset,
                    targetSize: CGSize(width: 256, height: 256),
                    contentMode: .aspectFit,
                    options: options
                ) { image, _ in
                    defer {
                        semaphore.signal()
                    }

                    guard let image = image,
                          let cgImage = image.cgImage else {
                        group.leave()
                        return
                    }

                    processingQueue.async {
                        do {
                            let requestHandler = VNImageRequestHandler(cgImage: cgImage, options: [:])
                            let request = VNGenerateImageFeaturePrintRequest()
                            try requestHandler.perform([request])

                            if let result = request.results?.first as? VNFeaturePrintObservation {
                                imageFeatures.append((asset, result))

                                // Report feature-extraction progress
                                processedAssets += 1
                                let progress = Float(processedAssets) / Float(totalAssets)
                                progressHandler("正在提取特征...", progress * 0.6)
                            }
                        } catch {
                            print("特征提取失败: \(error)")
                        }
                        group.leave()
                    }
                }
            }

            // 2. Compare feature distances and build groups of similar photos
            group.notify(queue: processingQueue) {
                progressHandler("正在比较相似度...", 0.6)

                // Similarity threshold (similarity = 1 - feature distance)
                let similarityThreshold: Float = 0.7
                var processedComparisons = 0
                // At least 1 so the progress division below never divides by zero
                let totalComparisons = max(1, (imageFeatures.count * (imageFeatures.count - 1)) / 2)
                var processedIndices = Set<Int>()

                for i in 0..<imageFeatures.count {
                    if processedIndices.contains(i) { continue }

                    var similarGroup: [PHAsset] = [imageFeatures[i].asset]
                    processedIndices.insert(i)

                    for j in (i + 1)..<imageFeatures.count {
                        if processedIndices.contains(j) { continue }

                        do {
                            var distance: Float = 0
                            try imageFeatures[i].feature.computeDistance(&distance, to: imageFeatures[j].feature)

                            let similarity = 1 - distance
                            if similarity >= similarityThreshold {
                                similarGroup.append(imageFeatures[j].asset)
                                processedIndices.insert(j)
                            }

                            // Report comparison progress
                            processedComparisons += 1
                            let compareProgress = Float(processedComparisons) / Float(totalComparisons)
                            progressHandler("正在比较相似度...", 0.6 + compareProgress * 0.4)
                        } catch {
                            print("相似度计算失败: \(error)")
                        }
                    }

                    if similarGroup.count > 1 {
                        similarGroups.append(similarGroup)
                    }
                }

                // Sort the groups by photo count, largest first
                similarGroups.sort { $0.count > $1.count }

                DispatchQueue.main.async {
                    completion(similarGroups)
                }
            }
        } else {
            // Feature prints require iOS 13; return an empty result so the caller's group still balances
            DispatchQueue.main.async {
                completion([])
            }
        }
    }

    private func classifyByLocation(assets: PHFetchResult<PHAsset>,
                                    completion: @escaping ([String: [PHAsset]]) -> Void) {
        var locationGroups: [String: [PHAsset]] = [:]
        let group = DispatchGroup()
        let geocodeQueue = DispatchQueue(label: "com.app.geocoding")
        let semaphore = DispatchSemaphore(value: 10) // Limit concurrent geocoding requests

        assets.enumerateObjects { asset, _, _ in
            if let location = asset.location {
                group.enter()
                semaphore.wait()
                geocodeQueue.async {
                    let geocoder = CLGeocoder()
                    geocoder.reverseGeocodeLocation(location) { placemarks, error in
                        defer {
                            semaphore.signal()
                            group.leave()
                        }
                        if let placemark = placemarks?.first {
                            let locationName = self.formatLocationName(placemark)
                            DispatchQueue.main.async {
                                if locationGroups[locationName] == nil {
                                    locationGroups[locationName] = []
                                }
                                locationGroups[locationName]?.append(asset)
                            }
                        }
                    }
                }
            }
        }

        // Call back once every reverse-geocoding request has finished
        group.notify(queue: .main) {
            completion(locationGroups)
        }
    }

    // Format a placemark as a short location name (city, falling back to region)
    private func formatLocationName(_ placemark: CLPlacemark) -> String {
        if let city = placemark.locality {
            return city
        } else if let area = placemark.administrativeArea {
            return area
        }
        return "其他"
    }

    // Group photos that contain at least one detected face
    private func classifyByPeople(assets: PHFetchResult<PHAsset>,
                                  completion: @escaping ([String: [PHAsset]]) -> Void) {
        var peopleGroups: [String: [PHAsset]] = [:]
        let group = DispatchGroup()
        // Photos in which a face was detected
        var facesArray: [PHAsset] = []

        // Walk every asset and run face detection on a thumbnail
        assets.enumerateObjects { asset, _, _ in
            group.enter()
            // Request a thumbnail for face detection
            let options = PHImageRequestOptions()
            options.isSynchronous = false
            options.deliveryMode = .fastFormat
            PHImageManager.default().requestImage(
                for: asset,
                targetSize: CGSize(width: 500, height: 500), // A small size keeps detection fast
                contentMode: .aspectFit,
                options: options
            ) { image, _ in
                guard let image = image else {
                    group.leave()
                    return
                }
                // Detect faces with the Vision framework
                guard let ciImage = CIImage(image: image) else {
                    group.leave()
                    return
                }
                let request = VNDetectFaceRectanglesRequest()
                let handler = VNImageRequestHandler(ciImage: ciImage)
                do {
                    try handler.perform([request])
                    if let results = request.results, !results.isEmpty {
                        // At least one face found; record the asset
                        DispatchQueue.main.async {
                            facesArray.append(asset)
                        }
                    }
                } catch {
                    print("人脸检测失败: \(error)")
                }
                group.leave()
            }
        }

        // Call back once face detection has finished for every asset
        group.notify(queue: .main) {
            if !facesArray.isEmpty {
                peopleGroups["包含人脸的照片"] = facesArray
            }
            completion(peopleGroups)
        }
    }

    // Collect screenshots from the system's Screenshots smart album
    private func fetchScreenshots(from assets: PHFetchResult<PHAsset>,
                                  completion: @escaping ([PHAsset]) -> Void) {
        var screenshots: [PHAsset] = []
        // Fetch the system Screenshots smart album
        let screenshotAlbums = PHAssetCollection.fetchAssetCollections(
            with: .smartAlbum,
            subtype: .smartAlbumScreenshots,
            options: nil
        )
        // Collect every asset in that album
        screenshotAlbums.enumerateObjects { collection, _, _ in
            let fetchOptions = PHFetchOptions()
            let screenshotAssets = PHAsset.fetchAssets(in: collection, options: fetchOptions)
            screenshotAssets.enumerateObjects { asset, _, _ in
                screenshots.append(asset)
            }
        }
        completion(screenshots)
    }

    private func detectBlurryPhotos(from assets: PHFetchResult<PHAsset>, completion: @escaping ([PHAsset]) -> Void) {
        var blurryPhotos: [PHAsset] = []
        let group = DispatchGroup()
        let processingQueue = DispatchQueue(label: "com.app.blurryDetection", attributes: .concurrent)
        let resultQueue = DispatchQueue(label: "com.app.blurryResult") // Serial queue guarding shared state
        let semaphore = DispatchSemaphore(value: 8) // Limit concurrent image requests

        // Progress tracking
        var processedCount = 0
        let totalCount = assets.count

        for i in 0..<assets.count {
            let asset = assets[i]
            group.enter()
            semaphore.wait()

            let options = PHImageRequestOptions()
            options.deliveryMode = .fastFormat // Fast delivery is enough for blur analysis
            options.isSynchronous = false
            options.resizeMode = .fast

            // Request a low-resolution thumbnail to keep the check cheap
            PHImageManager.default().requestImage(
                for: asset,
                targetSize: CGSize(width: 300, height: 300),
                contentMode: .aspectFit,
                options: options
            ) { image, _ in
                defer {
                    semaphore.signal()
                }

                guard let image = image,
                      let cgImage = image.cgImage else {
                    group.leave()
                    return
                }

                processingQueue.async {
                    // Cheap blur heuristic
                    let isBlurry = self.quickBlurCheck(cgImage)

                    if isBlurry {
                        resultQueue.async {
                            blurryPhotos.append(asset)
                        }
                    }

                    // Update progress
                    resultQueue.async {
                        processedCount += 1
                        let progress = Float(processedCount) / Float(totalCount)
                        DispatchQueue.main.async {
                            print("模糊检测进度: \(Int(progress * 100))%")
                        }
                    }

                    group.leave()
                }
            }
        }

        // Notify on the result queue so pending appends finish before the final read,
        // then hand the result back on the main queue
        group.notify(queue: resultQueue) {
            let detected = blurryPhotos
            DispatchQueue.main.async {
                completion(detected)
            }
        }
    }

    // Fast blur heuristic: approximate edge strength on a downsampled grayscale copy
    private func quickBlurCheck(_ image: CGImage) -> Bool {

        let width = image.width
        let height = image.height
        let stride = 2 // Sample every other pixel to speed things up

        // Bail out on images too small to sample; treat them as not blurry
        guard width > (2 * stride), height > (2 * stride) else {
            return false
        }

        var buffer = [UInt8](repeating: 0, count: width * height)

        let colorSpace = CGColorSpaceCreateDeviceGray()
        guard let context = CGContext(
            data: &buffer,
            width: width,
            height: height,
            bitsPerComponent: 8,
            bytesPerRow: width,
            space: colorSpace,
            bitmapInfo: CGImageAlphaInfo.none.rawValue
        ) else {
            return false
        }

        context.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))

        // Simplified Laplacian-style edge measure
        var score: Double = 0

        for y in stride..<(height - stride) where y % stride == 0 {
            for x in stride..<(width - stride) where x % stride == 0 {
                let left = Int(buffer[y * width + (x - stride)])
                let right = Int(buffer[y * width + (x + stride)])
                let top = Int(buffer[(y - stride) * width + x])
                let bottom = Int(buffer[(y + stride) * width + x])

                // Simplified edge detection: take the strongest gradient at this sample
                let dx = abs(left - right)
                let dy = abs(top - bottom)
                score += Double(max(dx, dy))
            }
        }

        // Normalize by the approximate number of sampled pixels
        let normalizedScore = score / Double((width * height) / (stride * stride))

        // Threshold; tune this value against real photos
        let threshold = 20.0
        return normalizedScore < threshold
    }
}

extension ClassifyPhoto {

    // Look up the on-disk size of a single asset.
    // Note: "fileSize" is an undocumented key on PHAssetResource; it works in practice
    // but is not part of the public API contract.
    func getAssetSize(_ asset: PHAsset, completion: @escaping (Int64) -> Void) {
        let resources = PHAssetResource.assetResources(for: asset)
        if let resource = resources.first {
            var size: Int64 = 0
            if let fileSize = resource.value(forKey: "fileSize") as? CLong {
                size = Int64(fileSize)
            }
            completion(size)
        } else {
            completion(0)
        }
    }

    // Sum the sizes of a group of assets
    func calculateAssetsSize(_ assets: [PHAsset], completion: @escaping (PhotoSizeInfo) -> Void) {
        let group = DispatchGroup()
        var totalSize: Int64 = 0

        for asset in assets {
            group.enter()
            getAssetSize(asset) { size in
                totalSize += size
                group.leave()
            }
        }

        group.notify(queue: .main) {
            completion(PhotoSizeInfo(totalSize: totalSize, count: assets.count))
        }
    }
}
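
A minimal usage sketch for the class above. The authorization flow, the fetch options, and the logging in the callbacks are illustrative assumptions and not part of the original code; in a real app the callbacks would update UI instead of printing.

import Photos

func runClassification() {
    // Ask for photo-library access before fetching anything (assumed entry point)
    PHPhotoLibrary.requestAuthorization { status in
        guard status == .authorized else { return }

        let options = PHFetchOptions()
        options.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
        let assets = PHAsset.fetchAssets(with: .image, options: options)

        let classifier = ClassifyPhoto()
        classifier.classifyPhotos(
            assets: assets,
            progressHandler: { stage, progress in
                // Update a progress bar or label here
                print("\(stage): \(Int(progress * 100))%")
            },
            completion: { result in
                print("Screenshots: \(result.screenshots.count), bytes: \(result.screenshotsSize.totalSize)")
                print("Similar groups: \(result.similarPhotos.count)")
                print("Blurry photos: \(result.blurryPhotos.count)")
            }
        )
    }
}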
|