First, go to https://developer.apple.com/machine-learning/ and download the Resnet50.mlmodel file, then drag it into the Xcode project folder.
The code is as follows:
import UIKit
import CoreML

class ViewController: UIViewController {

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        let image = UIImage(named: "sample")
        // Resnet50 expects a 224 x 224 input image.
        let width: CGFloat = 224.0
        let height: CGFloat = 224.0
        // Open a bitmap-based graphics context and make it the current context.
        UIGraphicsBeginImageContext(CGSize(width: width, height: height))
        // Draw the image loaded from the project into the given area of the context.
        image?.draw(in: CGRect(x: 0, y: 0, width: width, height: height))
        // Read the resized image back from the context.
        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        // Close the context once the conversion is done.
        UIGraphicsEndImageContext()
        if #available(iOS 11.0, *) {
            let resnet50 = Resnet50() // Initialize the machine learning model object.
            guard let output = try? resnet50.prediction(image: PixelBufferFromImage(newImage!)!) else {
                fatalError("Unexpected Error")
            }
            print(output.classLabel)
        }
    }

    func createPixelBufferPool(_ width: Int32, _ height: Int32, _ pixelFormat: FourCharCode, _ maxBufferCount: Int32) -> CVPixelBufferPool? {
        var outputPool: CVPixelBufferPool? = nil
        let sourcePixelBufferOptions: NSDictionary = [
            kCVPixelBufferPixelFormatTypeKey: pixelFormat,
            kCVPixelBufferWidthKey: width,
            kCVPixelBufferHeightKey: height,
            kCVPixelBufferOpenGLESCompatibilityKey: true,
            kCVPixelBufferIOSurfacePropertiesKey: NSDictionary()
        ]
        let pixelBufferPoolOptions: NSDictionary = [kCVPixelBufferPoolMinimumBufferCountKey: maxBufferCount]
        CVPixelBufferPoolCreate(kCFAllocatorDefault, pixelBufferPoolOptions, sourcePixelBufferOptions, &outputPool)
        return outputPool
    }

    func PixelBufferFromImage(_ image: UIImage) -> CVPixelBuffer? {
        let size = image.size
        var pxbuffer: CVPixelBuffer?
        let pixelBufferPool = createPixelBufferPool(224, 224, FourCharCode(kCVPixelFormatType_32BGRA), 2056) // Hard coded values for demo purposes.
        let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool!, &pxbuffer)
        guard status == kCVReturnSuccess else {
            return nil
        }
        CVPixelBufferLockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pxdata = CVPixelBufferGetBaseAddress(pxbuffer!)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pxdata,
                                width: Int(size.width),
                                height: Int(size.height),
                                bitsPerComponent: 8,
                                bytesPerRow: CVPixelBufferGetBytesPerRow(pxbuffer!),
                                space: rgbColorSpace,
                                bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
        // Flip the coordinate system so the image is not drawn upside down.
        context?.translateBy(x: 0, y: image.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)
        UIGraphicsPushContext(context!)
        image.draw(in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
        return pxbuffer
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
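As a side note, if you would rather not maintain the CVPixelBuffer conversion yourself, the Vision framework (also introduced in iOS 11) can wrap the same Core ML model and do the scaling and pixel-format conversion for you. The sketch below is only an alternative path under that assumption; it reuses the Xcode-generated Resnet50 class and an image passed in by the caller:

import UIKit
import Vision

@available(iOS 11.0, *)
func classifyWithVision(_ uiImage: UIImage) {
    // Wrap the Xcode-generated Core ML model for use with Vision.
    guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
    // Vision resizes and converts the input image to match the model automatically.
    let request = VNCoreMLRequest(model: model) { request, _ in
        guard let results = request.results as? [VNClassificationObservation],
              let top = results.first else { return }
        print(top.identifier, top.confidence)
    }
    guard let cgImage = uiImage.cgImage else { return }
    let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
    try? handler.perform([request])
}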
I fed it a picture of a fried egg, and it classified it as a jellyfish... which, to be fair, it does somewhat resemble. The model has more imagination than a child.
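If you want to see how confident the model actually was about "jellyfish", the generated output type for Apple's Resnet50 also exposes a per-label probability dictionary (classLabelProbs in this build; treat the exact property name as an assumption if you use a different model). A minimal sketch, assuming output is the value returned by resnet50.prediction(image:):

// Print the five most likely labels with their probabilities.
let top5 = output.classLabelProbs
    .sorted { $0.value > $1.value }
    .prefix(5)
for (label, probability) in top5 {
    print(label, String(format: "%.3f", probability))
}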