First, a quick overview of the basic components. If you want the resources, leave a comment and I'll upload them to GitHub when I get a chance. I never set a points price here, yet the site demands a paid recharge to download them; even I would have to pay to download my own files. Unbelievable!
ARSCNView can be thought of as the canvas, or the scene, that everything is rendered into.
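A minimal sketch of how the view might be wired up in a plain UIViewController (the class and property names are illustrative, not from the demo):

import ARKit
import SceneKit
import UIKit

class ARViewController: UIViewController {
    // The ARSCNView is the "canvas": it renders the camera feed plus any
    // SceneKit nodes attached to its scene graph
    let sceneView = ARSCNView()

    override func viewDidLoad() {
        super.viewDidLoad()
        sceneView.frame = view.bounds
        sceneView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
        view.addSubview(sceneView)
        sceneView.scene = SCNScene()
    }
}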
1 Configuration: ARWorldTrackingConfiguration drives AR world tracking and detection.
ARFaceTrackingConfiguration is the counterpart for face tracking. (A device-support check is sketched below.)
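Not every device supports every configuration, so it can be worth guarding the session setup; a small hedged sketch:

// World tracking needs an A9 chip or newer; face tracking needs a
// TrueDepth camera (or a recent A-series chip on newer OS versions)
if ARWorldTrackingConfiguration.isSupported {
    // safe to run world tracking (and image/object detection)
}
if ARFaceTrackingConfiguration.isSupported {
    // safe to run face tracking
}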
2 Loading resources: image resources need little explanation; photograph the object with your phone and strip out the background. 3D models have to be built with a LiDAR-equipped iPad Pro or iPhone Pro.
There are two detection modes in total: recognizing images and recognizing 3D models. Image recognition seems to work better, but the awkward part is recovering real-world spatial coordinates, and it is not very friendly to moving objects either, which is why I later planned to look into CoreML.
let configuration = ARWorldTrackingConfiguration()

// Load the image resources by name
var images: [UIImage] = []
for i in 1...19 {
    let image = UIImage(named: "AR\(i)")
    images.append(image!)
}
configuration.detectionImages = loadedImagesFromDirectoryContents(images)
configuration.maximumNumberOfTrackedImages = 1

// Load the 3D resources; the resource files are shown below
if let referenceObjects = ARReferenceObject.referenceObjects(inGroupNamed: "AR Resource Group", bundle: nil) {
    configuration.detectionObjects = referenceObjects
}
configuration.isAutoFocusEnabled = true // make sure autofocus is enabled

// configuration.planeDetection = .horizontal
configuration.isLightEstimationEnabled = true
sceneView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])

// Adopted delegate protocols:
// ARSessionDelegate, the session protocol
sceneView.session.delegate = self
// ARSCNViewDelegate, the scene-view protocol
sceneView.delegate = self

// Show debugging visuals
sceneView.debugOptions = [SCNDebugOptions.showFeaturePoints]

// Build ARReferenceImages from plain UIImages, keyed by name
func loadedImagesFromDirectoryContents(_ images: [UIImage]) -> Set<ARReferenceImage> {
    var index = 0
    var customReferenceSet = Set<ARReferenceImage>()
    images.forEach { downloadedImage in
        // 1. Convert the UIImage to a CGImage
        guard let cgImage = downloadedImage.cgImage else { return }
        // 2. ARKit expects the printed image's physical width in meters, not
        //    pixels; 0.1 m is an assumed placeholder, adjust to the real size
        let physicalWidth: CGFloat = 0.1
        // 3. Create a custom AR reference image with a unique name
        let customARReferenceImage = ARReferenceImage(cgImage, orientation: CGImagePropertyOrientation.up, physicalWidth: physicalWidth)
        customARReferenceImage.name = "MyCustomARImage\(index)"
        // 4. Insert the reference image into our set
        customReferenceSet.insert(customARReferenceImage)
        print("ARReference Image == \(customARReferenceImage)")
        index += 1
    }
    // 5. Return the set
    return customReferenceSet
}
The 3D resource file format is .arobject; the file is generated by scanning the object with a LiDAR-equipped iPad Pro. See the demo code for details.
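Besides putting scanned objects in an AR Resource Group as in the code above, a .arobject file can also be loaded straight from a URL; a minimal sketch, assuming a hypothetical file name:

do {
    // ARReferenceObject(archiveURL:) loads a single scanned .arobject file
    if let url = Bundle.main.url(forResource: "scannedItem", withExtension: "arobject") {
        let referenceObject = try ARReferenceObject(archiveURL: url)
        configuration.detectionObjects = [referenceObject]
    }
} catch {
    print("Failed to load .arobject: \(error)")
}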
The ARSessionDelegate protocol
/* The session failed */
func session(_ session: ARSession, didFailWithError error: any Error) {
    print("didFailWithError")
}

/* The camera changed its tracking state */
func session(_ session: ARSession, cameraDidChangeTrackingState camera: ARCamera) {
    let state = camera.trackingState
    print("cameraDidChangeTrackingState: \(state)")
}

/* Called when the session is interrupted. The session stops tracking when it
   can no longer receive the sensor data it needs, e.g. when video capture is
   interrupted because the app was sent to the background or because multiple
   foreground apps are active (see AVCaptureSessionInterruptionReason). No
   additional frame updates are delivered until the interruption ends. */
func sessionWasInterrupted(_ session: ARSession) {
    print("sessionWasInterrupted")
}

/* Called when the interruption ends. The session resumes from its last known
   state once the interruption is over. If the device has moved, anchors will
   be misaligned; to avoid this, some apps may want to reset tracking (see
   ARSessionRunOptions). */
func sessionInterruptionEnded(_ session: ARSession) {
    print("sessionInterruptionEnded")
}

/* Called when the session outputs a new audio sample buffer */
func session(_ session: ARSession, didOutputAudioSampleBuffer audioSampleBuffer: CMSampleBuffer) {
    print("didOutputAudioSampleBuffer")
}

func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
    // Update anchor positions; the stored anchors can also be image anchors
    for anchor in anchors {
        guard let objectAnchor = anchor as? ARObjectAnchor else { continue }
        // Find the frame node that outlines the detected object
        if let frameNode = sceneView.scene.rootNode.childNode(withName: "frame", recursively: true) {
//            frameNode.position = SCNVector3(
//                objectAnchor.transform.columns.3.x,
//                objectAnchor.transform.columns.3.y,
//                objectAnchor.transform.columns.3.z
//            )
            frameNode.simdTransform = objectAnchor.transform
        }
    }

//    guard let trackedAnchor = trackedAnchor else { return }
//
//    for anchor in anchors {
//        if anchor.identifier == trackedAnchor.identifier {
//            let newTransform = anchor.transform
//            let position = SCNVector3(newTransform.columns.3.x,
//                                      newTransform.columns.3.y,
//                                      newTransform.columns.3.z)
//
//            DispatchQueue.main.async {
//                self.trackNode?.position = position
//            }
//        }
//    }
//    print("didUpdate session")
//    // Track the target object
//    for anchor in anchors {
//        if let objectAnchor = anchor as? ARObjectAnchor {
//            print("Object moved to: \(objectAnchor.transform)")
//
//            // Update the virtual object's position
//            if let node = sceneView.node(for: objectAnchor) {
//                let transform = objectAnchor.transform
//                node.simdTransform = transform
//            }
//        }
//    }
}

func session(_ session: ARSession, didUpdate frame: ARFrame) {
    DispatchQueue.global().async {
        self.handleFrame(session: session, frame: frame)
    }
}
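The interruption comments above mention that anchors can drift if the device moved; one possible way to handle it (an assumed pattern, not what the demo does) is to rerun the current configuration with reset options when the interruption ends:

func sessionInterruptionEnded(_ session: ARSession) {
    // Rerun the current configuration and drop stale anchors so that
    // detection starts fresh (see ARSession.RunOptions)
    if let configuration = session.configuration {
        session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
    }
}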
The ARSCNViewDelegate protocol
/* Implement this to provide a custom node for a given anchor.
   @discussion The node will automatically be added to the scene graph. If
   this method is not implemented, a node will be created automatically. If
   nil is returned, the anchor will be ignored.
   @param renderer The renderer that renders the scene.
   @param anchor The anchor that was added.
   @return The node that will be mapped to the anchor, or nil. */
// func renderer(_ renderer: any SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
//     return nil
// }

/* Called when a new node has been mapped to the given anchor.
   @param renderer The renderer that renders the scene.
   @param node The node mapped to the anchor.
   @param anchor The anchor that was added. */
func renderer(_ renderer: any SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//    print("Adding a node for a new anchor to the scene")
    if let objectAnchor = anchor as? ARObjectAnchor {
        print("Detected object: \(objectAnchor.referenceObject.name ?? "Unknown")")
        // Create a frame to outline the object
        let frameNode = createFrame(for: objectAnchor.referenceObject)
        frameNode.position = SCNVector3(objectAnchor.transform.columns.3.x,
                                        objectAnchor.transform.columns.3.y,
                                        objectAnchor.transform.columns.3.z)
        sceneView.scene.rootNode.addChildNode(frameNode)
    }
//    if let imageAnchor = anchor as? ARImageAnchor {
//        let image = imageAnchor.referenceImage
//
//        if let imageName = image.name, imageName.hasPrefix("MyCustomARImage") {
//            // Create a virtual object (e.g. a box) to mark the target position
//            let box = SCNBox(width: 0.1, height: 0.1, length: 0.1, chamferRadius: 0)
//            let material = SCNMaterial()
//            material.diffuse.contents = UIColor.red
//            box.materials = [material]
//            let boxNode = SCNNode(geometry: box)
//            boxNode.position = SCNVector3(0, 0, 0)
//            node.addChildNode(boxNode)
//            DispatchQueue.main.async {
//                self.showToast()
//            }
//        }
//    }
}

/* Called when a node will be updated with data from the given anchor.
   @param renderer The renderer that renders the scene.
   @param node The node that will be updated.
   @param anchor The anchor that is about to be updated. */
func renderer(_ renderer: any SCNSceneRenderer, willUpdate node: SCNNode, for anchor: ARAnchor) {
//    print("About to update the node with new anchor data")
}

/* Called when a node has been updated with data from the given anchor.
   @param renderer The renderer that renders the scene.
   @param node The node that was updated.
   @param anchor The anchor that was updated. */
func renderer(_ renderer: any SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
//    print("Node updated with anchor data")
}

/* Called when a mapped node has been removed from the scene graph for the
   given anchor.
   @param renderer The renderer that renders the scene.
   @param node The node that was removed.
   @param anchor The anchor that was removed. */
func renderer(_ renderer: any SCNSceneRenderer, didRemove node: SCNNode, for anchor: ARAnchor) {
    print("Node removed")
}
That is the overall code flow.
The image recognition part is as follows:
private func handleFrame(session: ARSession, frame: ARFrame) {
    // Throttle: only process roughly one frame per second
    let interval = frame.timestamp - (self.currentFrame?.timestamp ?? 0)
//    print("time interval \(interval)")
    if interval < 1 { return }
    self.currentFrame = frame
    guard let frameImg = getCurrentFrameImage(from: session) else { return }
    for img in self.images {
//        if compareImages(image1: frameImg, image2: img) == 1 {
//            isMatched = true
//            break
//        }
        matchImages(image1: frameImg, image2: img) { [weak self] isok in
            if isok {
                DispatchQueue.main.async {
                    self?.showToast()
                }
            }
        }
    }
}

// Grab the current session frame as a UIImage
func getCurrentFrameImage(from session: ARSession) -> UIImage? {
    guard let currentFrame = session.currentFrame else {
        print("No current ARFrame")
        return nil
    }
    let pixelBuffer = currentFrame.capturedImage
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}

// Compare two images
func matchImages(image1: UIImage, image2: UIImage, completion: @escaping (Bool) -> Void) {
    guard let cgImage1 = image1.cgImage, let cgImage2 = image2.cgImage else {
        completion(false)
        return
    }
    // Feature prints for similarity, plus a custom rectangle-detection request
    let request1 = VNGenerateImageFeaturePrintRequest()
    let request2 = VNGenerateImageFeaturePrintRequest()
    let request3 = VNDetectRectanglesRequest()
    request3.minimumConfidence = 0.8
    request3.minimumAspectRatio = 0.1
    request3.maximumAspectRatio = 1.0
    request3.quadratureTolerance = 10

    let handler1 = VNImageRequestHandler(cgImage: cgImage1, options: [:])
    let handler2 = VNImageRequestHandler(cgImage: cgImage2, options: [:])
    let handler3 = VNImageRequestHandler(cgImage: cgImage1, options: [:])
    do {
        try handler1.perform([request1])
        try handler2.perform([request2])
        try handler3.perform([request3])
        guard let featurePrint1 = request1.results?.first as? VNFeaturePrintObservation,
              let featurePrint2 = request2.results?.first as? VNFeaturePrintObservation else {
            completion(false)
            return
        }
        if let observations = request3.results as? [VNRectangleObservation],
           let boundingBox = observations.first?.boundingBox {
            // Assume the target region is the first matched rectangle
            print("boundingBox: \(boundingBox)")
        }
        var distance: Float = 0
        try featurePrint1.computeDistance(&distance, to: featurePrint2)
        print("feature-print distance: \(distance)")
        if distance < 0.8 {
            print("Images are similar")
        }
        completion(distance < 0.8) // similarity threshold (the distance is unitless, not meters)
    } catch {
        print("Match failed: \(error)")
        completion(false)
    }
}
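A hedged sketch (not part of the demo) of one way to go from the normalized Vision boundingBox printed above to a rough world position: Vision rects use a lower-left origin, so the rect has to be flipped before an ARKit hit test. This also glosses over the difference between the captured-image and on-screen coordinate spaces, so treat it as an approximation only:

func worldPosition(for boundingBox: CGRect, in sceneView: ARSCNView) -> SCNVector3? {
    let viewSize = sceneView.bounds.size
    // Flip the Y axis (Vision is bottom-left origin, UIKit is top-left)
    let flipped = CGRect(x: boundingBox.origin.x,
                         y: 1 - boundingBox.origin.y - boundingBox.height,
                         width: boundingBox.width,
                         height: boundingBox.height)
    let center = CGPoint(x: flipped.midX * viewSize.width,
                         y: flipped.midY * viewSize.height)
    // Hit-test against feature points to estimate a world-space position
    guard let result = sceneView.hitTest(center, types: .featurePoint).first else {
        return nil
    }
    let t = result.worldTransform.columns.3
    return SCNVector3(t.x, t.y, t.z)
}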