I'm working on a 3D scanning feature, and it needs to capture real textures. I'm going to do it with ARKit. I've got the ARMeshAnchor and also the current frame's captured image (converted to a UIImage):
/// A lightweight snapshot of one ARFrame, kept as a candidate texture source.
struct AFrame {
// Downscaled copy of the frame's captured image; nil if conversion failed.
var capturedImage: UIImage?
// Capture time of the source ARFrame.
var timestamp: TimeInterval
// Camera state at capture time; later used by getTextureCoord to project
// mesh vertices into this image's texture space.
var camera: ARCamera
}
/// Associates one ARMeshAnchor with the set of camera frames captured while
/// its mesh was being scanned, so the mesh can later be textured from them.
struct TextureData {
    /// The mesh anchor this texture data belongs to.
    var anchor: ARMeshAnchor
    /// Candidate texture frames collected for this anchor.
    var frames: [AFrame]

    // Creating a CIContext is expensive (it compiles filter pipelines and
    // holds GPU state), so reuse a single shared instance instead of
    // allocating a fresh one for every converted frame.
    private static let ciContext = CIContext(options: nil)

    /// Records `frame` (downscaled via the project's `compressImageMid`
    /// extension) as a texture candidate for this anchor.
    mutating func updateTexture(frame: ARFrame) {
        let image = getTextureImage(frame: frame)?.compressImageMid(maxLength: 512 * 512)
        frames.append(AFrame(capturedImage: image,
                             timestamp: frame.timestamp,
                             camera: frame.camera))
    }

    /// Converts the frame's captured pixel buffer into a UIImage.
    /// - Returns: The rendered image, or nil if the CGImage could not be created.
    func getTextureImage(frame: ARFrame) -> UIImage? {
        let ciImage = CIImage(cvPixelBuffer: frame.capturedImage)
        guard let cgImage = Self.ciContext.createCGImage(ciImage, from: ciImage.extent) else {
            return nil
        }
        return UIImage(cgImage: cgImage)
    }
}
Because the area covered by an ARMeshAnchor may extend beyond what a single frame can see, one ARMeshAnchor corresponds to multiple frames in TextureData, and I generate one SCNNode object per ARMeshAnchor:
/// Builds an SCNNode for the anchor's mesh and attaches it to the scan scene.
/// - Parameter texture: The anchor plus its collected candidate frames.
func addNode(texture: TextureData) {
    let anchor = texture.anchor
    // NOTE: the original also computed `SCNMatrix4(anchor.transform)` into an
    // unused local (`aTrans`); removed — the transform is applied below via
    // `simdTransform`, and the geometry helper receives it directly.
    // Project helper: converts the ARKit mesh geometry into SCNGeometry,
    // requesting texture coordinates.
    let geom = SCNGeometry(geometry: anchor.geometry, modelMatrix: anchor.transform, needTexture: true)
    // geom.firstMaterial?.diffuse.contents = texture.capturedImage ???
    let meshNode = SCNNode(geometry: geom)
    meshNode.simdTransform = anchor.transform
    // SceneKit scene-graph mutations must happen on the main thread.
    DispatchQueue.main.async {
        self.scanNode.addChildNode(meshNode)
    }
}
The problem I am currently encountering is how to select the correct frame for each face, based on the face positions provided by the ARMeshAnchor — or, alternatively, how to use multiple images to texture a single SCNGeometry.
I have tried getting the current anchors from each frame (the system provides a method for this), and then turning each anchor's faces into their own SCNNode to apply the texture. This approach is logically feasible, but it causes serious performance problems, because thousands of SCNNodes end up being created:
// Per-face texturing attempt: builds one SCNNode per mesh face (the source of
// the "thousands of SCNNodes" performance problem described above).
// NOTE(review): `fVerts`/`tCoords` accumulate across ALL iterations and are
// never cleared per face, yet the geometry element below always uses indices
// [0, 1, 2] — so every node after the first renders the FIRST face's vertices.
// Presumably the accumulators should be reset each iteration (or the index
// list extended) — confirm intent.
for f in 0..<faces.count {
let face = face(at: f, faces: faces)
// NOTE(review): iterating every stored frame per face multiplies the work;
// each frame pass appends another copy of this face's vertices and coords.
for frame in texture.frames {
for fv in face {
// Look up the vertex and lift it into world space via the anchor transform.
let vert = vertex(at: UInt32(fv), vertices: vertices)
let vTrans = SCNMatrix4MakeTranslation(vert[0], vert[1], vert[2])
let wTrans = SCNMatrix4Mult(vTrans, aTrans)
let wPos = SCNVector3(wTrans.m41, wTrans.m42, wTrans.m43)
fVerts.append(wPos)
// let norm = normal(at: UInt32(fv), normals: normals)
// let nTrans = SCNMatrix4MakeTranslation(norm[0], norm[1], norm[2])
// let wNTrans = SCNMatrix4Mult(nTrans, aTrans)
// let wNPos = SCNVector3(wNTrans.m41, wTrans.m42, wNTrans.m43)
// fNorms.append(wNPos)
// here's where you would find the frame that best fits
// for simplicity, just use the last frame here
// Project the vertex into this frame's image to get a UV coordinate.
let tCoord = getTextureCoord(camera: frame.camera, vert: vert, aTrans: anchor.transform)
tCoords.append(tCoord)
}
}
// Build a single-triangle geometry for this face and hand it to SceneKit.
let vertsSource = SCNGeometrySource(vertices: fVerts)
let facesSource = SCNGeometryElement(indices: [UInt32(0), UInt32(1), UInt32(2)], primitiveType: .triangles)
let textrSource = SCNGeometrySource(textureCoordinates: tCoords)
let geom = SCNGeometry(sources: [vertsSource,textrSource], elements: [facesSource])
// NOTE(review): `TextureData` as shown above has no `capturedImage` property
// (only `AFrame` does) — this likely should read a specific frame's image.
geom.firstMaterial?.diffuse.contents = texture.capturedImage
let meshNode = SCNNode(geometry: geom)
// SceneKit scene-graph mutations must happen on the main thread.
DispatchQueue.main.async {
self.scanNode.addChildNode(meshNode)
}
}
}