The project needs a face login feature. The idea is to detect the face on the frontend, send the face photo to the backend for recognition, and return the user's token to complete the login. The frontend opens the camera and uses tracking.js to detect faces in the video stream; once a face is detected it takes a snapshot and uploads it to the backend. The backend uses the face_recognition library and exposes a RESTful API with Flask for the frontend to call. The result is shown in two screenshots: the login page and the camera face-detection page. The frontend code is as follows:
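Only the <script> portion of the Vue component is reproduced below; tracking.js is assumed to be loaded globally, and debugHelper, msgbus and isEmpty are project helpers imported elsewhere in the file. The <template> markup is not part of the original listing, but a minimal sketch, assuming only the refs and element ids the script relies on (videoDom, canvasDOM, video_cam), could look like this:

<template>
  <div class="face-login">
    <!-- prompt text driven by the FaceisDetected computed property -->
    <p>{{ FaceisDetected }}</p>
    <video id="video_cam" ref="videoDom" autoplay playsinline muted></video>
    <canvas ref="canvasDOM"></canvas>
  </div>
</template>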


export default {
name: 'facelogin',
data() {
return {
count: 0,
isdetected: '请您保持脸部在画面中央',
videoEl: {},
canvasEL: {},
images: [],
trackCcv: false,
trackTracking: false,
autoCaptureTrackTraking: false,
userMediaConstraints: {
audio: false,
video: {
// ideal: the value the browser should aim for
width: {
min: 320,
ideal: 1280,
max: 1920
},
height: {
min: 240,
ideal: 720,
max: 1080
},
// frameRate: a lower frame rate may be preferable on constrained bandwidth
frameRate: {
min: 15,
ideal: 30,
max: 60
},
// use the front-facing camera
facingMode: 'user'
}
}
}
},
computed: {
FaceisDetected() {
return this.isdetected
}
},
created() {
this.changeView()
},

mounted() {
// The getUserMedia interface is used for handling camera input.
// Some browsers need a prefix so here we're covering all the options
navigator.getMedia =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia
this.init()
},
methods: {
async init() {
this.videoEl = this.$refs.videoDom
this.canvasEL = this.$refs.canvasDOM
await navigator.mediaDevices
.getUserMedia(this.userMediaConstraints)
.then(this.getMediaStreamSuccess)
.catch(this.getMediaStreamError)
await this.onPlay()
},
async onPlay() {
debugHelper.log('onPlay')
this.onTrackTracking()
},
changeView() {
this.setTitle('刷脸登陆')
this.setBackDisabled(false)
this.setBackIcon('arrow_back')
msgbus.vm.setBottomNavVisible(false)
msgbus.vm.setBottomBtnVisible(false)
msgbus.vm.setMsgInputVisible({ value: false })
},


onTrackTracking() {
const context = this
const video = this.videoEl
const canvas = this.canvasEL
const canvasContext = canvas.getContext('2d')
let tracker = new tracking.ObjectTracker('face')
// tracking.js attaches its own camera stream when { camera: true } is passed below
video.pause()
video.src = ''
// Detector parameters: initial block scale, sliding-window step size and edge density
tracker.setInitialScale(4)
tracker.setStepSize(2)
tracker.setEdgesDensity(0.1)
tracking.track('#video_cam', tracker, { camera: true })
tracker.on('track', function(event) {
const { autoCaptureTrackTraking } = context
canvasContext.clearRect(0, 0, canvas.width, canvas.height)
// Draw a rectangle around every face found in the current frame
event.data.forEach(function({ x, y, width, height }) {
canvasContext.strokeStyle = '#a64ceb'
canvasContext.strokeRect(x, y, width, height)
canvasContext.font = '11px Helvetica'
canvasContext.fillStyle = '#fff'
})
// Simple counter used as a debounce: the face has to show up in a run of
// consecutive track events before it is treated as a real detection
if (!isEmpty(event.data) && context.count <= 10) {
if (context.count < 0) context.count = 0
context.count += 1
//debugHelper.log(context.count)
if (context.count > 10) {
context.isdetected = '已检测到人脸,正在登录'
//context.$router.push({ name: 'pwdlogin' })
}
} else {
// Otherwise count back down; if the counter drops below zero, restore the prompt
context.count -= 1
if (context.count < 0) context.isdetected = '请您保持脸部在画面中央'
//this.isdetected = '已检测到人脸,正在登录'
}
})
},
onDownloadFile(item) {
// Save a captured snapshot (a data URL) to disk as a PNG file
const link = document.createElement('a')
link.href = item
link.download = `cahyo-${new Date().toISOString()}.png`
link.click()
link.remove()
},
onTakeCam() {
const canvas = document.createElement('canvas')
const video = this.$el.querySelector('#video_cam')
const canvasContext = canvas.getContext('2d')
if (video.videoWidth && video.videoHeight) {
// Crop a centered square out of the video frame
const isBiggerW = video.videoWidth > video.videoHeight
const fixVidSize = isBiggerW ? video.videoHeight : video.videoWidth
let offsetLeft = 0
let offsetTop = 0
if (isBiggerW) offsetLeft = (video.videoWidth - fixVidSize) / 2
else offsetTop = (video.videoHeight - fixVidSize) / 2
// Scale the square down onto a 300px canvas and grab it as a PNG data URL
canvas.width = canvas.height = 300
const { width, height } = canvas
canvasContext.drawImage(video, offsetLeft, offsetTop, fixVidSize, fixVidSize, 0, 0, width, height)
const image = canvas.toDataURL('image/png')
this.images.push(image)
}
},
onDetectFace(param, index) {
// Re-run face detection on a captured image and outline each face
// on top of the corresponding .img-item-<index> element
const imgItem = document.querySelector(`.img-item-${index}`)
const image = new Image()
image.src = param
const tracker = new tracking.ObjectTracker('face')
tracker.setStepSize(1.7)
tracking.track(image, tracker)
tracker.on('track', function(event) {
event.data.forEach(function(rect) {
window.plot(rect.x, rect.y, rect.width, rect.height)
})
})
window.plot = function(x, y, w, h) {
const rect = document.createElement('div')
imgItem.appendChild(rect)
rect.classList.add('rect')
rect.style.width = w + 'px'
rect.style.height = h + 'px'
rect.style.left = x + 'px'
rect.style.top = y + 'px'
rect.style.border = '2px solid yellow'
rect.style.position = 'absolute'
}
},
getMediaStreamSuccess(stream) {
window.stream = stream // make stream available to browser console
this.videoEl.srcObject = stream
debugHelper.log('getMediaStreamSuccess1')
//this.$store.commit('setVideoCanvasObject', this.videoEl)
debugHelper.log('getMediaStreamSuccess2')
},
// Failed to obtain the video media stream
getMediaStreamError(error) {
alert('视频媒体流获取错误' + error)
},
// Stop the media stream tracks
stopMediaStreamTrack() {
clearInterval(this.timeInterval)
if (typeof window.stream === 'object') {
this.videoEl.srcObject = null
//this.$store.commit('setVideoCanvasObject', '')
window.stream.getTracks().forEach(track => track.stop())
}
}
}
}
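The component above only stores each snapshot as a base64 data URL in this.images; the call that actually uploads the photo to the backend for recognition is not part of the listing. A minimal sketch of that step, assuming a hypothetical /api/face-login endpoint that accepts the data URL as JSON and returns the token described earlier:

// Hypothetical upload helper: the endpoint and payload shape are assumptions,
// not the project's actual API.
async function uploadFaceImage(dataUrl) {
  const resp = await fetch('/api/face-login', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ image: dataUrl })
  })
  if (!resp.ok) throw new Error('face login failed: ' + resp.status)
  const result = await resp.json()
  // On success the backend is expected to return the user token
  return result.token
}

A natural place to call it is inside the track handler, right where the counter passes its threshold and the '已检测到人脸,正在登录' message is set, using the data URL produced by onTakeCam().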


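On the backend, the approach described above is a Flask service that uses face_recognition to compare the uploaded photo against registered users and returns a token on success. Below is a minimal sketch under those assumptions; the route, payload shape, matching tolerance and token handling are illustrative, not the project's actual code:

import base64
import io

import face_recognition
from flask import Flask, jsonify, request

app = Flask(__name__)

# Assumed in-memory store of registered users: name -> 128-d face encoding
# (in practice these would come from the user database).
known_users = {}

@app.route('/api/face-login', methods=['POST'])
def face_login():
    # The frontend sends the canvas.toDataURL() result, i.e. a base64 data URL
    payload = request.get_json(silent=True) or {}
    data_url = payload.get('image', '')
    image_bytes = base64.b64decode(data_url.split(',', 1)[-1])
    image = face_recognition.load_image_file(io.BytesIO(image_bytes))

    encodings = face_recognition.face_encodings(image)
    if not encodings:
        return jsonify({'success': False, 'msg': 'no face found'}), 400

    names = list(known_users.keys())
    matches = face_recognition.compare_faces(
        [known_users[n] for n in names], encodings[0], tolerance=0.5)
    for name, matched in zip(names, matches):
        if matched:
            # Token generation is application specific; placeholder value here
            return jsonify({'success': True, 'user': name, 'token': 'issue-a-real-token'})
    return jsonify({'success': False, 'msg': 'face not recognized'}), 401

if __name__ == '__main__':
    app.run()

In a real deployment the registered encodings would be computed once from each user's enrollment photo, and the returned token would come from the application's normal authentication flow.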