Python：未找到 _audio_microfrontend_op.so（_audio_microfrontend_op.so not found）

eblbsuwk  于 2023-01-12  发布在  Python
关注(0)|答案(3)|浏览(104)

我的Python版本是3.7(Windows10,64位,tensorflow 2.0)
我用opencv实现了一个检测眨眼的程序。在pycham中按F5键执行正常操作。我用pyinstaller生成了exe文件。但是,当我运行生成的exe文件时,出现了错误。

**...............tensorflow.python.framework.errors_impl.NotFoundError: C:\Users\username\AppData\Local\Temp\_MEI401122\tensorflow\lite\experimental\microfrontend\python\ops\_audio_microfrontend_op.so 未找到

[40068] Failed to execute script 'test'（无法执行脚本 test）**
我试过很多方法,找到了一个解决办法,但都行不通。
错误消息上的路径不在我的PC上。(文件夹_MEI401122不存在)
在我的PC上,_audio_microfrontend_op. so文件位于下图所示的路径中。
我不知道。请帮帮我。

我添加了源代码和错误内容。
我的项目文件夹路径如下。
路径: C:\Users\username\Downloads\eye_blink_detector-master
[My错误内容图像]

[My源代码]

# -*- coding: utf-8 -*-
import cv2, dlib
import numpy as np
from imutils import face_utils
from keras.models import load_model
from time import localtime, strftime
from datetime import datetime
import time
from tkinter import *
import tkinter.messagebox

root = Tk()

WELCOME_MSG = '''Welcome to this event.'''
WELCOME_DURATION = 2000

def welcome():
    top = tkinter.Toplevel()
    top.title('Welcome')
    Message(top, text="카운트", padx=20, pady=20).pack()
    top.after(WELCOME_DURATION, top.destroy)

IMG_SIZE = (34, 26)

#root.mainloop()

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

model = load_model('models/2018_12_17_22_58_35.h5')
model.summary()

count = 0
count_eye_open = 0

f = open("d:/새파일.txt", 'a')

def crop_eye(img, eye_points):
  x1, y1 = np.amin(eye_points, axis=0)
  x2, y2 = np.amax(eye_points, axis=0)
  cx, cy = (x1 + x2) / 2, (y1 + y2) / 2

  w = (x2 - x1) * 1.2
  h = w * IMG_SIZE[1] / IMG_SIZE[0]

  margin_x, margin_y = w / 2, h / 2

  min_x, min_y = int(cx - margin_x), int(cy - margin_y)
  max_x, max_y = int(cx + margin_x), int(cy + margin_y)

  eye_rect = np.rint([min_x, min_y, max_x, max_y]).astype(np.int)

  eye_img = gray[eye_rect[1]:eye_rect[3], eye_rect[0]:eye_rect[2]]

  return eye_img, eye_rect

# main

cap = cv2.VideoCapture(0) #'videos/2.mp4')

while cap.isOpened():
  ret, img_ori = cap.read()

  if not ret:
    break

#윈도우 사이즈
  img_ori = cv2.resize(img_ori, dsize=(0, 0), fx=1.0, fy=1.0)

  img = img_ori.copy()
  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

  faces = detector(gray)

  dt = datetime.now()

  for face in faces:

    shapes = predictor(gray, face)
    shapes = face_utils.shape_to_np(shapes)

    eye_img_l, eye_rect_l = crop_eye(gray, eye_points=shapes[36:41]) #l_eye_poits = [36, 37, 38, 39, 40, 41] 원소스: [36,42]
    eye_img_r, eye_rect_r = crop_eye(gray, eye_points=shapes[42:47]) #r_eye_points = [42, 43, 44, 45, 46, 47] 원소스: [42:48]

    eye_img_l = cv2.resize(eye_img_l, dsize=IMG_SIZE)
    eye_img_r = cv2.resize(eye_img_r, dsize=IMG_SIZE)
    eye_img_r = cv2.flip(eye_img_r, flipCode=1)

    cv2.imshow('l', eye_img_l)
    cv2.imshow('r', eye_img_r)

    eye_input_l = eye_img_l.copy().reshape((1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.
    eye_input_r = eye_img_r.copy().reshape((1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.

    pred_l = model.predict(eye_input_l)
    pred_r = model.predict(eye_input_r)

    # visualize
    state_l = '%.2f' if pred_l > 0.1 else '-%.1f'
    state_r = '%.2f' if pred_r > 0.1 else '-%.1f'

    state_l = state_l % pred_l
    state_r = state_r % pred_r

    # Blink Count
    if pred_l <= 0.1 and pred_r <= 0.1:
        count_eye_open += 1
        print("blinking, "+ str(dt.strftime('%Y-%m-%d %H:%M:%S.%f')))

    cv2.rectangle(img, pt1=tuple(eye_rect_l[0:2]), pt2=tuple(eye_rect_l[2:4]), color=(255,255,255), thickness=2)
    cv2.rectangle(img, pt1=tuple(eye_rect_r[0:2]), pt2=tuple(eye_rect_r[2:4]), color=(255,255,255), thickness=2)

    cv2.putText(img, state_l, tuple(eye_rect_l[0:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2)
    cv2.putText(img, state_r, tuple(eye_rect_r[0:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2)

    cv2.putText(img, "eye blink: " + str(count_eye_open), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    cv2.putText(img, "Time: " + str(strftime("%S", localtime())), (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))

    if str(strftime("%S", localtime())) == "00":
        count += 1
        if count == 1 and count_eye_open > 0:
            print("Transfer Data...:" + str(count_eye_open))
            f.write("Transfer Data...:" + str(count_eye_open) + "\n")
            count_eye_open = 0
            count = 0
    else:
      count = 0

    time.sleep(0.12)

  cv2.imshow('result', img)
  if cv2.waitKey(1) == ord('q'):
    f.close()
    break
h7wcgrx3

h7wcgrx31#

我可以通过手动指定将_audio_microfrontend_op. so文件复制到spec文件中来修复此错误。

# -*- mode: python ; coding: utf-8 -*-

import os
import importlib

a = Analysis(
        (...)
             datas=[(os.path.join(os.path.dirname(importlib.import_module('tensorflow').__file__),
                                  "lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so"),
                     "tensorflow/lite/experimental/microfrontend/python/ops/")],
        (...)
)
ilmyapht

ilmyapht2#

对于那些仍然对 spec 文件所在目录感到困惑的人：filename.spec 与你原来的 filename.py 位于同一目录。请将上面那段内容添加到 datas = [] 中。

(os.path.join(os.path.dirname(importlib.import_module('tensorflow').__file__),
                              "lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so"),
                 "tensorflow/lite/experimental/microfrontend/python/ops/")

然后把那里导入

import os
import importlib

然后运行这个程序,根据您更改的规范进行编译。

pyinstaller filename.spec
ctrmrzij

ctrmrzij3#

没有足够的代表发表评论,所以我只发布一个澄清问题的答案。正如Mieszko在他的答案中提到的,你需要手动指定复制 _audio_microfrontend_op.so 文件。
为此,打开原始 filename.py 文件所在的目录,你会发现一个名为 filename.spec 的文件,在编辑器中打开这个文件,并添加Mieszko指定的修改:

# -*- mode: python ; coding: utf-8 -*-

import os
import importlib

a = Analysis(
    (...)
         datas=[(os.path.join(os.path.dirname(importlib.import_module('tensorflow').__file__),
                              "lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so"),
                 "tensorflow/lite/experimental/microfrontend/python/ops/")],
    (...)
)

完成这些更改后,保存文件,然后再次运行pyinstaller**,使用.spec文件而不是.py文件**

pyinstaller filename.spec

相关问题