python读取matlab数据(.m文件 .mat文件)

x33g5p2x  于2022-04-17 转载在 Python  
字(9.4k)|赞(0)|评价(0)|浏览(571)

首先导入scipy的包 from scipy.io import loadmat
然后读取 m = loadmat("F:/__identity/activity/论文/data/D001.mat")(注意要使用英文直引号,中文弯引号会导致语法错误)
注意这里m是一个dict数据结构

4.接下来就是用Python读取上一步中保存的matlab工作区的数据Data。Python中我们需要用到scipy库,这里我们先import进去

import scipy.io as scio

data=scio.loadmat('./matlab.mat')

type(data)

输出的为dict字典类型

7.读取对应我们想要的数据

这里我们假设需要将数据matlab_y读进python中(这里我们用numpy库将数据转化为数组类型)

import numpy as np #导入矩阵处理库
python_y=np.array(data['matlab_y']) #将matlab数据赋值给python变量

至此,就完成了使用python读取matlab数据。
————————————————
版权声明:本文为CSDN博主「刚开始的人生」的原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/qq_44946715/article/details/119932640

表情识别的处理:

https://github.com/chenxindaaa/emotic/blob/main/mat2py.py

  1. import argparse
  2. import csv
  3. import cv2
  4. import numpy as np
  5. import os
  6. from scipy.io import loadmat
class emotic_train:
    """One annotated person from the EMOTIC *train* split.

    Unwraps the nested numpy object arrays that scipy.io.loadmat produces
    for a single person record: bounding box, categorical emotion labels,
    continuous (VAD) labels, gender and age.
    """

    def __init__(self, filename, folder, image_size, person):
        # Image file name and the dataset sub-folder it lives in.
        self.filename = filename
        self.folder = folder
        self.im_size = []   # [row, col] filled by set_imsize
        self.bbox = []      # [x1, y1, x2, y2] ints, clamped to image bounds
        self.cat = []       # categorical emotion labels (strings)
        self.cont = []      # continuous (VAD) values
        # person is a mat struct accessed positionally.
        # NOTE(review): field order assumed [bbox, cat, cont, gender, age] —
        # confirm against the Annotations.mat schema.
        self.gender = person[3][0]
        self.age = person[4][0]
        # Train split has a single annotator; these flags flip to 1 once the
        # labels are parsed (cont_annotators is zeroed again on NaN).
        self.cat_annotators = 0
        self.cont_annotators = 0
        self.set_imsize(image_size)
        self.set_bbox(person[0])
        self.set_cat(person[1])
        self.set_cont(person[2])
        self.check_cont()

    def set_imsize(self, image_size):
        # Peel off two levels of mat nesting to reach the scalar row/col values.
        image_size = np.array(image_size).flatten().tolist()[0]
        row = np.array(image_size[0]).flatten().tolist()[0]
        col = np.array(image_size[1]).flatten().tolist()[0]
        self.im_size.append(row)
        self.im_size.append(col)

    def validate_bbox(self, bbox):
        # Clamp each corner into the image bounds and cast to int.
        # NOTE(review): x is clamped by im_size[0] and y by im_size[1];
        # verify which of those is width vs height.
        x1, y1, x2, y2 = bbox
        x1 = min(self.im_size[0], max(0, x1))
        x2 = min(self.im_size[0], max(0, x2))
        y1 = min(self.im_size[1], max(0, y1))
        y2 = min(self.im_size[1], max(0, y2))
        return [int(x1), int(y1), int(x2), int(y2)]

    def set_bbox(self, person_bbox):
        self.bbox = self.validate_bbox(np.array(person_bbox).flatten().tolist())

    def set_cat(self, person_cat):
        # Two unwrap steps, then one scalar label per category entry.
        cat = np.array(person_cat).flatten().tolist()
        cat = np.array(cat[0]).flatten().tolist()
        self.cat = [np.array(c).flatten().tolist()[0] for c in cat]
        self.cat_annotators = 1

    def set_cont(self, person_cont):
        # Continuous labels from the single train-split annotator.
        cont = np.array(person_cont).flatten().tolist()[0]
        self.cont = [np.array(c).flatten().tolist()[0] for c in cont]
        self.cont_annotators = 1

    def check_cont(self):
        # Invalidate the continuous annotation if any value is NaN.
        for c in self.cont:
            if np.isnan(c):
                self.cont_annotators = 0
                break
class emotic_test:
    """One annotated person from the EMOTIC *val*/*test* splits.

    Unlike emotic_train, these splits carry labels from multiple annotators
    plus pre-combined ("comb") categorical and continuous labels.
    """

    def __init__(self, filename, folder, image_size, person):
        self.filename = filename
        self.folder = folder
        self.im_size = []       # [row, col] filled by set_imsize
        self.bbox = []          # [x1, y1, x2, y2] ints, clamped to image bounds
        self.cat = []           # per-annotator categorical label lists
        self.cat_annotators = 0
        self.comb_cat = []      # combined categorical labels
        self.cont_annotators = 0
        self.cont = []          # per-annotator continuous label lists
        self.comb_cont = []     # combined continuous labels
        # NOTE(review): field order assumed
        # [bbox, cat, comb_cat, cont, comb_cont, gender, age] — confirm
        # against the Annotations.mat schema.
        self.gender = person[5][0]
        self.age = person[6][0]
        self.set_imsize(image_size)
        self.set_bbox(person[0])
        self.set_cat(person[1])
        self.set_comb_cat(person[2])
        # set_cont must run before set_comb_cont: it sets cont_annotators,
        # which set_comb_cont checks.
        self.set_cont(person[3])
        self.set_comb_cont(person[4])
        self.check_cont()

    def set_imsize(self, image_size):
        # Peel off two levels of mat nesting to reach the scalar row/col values.
        image_size = np.array(image_size).flatten().tolist()[0]
        row = np.array(image_size[0]).flatten().tolist()[0]
        col = np.array(image_size[1]).flatten().tolist()[0]
        self.im_size.append(row)
        self.im_size.append(col)

    def validate_bbox(self, bbox):
        # Clamp each corner into the image bounds and cast to int.
        # NOTE(review): x is clamped by im_size[0] and y by im_size[1];
        # verify which of those is width vs height.
        x1, y1, x2, y2 = bbox
        x1 = min(self.im_size[0], max(0, x1))
        x2 = min(self.im_size[0], max(0, x2))
        y1 = min(self.im_size[1], max(0, y1))
        y2 = min(self.im_size[1], max(0, y2))
        return [int(x1), int(y1), int(x2), int(y2)]

    def set_bbox(self, person_bbox):
        self.bbox = self.validate_bbox(np.array(person_bbox).flatten().tolist())

    def set_cat(self, person_cat):
        # One categorical label list per annotator.
        self.cat_annotators = len(person_cat[0])
        for ann in range(self.cat_annotators):
            ann_cat = person_cat[0][ann]
            ann_cat = np.array(ann_cat).flatten().tolist()
            ann_cat = np.array(ann_cat[0]).flatten().tolist()
            ann_cat = [np.array(c).flatten().tolist()[0] for c in ann_cat]
            self.cat.append(ann_cat)

    def set_comb_cat(self, person_comb_cat):
        # Combined categorical labels; empty when no categorical annotators.
        if self.cat_annotators != 0:
            self.comb_cat = [np.array(c).flatten().tolist()[0] for c in person_comb_cat[0]]
        else:
            self.comb_cat = []

    def set_comb_cont(self, person_comb_cont):
        # Combined continuous labels; relies on set_cont having already set
        # cont_annotators (see call order in __init__).
        if self.cont_annotators != 0:
            comb_cont = [np.array(c).flatten().tolist()[0] for c in person_comb_cont[0]]
            self.comb_cont = [np.array(c).flatten().tolist()[0] for c in comb_cont[0]]
        else:
            self.comb_cont = []

    def set_cont(self, person_cont):
        # One continuous label list per annotator.
        self.cont_annotators = len(person_cont[0])
        for ann in range(self.cont_annotators):
            ann_cont = person_cont[0][ann]
            ann_cont = np.array(ann_cont).flatten().tolist()
            ann_cont = np.array(ann_cont[0]).flatten().tolist()
            ann_cont = [np.array(c).flatten().tolist()[0] for c in ann_cont]
            self.cont.append(ann_cont)

    def check_cont(self):
        # Invalidate continuous annotations if the combined values contain NaN.
        for c in self.comb_cont:
            if np.isnan(c):
                self.cont_annotators = 0
                break
  121. def cat_to_one_hot(y_cat):
  122. '''
  123. One hot encode a categorical label.
  124. :param y_cat: Categorical label.
  125. :return: One hot encoded categorical label.
  126. '''
  127. one_hot_cat = np.zeros(26)
  128. for em in y_cat:
  129. one_hot_cat[cat2ind[em]] = 1
  130. return one_hot_cat
def prepare_data(data_mat, data_path_src, save_dir, dataset_type='train', generate_npy=False, debug_mode=False):
    '''
    Prepare csv files and save preprocessed data in npy files.
    :param data_mat: Mat data object for a label.
    :param data_path_src: Path of the parent directory containing the emotic images folders (mscoco, framesdb, emodb_small, ade20k)
    :param save_dir: Path of the directory to save the csv files and the npy files (if generate_npy files is True)
    :param dataset_type: Type of the dataset (train, val or test). Variable used in the name of csv files and npy files.
    :param generate_npy: If True the data is preprocessed and saved in npy files. Npy files are later used for training.
    :param debug_mode: If True, prints progress more often and stops after ~104 samples.
    '''
    data_set = list()
    if generate_npy:
        # Accumulators for the preprocessed image crops and labels.
        context_arr = list()
        body_arr = list()
        cat_arr = list()
        cont_arr = list()
    # Diagnostic counters: read failures, missing files, and samples
    # skipped for having no usable annotations.
    to_break = 0
    path_not_exist = 0
    cat_cont_zero = 0
    idx = 0  # running index of successfully kept samples
    for ex_idx, ex in enumerate(data_mat[0]):
        # NOTE(review): ex fields accessed positionally — assumed
        # [filename, folder, image_size, ?, persons]; confirm against schema.
        nop = len(ex[4][0])  # number of annotated persons in this image
        for person in range(nop):
            if dataset_type == 'train':
                et = emotic_train(ex[0][0],ex[1][0],ex[2],ex[4][0][person])
            else:
                et = emotic_test(ex[0][0],ex[1][0],ex[2],ex[4][0][person])
            try:
                image_path = os.path.join(data_path_src,et.folder,et.filename)
                if not os.path.exists(image_path):
                    path_not_exist += 1
                    print ('path not existing', ex_idx, image_path)
                    continue
                else:
                    # Full image (RGB) plus the person crop from the bbox;
                    # resized to the network input sizes.
                    context = cv2.cvtColor(cv2.imread(image_path),cv2.COLOR_BGR2RGB)
                    body = context[et.bbox[1]:et.bbox[3],et.bbox[0]:et.bbox[2]].copy()
                    context_cv = cv2.resize(context, (224,224))
                    body_cv = cv2.resize(body, (128,128))
            except Exception as e:
                # Best-effort: skip unreadable/degenerate images but count them.
                to_break += 1
                if debug_mode == True:
                    print ('breaking at idx=%d, %d due to exception=%r' %(ex_idx, idx, e))
                continue
            # Keep only samples that have both categorical and continuous labels.
            if (et.cat_annotators == 0 or et.cont_annotators == 0):
                cat_cont_zero += 1
                continue
            data_set.append(et)
            if generate_npy == True:
                context_arr.append(context_cv)
                body_arr.append(body_cv)
                # Train uses per-annotator labels; val/test use combined ones.
                if dataset_type == 'train':
                    cat_arr.append(cat_to_one_hot(et.cat))
                    cont_arr.append(np.array(et.cont))
                else:
                    cat_arr.append(cat_to_one_hot(et.comb_cat))
                    cont_arr.append(np.array(et.comb_cont))
            # Progress logging: every 1000 samples normally, every 20 in debug.
            if idx % 1000 == 0 and debug_mode==False:
                print (" Preprocessing data. Index = ", idx)
            elif idx % 20 == 0 and debug_mode==True:
                print (" Preprocessing data. Index = ", idx)
            idx = idx + 1
        # for debugging purposes: stop early and dump the last crop pair.
        if debug_mode == True and idx >= 104:
            print (' ######## Breaking data prep step', idx, ex_idx, ' ######')
            print (to_break, path_not_exist, cat_cont_zero)
            cv2.imwrite(os.path.join(save_dir, 'context1.png'), context_arr[-1])
            cv2.imwrite(os.path.join(save_dir, 'body1.png'), body_arr[-1])
            break
    print (to_break, path_not_exist, cat_cont_zero)
    # Write one CSV row per kept sample.
    csv_path = os.path.join(save_dir, "%s.csv" %(dataset_type))
    with open(csv_path, 'w') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=',', dialect='excel')
        row = ['Index', 'Folder', 'Filename', 'Image Size', 'BBox', 'Categorical_Labels', 'Continuous_Labels', 'Gender', 'Age']
        filewriter.writerow(row)
        for idx, ex in enumerate(data_set):
            if dataset_type == 'train':
                row = [idx, ex.folder, ex.filename, ex.im_size, ex.bbox, ex.cat, ex.cont, ex.gender, ex.age]
            else:
                row = [idx, ex.folder, ex.filename, ex.im_size, ex.bbox, ex.comb_cat, ex.comb_cont, ex.gender, ex.age]
            filewriter.writerow(row)
        print ('wrote file ', csv_path)
    # Optionally persist the stacked arrays for training.
    if generate_npy == True:
        context_arr = np.array(context_arr)
        body_arr = np.array(body_arr)
        cat_arr = np.array(cat_arr)
        cont_arr = np.array(cont_arr)
        print (len(data_set), context_arr.shape, body_arr.shape)
        np.save(os.path.join(save_dir,'%s_context_arr.npy' %(dataset_type)), context_arr)
        np.save(os.path.join(save_dir,'%s_body_arr.npy' %(dataset_type)), body_arr)
        np.save(os.path.join(save_dir,'%s_cat_arr.npy' %(dataset_type)), cat_arr)
        np.save(os.path.join(save_dir,'%s_cont_arr.npy' %(dataset_type)), cont_arr)
        print (context_arr.shape, body_arr.shape, cat_arr.shape, cont_arr.shape)
    print ('completed generating %s data files' %(dataset_type))
  223. def parse_args():
  224. parser = argparse.ArgumentParser()
  225. parser.add_argument('--data_dir', type=str,default=r'I:\project\deblur\emotic', help='Path to Emotic data and annotations')
  226. parser.add_argument('--save_dir_name', type=str, default='emotic_pre', help='Directory name in which preprocessed data will be stored')
  227. parser.add_argument('--label', type=str, default='all', choices=['train', 'val', 'test', 'all'])
  228. parser.add_argument('--generate_npy', action='store_true', help='Generate npy files')
  229. parser.add_argument('--debug_mode', action='store_true', help='Debug mode. Will only save a small subset of the data')
  230. # Generate args
  231. args = parser.parse_args()
  232. return args
if __name__ == '__main__':
    args = parse_args()
    # Expected layout: <data_dir>/Annotations/Annotations.mat for labels,
    # <data_dir>/emotic for the image folders, and an output directory
    # created under <data_dir> if missing.
    ann_path_src = os.path.join(args.data_dir, 'Annotations','Annotations.mat')
    data_path_src = os.path.join(args.data_dir, 'emotic')
    save_path = os.path.join(args.data_dir, args.save_dir_name)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # The 26 EMOTIC emotion categories, in the index order used by
    # cat_to_one_hot (so the one-hot vector length is 26).
    cat = ['Affection', 'Anger', 'Annoyance', 'Anticipation', 'Aversion', 'Confidence', 'Disapproval', 'Disconnection',
    'Disquietment', 'Doubt/Confusion', 'Embarrassment', 'Engagement', 'Esteem', 'Excitement', 'Fatigue', 'Fear',
    'Happiness', 'Pain', 'Peace', 'Pleasure', 'Sadness', 'Sensitivity', 'Suffering', 'Surprise', 'Sympathy', 'Yearning']
    # Forward and reverse lookup tables between category name and index.
    # cat2ind is read as a global by cat_to_one_hot.
    cat2ind = {}
    ind2cat = {}
    for idx, emotion in enumerate(cat):
        cat2ind[emotion] = idx
        ind2cat[idx] = emotion
    print ('loading Annotations')
    # loadmat returns a dict keyed by the MATLAB variable names
    # ('train', 'val', 'test').
    mat = loadmat(ann_path_src)
    if args.label.lower() == 'all':
        labels = ['train', 'val', 'test']
    else:
        labels = [args.label.lower()]
    for label in labels:
        data_mat = mat[label]
        print ('starting label ', label)
        prepare_data(data_mat, data_path_src, save_path, dataset_type=label, generate_npy=args.generate_npy, debug_mode=args.debug_mode)

创作挑战赛

新人创作奖励来咯,坚持创作打卡瓜分现金大奖

相关文章