add baseline

liuyuqi-dellpc, 2 years ago
commit 3ab5cb5534
3 changed files with 373 additions and 0 deletions:

  1. 1.baseline.ipynb: +356, -0
  2. README.md: +8, -0
  3. requirements.txt: +9, -0

+ 356 - 0
1.baseline.ipynb

@@ -0,0 +1,356 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 声音识别项目介绍\n",
+    "\n",
+    "\n",
+    "\n",
+    "## 开发环境\n",
+    "\n",
+    "* TensorFlow的版本:2.0 +\n",
+    "* keras\n",
+    "* sklearn\n",
+    "* librosa\n",
+    "\n",
+    "## 下载数据\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!wget http://tianchi-competition.oss-cn-hangzhou.aliyuncs.com/531887/train_sample.zip\n",
+    "!wget http://tianchi-competition.oss-cn-hangzhou.aliyuncs.com/531887/test_a.zip\n",
+    "\n",
+    "\n",
+    "!unzip -qq train_sample.zip\n",
+    "!\\rm train_sample.zip\n",
+    "\n",
+    "!unzip -qq test_a.zip\n",
+    "!\\rm test_a.zip"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 安装语音处理依赖\n",
+    "!pip install librosa --user"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 基本库\n",
+    "\n",
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "from sklearn.metrics import classification_report\n",
+    "from sklearn.model_selection import GridSearchCV\n",
+    "\n",
+    "from sklearn.preprocessing import MinMaxScaler\n",
+    "\n",
+    "\n",
+    "from tensorflow.keras.models import Sequential\n",
+    "from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPool2D, Dropout\n",
+    "from tensorflow.keras.utils import to_categorical \n",
+    "\n",
+    "from sklearn.ensemble import RandomForestClassifier\n",
+    "from sklearn.svm import SVC\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 数据预处理\n",
+    "\n",
+    "特征提取以及数据集的建立\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "feature = []\n",
+    "label = []\n",
+    "# 建立类别标签,不同类别对应不同的数字。\n",
+    "label_dict = {'aloe': 0, 'burger': 1, 'cabbage': 2,'candied_fruits':3, 'carrots': 4, 'chips':5,\n",
+    "                  'chocolate': 6, 'drinks': 7, 'fries': 8, 'grapes': 9, 'gummies': 10, 'ice-cream':11,\n",
+    "                  'jelly': 12, 'noodles': 13, 'pickles': 14, 'pizza': 15, 'ribs': 16, 'salmon':17,\n",
+    "                  'soup': 18, 'wings': 19}\n",
+    "label_dict_inv = {v:k for k,v in label_dict.items()}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from tqdm import tqdm\n",
+    "def extract_features(parent_dir, sub_dirs, max_file=10, file_ext=\"*.wav\"):\n",
+    "    c = 0\n",
+    "    label, feature = [], []\n",
+    "    for sub_dir in sub_dirs:\n",
+    "        for fn in tqdm(glob.glob(os.path.join(parent_dir, sub_dir, file_ext))[:max_file]): # 遍历数据集的所有文件\n",
+    "            \n",
+    "           # segment_log_specgrams, segment_labels = [], []\n",
+    "            #sound_clip,sr = librosa.load(fn)\n",
+    "            #print(fn)\n",
+    "            label_name = fn.split('/')[-2]\n",
+    "            label.extend([label_dict[label_name]])\n",
+    "            X, sample_rate = librosa.load(fn,res_type='kaiser_fast')\n",
+    "            mels = np.mean(librosa.feature.melspectrogram(y=X,sr=sample_rate).T,axis=0) # 计算梅尔频谱(mel spectrogram),并把它作为特征\n",
+    "            feature.extend([mels])\n",
+    "            \n",
+    "    return [feature, label]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 自己更改目录\n",
+    "parent_dir = './train_sample/'\n",
+    "save_dir = \"./\"\n",
+    "folds = sub_dirs = np.array(['aloe','burger','cabbage','candied_fruits',\n",
+    "                             'carrots','chips','chocolate','drinks','fries',\n",
+    "                            'grapes','gummies','ice-cream','jelly','noodles','pickles',\n",
+    "                            'pizza','ribs','salmon','soup','wings'])\n",
+    "\n",
+    "# 获取特征feature以及类别的label\n",
+    "temp = extract_features(parent_dir,sub_dirs,max_file=100)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "temp = np.array(temp)\n",
+    "data = temp.transpose()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 获取特征\n",
+    "X = np.vstack(data[:, 0])\n",
+    "\n",
+    "# 获取标签\n",
+    "Y = np.array(data[:, 1])\n",
+    "print('X的特征尺寸是:',X.shape)\n",
+    "print('Y的特征尺寸是:',Y.shape)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 在Keras库中:to_categorical就是将类别向量转换为二进制(只有0和1)的矩阵类型表示\n",
+    "Y = to_categorical(Y)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "'''最终数据'''\n",
+    "print(X.shape)\n",
+    "print(Y.shape)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state = 1, stratify=Y)\n",
+    "print('训练集的大小',len(X_train))\n",
+    "print('测试集的大小',len(X_test))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "X_train = X_train.reshape(-1, 16, 8, 1)\n",
+    "X_test = X_test.reshape(-1, 16, 8, 1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 搭建CNN网络¶\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model = Sequential()\n",
+    "\n",
+    "# 输入的大小\n",
+    "input_dim = (16, 8, 1)\n",
+    "\n",
+    "model.add(Conv2D(64, (3, 3), padding = \"same\", activation = \"tanh\", input_shape = input_dim))# 卷积层\n",
+    "model.add(MaxPool2D(pool_size=(2, 2)))# 最大池化\n",
+    "model.add(Conv2D(128, (3, 3), padding = \"same\", activation = \"tanh\")) #卷积层\n",
+    "model.add(MaxPool2D(pool_size=(2, 2))) # 最大池化层\n",
+    "model.add(Dropout(0.1))\n",
+    "model.add(Flatten()) # 展开\n",
+    "model.add(Dense(1024, activation = \"tanh\"))\n",
+    "model.add(Dense(20, activation = \"softmax\")) # 输出层:20个units输出20个类的概率\n",
+    "\n",
+    "# 编译模型,设置损失函数,优化方法以及评价标准\n",
+    "model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model.summary()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 训练模型\n",
+    "model.fit(X_train, Y_train, epochs = 20, batch_size = 15, validation_data = (X_test, Y_test))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#预测\n",
+    "\n",
+    "def extract_features(test_dir, file_ext=\"*.wav\"):\n",
+    "    feature = []\n",
+    "    for fn in tqdm(glob.glob(os.path.join(test_dir, file_ext))[:]): # 遍历数据集的所有文件\n",
+    "        X, sample_rate = librosa.load(fn,res_type='kaiser_fast')\n",
+    "        mels = np.mean(librosa.feature.melspectrogram(y=X,sr=sample_rate).T,axis=0) # 计算梅尔频谱(mel spectrogram),并把它作为特征\n",
+    "        feature.extend([mels])\n",
+    "    return feature\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "X_test = extract_features('./test_a/')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "X_test = np.vstack(X_test)\n",
+    "predictions = model.predict(X_test.reshape(-1, 16, 8, 1))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "preds = np.argmax(predictions, axis = 1)\n",
+    "preds = [label_dict_inv[x] for x in preds]\n",
+    "\n",
+    "path = glob.glob('./test_a/*.wav')\n",
+    "result = pd.DataFrame({'name':path, 'label': preds})\n",
+    "\n",
+    "result['name'] = result['name'].apply(lambda x: x.split('/')[-1])\n",
+    "result.to_csv('submit.csv',index=None)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ls ./test_a/*.wav | wc -l"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!wc -l submit.csv"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "plaintext"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 8 - 0
README.md

@@ -1,2 +1,10 @@
 # tianchi_voice_Identify
 Tianchi sound recognition competition, experiments on Google Colab
+
+
+### Environment
+
+```
+pip install -r requirements.txt
+
+```
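+
+After installing the dependencies, the baseline can be run cell by cell; for example, assuming a local Jupyter installation (the project also targets Google Colab):
+
+```
+jupyter notebook 1.baseline.ipynb
+```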

+ 9 - 0
requirements.txt

@@ -0,0 +1,9 @@
+
+keras==2.0.8
+scikit-learn==0.20.3
+pandas==0.23.4
+matplotlib==3.0.3
+numpy==1.16.2
+librosa==0.6.2
+tensorflow-gpu==2.0.0b1
+