From bb30862443a8943646c5b48fc7ce07fcc785974b Mon Sep 17 00:00:00 2001
From: 20200913032 <50436448@qq.com>
Date: Sun, 29 Nov 2020 11:27:19 +0800
Subject: [PATCH] Upload New File

---
 starter_code.ipynb | 419 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 419 insertions(+)
 create mode 100644 starter_code.ipynb

diff --git a/starter_code.ipynb b/starter_code.ipynb
new file mode 100644
index 0000000..b38b769
--- /dev/null
+++ b/starter_code.ipynb
@@ -0,0 +1,419 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Building a Word Segmentation Tool\n",
+    "\n",
+    "### Part 1: A Chinese word segmenter based on enumeration\n",
+    "\n",
+    "Data needed for this project:\n",
+    "1. 综合类中文词库.xlsx: a list of Chinese words, used as the dictionary\n",
+    "2. Partial unigram probabilities, provided as the variable word_prob\n",
+    "\n",
+    "\n",
+    "For example, given the dictionary = [我们 学习 人工 智能 人工智能 未来 是] and the unigram probabilities p(我们)=0.25, p(学习)=0.15, p(人工)=0.05, p(智能)=0.1, p(人工智能)=0.2, p(未来)=0.1, p(是)=0.15\n",
+    "\n",
+    "#### Step 1: For the input string “我们学习人工智能,人工智能是未来”, find all possible segmentations\n",
+    "- [我们,学习,人工智能,人工智能,是,未来]\n",
+    "- [我们,学习,人工,智能,人工智能,是,未来]\n",
+    "- [我们,学习,人工,智能,人工,智能,是,未来]\n",
+    "- [我们,学习,人工智能,人工,智能,是,未来]\n",
+    "- ...\n",
+    "\n",
+    "\n",
+    "#### Step 2: Score every candidate segmentation by its negative log probability\n",
+    "- score(我们,学习,人工智能,人工智能,是,未来) = -log p(我们)-log p(学习)-log p(人工智能)-log p(人工智能)-log p(是)-log p(未来)\n",
+    "- score(我们,学习,人工,智能,人工智能,是,未来) = -log p(我们)-log p(学习)-log p(人工)-log p(智能)-log p(人工智能)-log p(是)-log p(未来)\n",
+    "- score(我们,学习,人工,智能,人工,智能,是,未来) = -log p(我们)-log p(学习)-log p(人工)-log p(智能)-log p(人工)-log p(智能)-log p(是)-log p(未来)\n",
+    "- score(我们,学习,人工智能,人工,智能,是,未来) = -log p(我们)-log p(学习)-log p(人工智能)-log p(人工)-log p(智能)-log p(是)-log p(未来)\n",
+    "- ...\n",
+    "\n",
+    "#### Step 3: Return the segmentation with the highest probability, i.e. the lowest score from Step 2"
+   ]
+  },
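+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To make Step 2 concrete, here is a minimal sketch that scores two of the candidate segmentations above with the toy probabilities from the example; the names `toy_prob` and `neg_log_score` are used only for this illustration and are not part of the starter code:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import math\n",
+    "\n",
+    "# Toy unigram probabilities copied from the example above; this dict exists\n",
+    "# only for this illustrative cell.\n",
+    "toy_prob = {\"我们\": 0.25, \"学习\": 0.15, \"人工\": 0.05, \"智能\": 0.1,\n",
+    "            \"人工智能\": 0.2, \"未来\": 0.1, \"是\": 0.15}\n",
+    "\n",
+    "def neg_log_score(segmentation, prob):\n",
+    "    # score = -log p(w1) - log p(w2) - ...; a lower score means a more probable sentence\n",
+    "    return sum(-math.log(prob[word]) for word in segmentation)\n",
+    "\n",
+    "# Splitting 人工智能 into 人工 + 智能 costs -log(0.05) - log(0.1), which is larger\n",
+    "# than -log(0.2) for the single word, so the second candidate scores worse.\n",
+    "print(neg_log_score([\"我们\", \"学习\", \"人工智能\", \"人工智能\", \"是\", \"未来\"], toy_prob))\n",
+    "print(neg_log_score([\"我们\", \"学习\", \"人工\", \"智能\", \"人工智能\", \"是\", \"未来\"], toy_prob))"
+   ]
+  },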
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Reference: https://blog.csdn.net/sinat_21250935/article/details/104797013"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import xlrd\n",
+    "import numpy as np\n",
+    "\n",
+    "def load_word_dic(file_path, sheet_idx=0, col_idx=0):\n",
+    "    # Read one column of words from the Excel dictionary file.\n",
+    "    file = xlrd.open_workbook(file_path)\n",
+    "    table = file.sheets()[sheet_idx]\n",
+    "    col_value = table.col_values(col_idx)\n",
+    "    \n",
+    "    # A dict gives O(1) average-time membership tests; every word starts with\n",
+    "    # the default unigram probability 0.00001.\n",
+    "    dic_words = {}\n",
+    "    for word in col_value:\n",
+    "        dic_words[word] = 0.00001\n",
+    "    \n",
+    "    return dic_words"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1.0000000000000002\n"
+     ]
+    }
+   ],
+   "source": [
+    "# TODO: Step 1: read all Chinese words from 综合类中文词库.xlsx.\n",
+    "# hint: think about which data structure is best for storing this dictionary;\n",
+    "# consider the cost of looking up a single word.\n",
+    "dic_words = load_word_dic(file_path='综合类中文词库.xlsx')  # words read from the dictionary file\n",
+    "\n",
+    "# Below are the occurrence probabilities of individual words. To keep the problem\n",
+    "# simple, only a small subset is listed. Words that are in the dictionary but do not\n",
+    "# appear here all get probability 0.00001, e.g. p(\"学院\")=p(\"概率\")=...=0.00001.\n",
+    "\n",
+    "word_prob = {\"北京\":0.03,\"的\":0.08,\"天\":0.005,\"气\":0.005,\"天气\":0.06,\"真\":0.04,\"好\":0.05,\"真好\":0.04,\"啊\":0.01,\"真好啊\":0.02,\n",
+    "             \"今\":0.01,\"今天\":0.07,\"课程\":0.06,\"内容\":0.06,\"有\":0.05,\"很\":0.03,\"很有\":0.04,\"意思\":0.06,\"有意思\":0.005,\"课\":0.01,\n",
+    "             \"程\":0.005,\"经常\":0.08,\"意见\":0.08,\"意\":0.01,\"见\":0.005,\"有意见\":0.02,\"分歧\":0.04,\"分\":0.02, \"歧\":0.005}\n",
+    "\n",
+    "# Overwrite the default probability for the words listed above.\n",
+    "for key, value in word_prob.items():\n",
+    "    dic_words[key] = value\n",
+    "\n",
+    "print(sum(word_prob.values()))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_segments_recurr(input_str):\n",
+    "    # Recursively enumerate every segmentation of input_str whose words all\n",
+    "    # appear in dic_words.\n",
+    "    len_input = len(input_str)\n",
+    "\n",
+    "    segments = []\n",
+    "    if len_input == 0:\n",
+    "        return []\n",
+    "\n",
+    "    for i in range(1, len_input + 1):\n",
+    "        seg_str = input_str[:i]\n",
+    "        if seg_str in dic_words:\n",
+    "            seg_substr_list = get_segments_recurr(input_str[i:])\n",
+    "\n",
+    "            if len(input_str[i:]) == 0 and len(seg_substr_list) == 0:\n",
+    "                # The prefix consumed the whole string, so it is a complete segmentation.\n",
+    "                segments.append([seg_str])\n",
+    "            else:\n",
+    "                # Prepend the prefix to every segmentation of the remaining suffix.\n",
+    "                for seg_substr in seg_substr_list:\n",
+    "                    segments.append([seg_str] + seg_substr)\n",
+    "    return segments"
+   ]
+  },
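+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The recursion above re-segments the same suffix once for every prefix that reaches it. A possible optimization, sketched below assuming the global `dic_words` is already loaded (the name `get_segments_memo` exists only for this sketch), is to cache the segmentations of each suffix by its start index:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from functools import lru_cache\n",
+    "\n",
+    "def get_segments_memo(input_str):\n",
+    "    # Same enumeration as get_segments_recurr, but every suffix (identified by\n",
+    "    # its start index) is segmented only once and the resulting list is reused.\n",
+    "    if len(input_str) == 0:\n",
+    "        return []\n",
+    "\n",
+    "    @lru_cache(maxsize=None)\n",
+    "    def segments_from(start):\n",
+    "        if start == len(input_str):\n",
+    "            return [[]]  # exactly one way to segment the empty suffix\n",
+    "        results = []\n",
+    "        for end in range(start + 1, len(input_str) + 1):\n",
+    "            word = input_str[start:end]\n",
+    "            if word in dic_words:\n",
+    "                for rest in segments_from(end):\n",
+    "                    results.append([word] + rest)\n",
+    "        return results\n",
+    "\n",
+    "    return segments_from(0)"
+   ]
+  },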
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## TODO: implement word_segment_naive to segment the input string\n",
+    "def word_segment_naive(input_str):\n",
+    "    \"\"\"\n",
+    "    1. Enumerate all valid segmentations of the input string.\n",
+    "    2. Compute the sentence score of every candidate.\n",
+    "    3. Return the candidate with the highest probability as the final result.\n",
+    "    \n",
+    "    input_str: input string, e.g. \"今天天气好\"\n",
+    "    best_segment: best segmentation, e.g. [\"今天\",\"天气\",\"好\"]\n",
+    "    \"\"\"\n",
+    "\n",
+    "    # TODO: Step 1: compute all possible segmentations; every word must exist in the\n",
+    "    # dictionary, and there can be very many candidates. If the string cannot be\n",
+    "    # fully segmented, the result is an empty list.\n",
+    "    # Format: segments = [[\"今天\",\"天气\",\"好\"],[\"今天\",\"天\",\"气\",\"好\"],[\"今\",\"天\",\"天气\",\"好\"],...]\n",
+    "    segments = get_segments_recurr(input_str)\n",
+    "    \n",
+    "    # TODO: Step 2: loop over all segmentations, score each one, and return the\n",
+    "    # segmentation with the lowest negative log probability.\n",
+    "    best_segment = []\n",
+    "    best_score = np.inf\n",
+    "    for seg in segments:\n",
+    "        score = 0\n",
+    "        for token in seg:\n",
+    "            score += -np.log(dic_words[token])\n",
+    "\n",
+    "        if best_score > score:\n",
+    "            best_score = score\n",
+    "            best_segment = seg\n",
+    "    \n",
+    "    return best_segment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['北京', '的', '天气', '真好啊']\n",
+      "['今天', '的', '课程', '内容', '很有', '意思']\n",
+      "['经常', '有意见', '分歧']\n"
+     ]
+    }
+   ],
+   "source": [
+    "# tests\n",
+    "print(word_segment_naive('北京的天气真好啊'))\n",
+    "print(word_segment_naive('今天的课程内容很有意思'))\n",
+    "print(word_segment_naive('经常有意见分歧'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Part 2: Optimizing the pipeline with the Viterbi algorithm\n",
+    "\n",
+    "Data needed for this project:\n",
+    "1. 综合类中文词库.xlsx: a list of Chinese words, used as the dictionary\n",
+    "2. Partial unigram probabilities, provided as the variable word_prob\n",
+    "\n",
+    "\n",
+    "For example, given the dictionary = [我们 学习 人工 智能 人工智能 未来 是] and the unigram probabilities p(我们)=0.25, p(学习)=0.15, p(人工)=0.05, p(智能)=0.1, p(人工智能)=0.2, p(未来)=0.1, p(是)=0.15\n",
+    "\n",
+    "#### Step 1: Build a weighted directed graph from the dictionary, the input sentence, and word_prob (see the course material)\n",
+    "Every edge of the directed graph carries the probability of one word (any substring that exists in the dictionary counts as a legal word); these probabilities are given in word_prob.\n",
+    "Note: think about a suitable way to store this directed graph; there is more than one possible representation.\n",
+    "\n",
+    "#### Step 2: Implement the Viterbi algorithm to find the best PATH, i.e. the best segmentation of the sentence\n",
+    "See the course material for the details of the algorithm.\n",
+    "\n",
+    "#### Step 3: Return the result\n",
+    "Same requirements as Part 1."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "# Added to every probability before taking the log so that log(0) is never evaluated.\n",
+    "EPSILON = 1e-10"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_directed_graph(input_str):\n",
+    "    # Build an n x n matrix in which entry (i, j) holds the probability of the\n",
+    "    # word input_str[i:j+1]; entries stay 0 for substrings not in the dictionary.\n",
+    "    len_input_str = len(input_str)\n",
+    "\n",
+    "    input_matrix = np.zeros((len_input_str, len_input_str))\n",
+    "\n",
+    "    for i in range(len_input_str):\n",
+    "        for j in range(i, len_input_str):\n",
+    "            token = input_str[i:j+1]\n",
+    "            if token in dic_words:\n",
+    "                input_matrix[i][j] = dic_words[token]\n",
+    "\n",
+    "    # Rows are start positions 0..n-1, columns are end positions 1..n; convert the\n",
+    "    # probabilities to edge weights -log p (EPSILON avoids log(0) for non-words).\n",
+    "    graph = pd.DataFrame(data=input_matrix, index=list(range(len_input_str)),\n",
+    "                         columns=list(range(1, len_input_str + 1)))\n",
+    "    graph = -1 * np.log(graph + EPSILON)\n",
+    "\n",
+    "    return graph"
+   ]
+  },
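+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As the note in Step 1 says, the dense matrix is not the only way to store the graph. One alternative, sketched below (the name `get_graph_dict` exists only for this sketch), is a sparse adjacency dict that keeps an edge only where a substring is actually a dictionary word:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_graph_dict(input_str):\n",
+    "    # graph[i][j] is the -log probability of the word input_str[i:j];\n",
+    "    # (start, end) pairs that do not form a dictionary word get no entry.\n",
+    "    graph = {}\n",
+    "    n = len(input_str)\n",
+    "    for i in range(n):\n",
+    "        for j in range(i + 1, n + 1):\n",
+    "            word = input_str[i:j]\n",
+    "            if word in dic_words:\n",
+    "                graph.setdefault(i, {})[j] = -np.log(dic_words[word])\n",
+    "    return graph"
+   ]
+  },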
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## TODO: implement word_segment_viterbi to segment the input string\n",
+    "def word_segment_viterbi(input_str):\n",
+    "    \"\"\"\n",
+    "    1. Build the DAG (directed graph) from the input string, the dictionary, and the given unigram probabilities.\n",
+    "    2. Run the Viterbi algorithm to find the optimal PATH.\n",
+    "    3. Return the segmentation.\n",
+    "    \n",
+    "    input_str: input string, e.g. \"今天天气好\"\n",
+    "    best_segment: best segmentation, e.g. [\"今天\",\"天气\",\"好\"]\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    # TODO: Step 1: build the weighted directed graph from the dictionary, the input\n",
+    "    # sentence, and the given unigram probabilities (see the course material).\n",
+    "    # Every edge carries the probability of one word (any substring found in the\n",
+    "    # dictionary is a legal word); words in the dictionary but not in word_prob get\n",
+    "    # probability 0.00001.\n",
+    "    # Note: think about a suitable way to store this graph; there is more than one option.\n",
+    "    graph = get_directed_graph(input_str)\n",
+    "    \n",
+    "    # TODO: Step 2: use the Viterbi algorithm to find the best PATH, i.e. the path for\n",
+    "    # which P(sentence) is largest, or equivalently -log P(sentence) is smallest.\n",
+    "    # hint: think about why we use the negative log sum -log p(w1)-log p(w2)-...\n",
+    "    # instead of the product p(w1)p(w2)...\n",
+    "    \n",
+    "    len_input_str = len(input_str)\n",
+    "\n",
+    "    # distances_min[i] is the lowest total edge weight of any path from 0 to i;\n",
+    "    # path[i] is the predecessor of i on that best path.\n",
+    "    distances_min = pd.Series(data=np.zeros((len_input_str + 1,)), index=list(range(len_input_str + 1)))\n",
+    "    path = pd.Series(data=np.zeros((len_input_str + 1,), dtype=int), index=list(range(len_input_str + 1)))\n",
+    "\n",
+    "    for i in range(1, len_input_str + 1):\n",
+    "        optional_path_distances = np.zeros((i,))\n",
+    "        for j in range(i):\n",
+    "            optional_path_distances[j] = graph.loc[j, i] + distances_min[j]\n",
+    "        distances_min[i] = np.min(optional_path_distances)\n",
+    "        path[i] = np.argmin(optional_path_distances)\n",
+    "    \n",
+    "    # TODO: Step 3: recover the best segmentation by walking the PATH backwards.\n",
+    "    best_segment = []\n",
+    "    idx = len_input_str\n",
+    "\n",
+    "    while idx > 0:\n",
+    "        best_segment.append(input_str[path.loc[idx]: idx])\n",
+    "        idx = path.loc[idx]\n",
+    "\n",
+    "    best_segment.reverse()\n",
+    "\n",
+    "    return best_segment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['北京', '的', '天气', '真好啊']\n",
+      "['今天', '的', '课程', '内容', '很有', '意思']\n",
+      "['经常', '有意见', '分歧']\n"
+     ]
+    }
+   ],
+   "source": [
+    "# tests\n",
+    "print(word_segment_viterbi('北京的天气真好啊'))\n",
+    "print(word_segment_viterbi('今天的课程内容很有意思'))\n",
+    "print(word_segment_viterbi('经常有意见分歧'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "TODO: What are the time and space complexities of the two methods?\n",
+    "\n",
+    "Method 1 (enumeration):\n",
+    "time complexity = O(m^n), space complexity = O(n), where n is the length of the sentence and m bounds the number of dictionary words starting at each position; the number of candidate segmentations grows exponentially with n\n",
+    "\n",
+    "Method 2 (Viterbi):\n",
+    "time complexity = O(n^2), space complexity = O(n^2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "TODO: If you keep optimizing this segmentation tool, which approaches could you consider? (list at least 3)\n",
+    "- 0. (example) The current probabilities are incomplete; estimate the probability of every word from a large corpus, which is more realistic\n",
+    "- 1.\n",
+    "- 2.\n",
+    "- 3."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
--
libgit2 0.26.0