{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2.2 决策树"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "决策树是一种通过**树形结构**进行分类的方法,使用层层推理来实现最终的分类。\n",
    "\n",
    "决策树由下面几种元素构成:\n",
    "+ 根节点:最顶层的分类条件。\n",
    "+ 决策节点(中间节点):中间分类条件。\n",
    "+ 叶子节点:代表标签类别。\n",
    "\n",
    "<img src=\"http://imgbed.momodel.cn//20200110170450.png\" width=400>\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "上面的说法过于抽象,下面来看一个实际的例子。构建一棵结构简单的决策树,用于预测贷款用户是否具有偿还贷款的能力。\n",
    "\n",
    "贷款用户主要具备三个属性:**是否拥有房产**,**是否结婚**,**平均月收入**。\n",
    "\n",
    "每一个内部节点都表示一个属性条件判断,叶子节点表示贷款用户是否具有偿还能力。\n",
    "\n",
    "<img src=\"http://imgbed.momodel.cn//20200110171836.png\" width=400>\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "首先判断贷款用户是否拥有房产,如果用户拥有房产,则说明该用户具有偿还贷款的能力;否则需要判断该用户是否结婚,如果已经结婚则具有偿还贷款的能力;否则需要判断该用户的收入大小,如果该用户月收入小于 4K 元,则该用户不具有偿还贷款的能力,否则该用户是具有偿还能力的。"
   ]
  },
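  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To make the path through the tree concrete, the cell below is a minimal sketch (not part of the original material) that encodes the same rules as a plain Python function. The function name and the way the 4K threshold is written are our own choices for illustration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def can_repay(has_house, is_married, monthly_income):\n",
    "    \"\"\"Illustrative sketch of the loan decision tree described above.\"\"\"\n",
    "    if has_house:                      # root node: owns property?\n",
    "        return True\n",
    "    if is_married:                     # internal node: married?\n",
    "        return True\n",
    "    return monthly_income >= 4000      # leaf: monthly income of 4K or more -> can repay\n",
    "\n",
    "# Example: no property, married, monthly income 3K\n",
    "can_repay(has_house=False, is_married=True, monthly_income=3000)\n"
   ]
  },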
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**想一想**"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "决策树的流程是什么?\n",
    "\n",
    "在有一个贷款用户A,其情况是月收入 3K、已经结婚、没有房产,那么他是否具有偿还贷款的能力呢?  \n",
    "\n",
    "上图中我们为什么要用“是否拥有房产”作根节点呢?可不可以用“是否结婚”和“平均月收入”做根节点呢?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.2.1 决策树分类概念"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<center><video src=\"http://files.momodel.cn/decision_tree_playground_demo.mp4\" controls=\"controls\" width=800px></center>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**数据**: 游乐场经营者提供**天气情况**(如晴、雨、多云)、**温度高低**、**湿度大小**、**风力强弱**等气象特点以及游客当天是否前往游乐场。\n",
    "\n",
    "**目标**: 预测游客是否来游乐场游玩。\n",
    "\n",
    "\n",
    "|序号|天气|温度(℃)|湿度|是否有风|是(1)否(0)前往游乐场|\n",
    "|:--:|:--:|:--:|:--:|:--:|:--:|\n",
    "|1|晴|29|85|否|0|\n",
    "|2|晴|26|88|是|0|\n",
    "|3|多云|28|78|否|1\n",
    "|4|雨|21|96|否|1|\n",
    "|5|雨|20|80|否|1|\n",
    "|6|雨|18|70|是|0|\n",
    "|7|多云|18|65|是|1|\n",
    "|8|晴|22|90|否|0|\n",
    "|9|晴|21|68|否|1|\n",
    "|10|雨|24|80|否|1|\n",
    "|11|晴|24|63|是|1|\n",
    "|12|多云|22|90|是|1|\n",
    "|13|多云|27|75|否|1|\n",
    "|14|雨|21|80|是|0|\n",
    "\n",
    "根据上表,绘制如图所示的决策树:\n",
    "\n",
    "<img src=\"http://imgbed.momodel.cn//20200110172806.png\" width=500>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "根节点是天气状况,具有 **雨**、 **多云** 和 **晴** 三种属性取值。\n",
    "+ **多云**:  样本子集是 { 3, 7, 12, 13 } ,仅有“前往游乐场游玩”一个类别,即肯定去游乐场。  \n",
    "  \n",
    "  \n",
    "+ **晴**:  样本子集是 { 1, 2, 8, 9, 11 }\n",
    "    + 湿度大于 75:样本子集为 { 1, 2, 8 },不前往游乐场。\n",
    "    + 湿度不大于 75:样本子集 { 9, 11 },前往游乐场。\n",
    "    \n",
    "    \n",
    "+ **雨**:样本子集为 { 4, 5, 6, 10, 14 }\n",
    "    + 有风:样本子集 { 6, 14 },不去游乐场。\n",
    "    + 无风:样本子集 { 4, 5, 10 },前往游乐场。\n",
    " "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**想一想**"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "通过上面的例子,你观察到构建决策树的过程是哪几步?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "下面,我们创建数据并进行一些预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "import math\n",
    "from math import log\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 原始数据\n",
    "datasets = [\n",
    "    ['晴', 29, 85, '否', '0'],\n",
    "    ['晴', 26, 88, '是', '0'],\n",
    "    ['多云', 28, 78, '否', '1'],\n",
    "    ['雨', 21, 96, '否', '1'],\n",
    "    ['雨', 20, 80, '否', '1'],\n",
    "    ['雨', 18, 70, '是', '0'],\n",
    "    ['多云', 18, 65, '是', '1'],\n",
    "    ['晴', 22, 90, '否', '0'],\n",
    "    ['晴', 21, 68, '否', '1'],\n",
    "    ['雨', 24, 80, '否', '1'],\n",
    "    ['晴', 24, 63, '是', '1'],\n",
    "    ['多云', 22, 90, '是', '1'],\n",
    "    ['多云', 27, 75, '否', '1'],\n",
    "    ['雨', 21, 80, '是', '0']\n",
    "]\n",
    "# 数据的列名\n",
    "labels = ['天气', '温度', '湿度', '是否有风', '是否前往游乐场']\n",
    "# 将湿度大小分为大于 75 和小于等于 75 这两个属性值,\n",
    "# 将温度大小分为大于 26 和小于等于 26 这两个属性值\n",
    "for i in range(len(datasets)):\n",
    "    if datasets[i][2] > 75:\n",
    "        datasets[i][2] = '>75'\n",
    "    else:\n",
    "        datasets[i][2] = '<=75'\n",
    "    if datasets[i][1] > 26:\n",
    "        datasets[i][1] = '>26'\n",
    "    else:\n",
    "        datasets[i][1] = '<=26'\n",
    "# 构建 dataframe 并查看数据\n",
    "df = pd.DataFrame(datasets, columns=labels)\n",
    "df"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.2.2 构建决策树  "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<center><video src=\"http://files.momodel.cn/decision_tree_entropy.mp4\" controls=\"controls\" width=800px></center>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**想一想**"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "信息熵和信息增益分别是什么?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "假设有 $K$ 个信息,其组成了集合样本 $D$ ,记第 $k$ 个信息发生的概率为 $p_k(1≤k≤K)$。 \n",
    "\n",
    "这 $K$ 个信息的信息熵该如何计算?  \n",
    "\n",
    "所有 $p_k$ 累加起来的和是多少?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**动手练**"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "根据公式编写代码计算信息熵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calc_entropy(total_num, count_dict):\n",
    "    \"\"\"\n",
    "    计算信息熵\n",
    "    :param total_num: 总样本数\n",
    "    :param count_dict: 每类样本及其对应数目的字典\n",
    "    :return: 信息熵\n",
    "    \"\"\"\n",
    "    # todo 使用公式计算信息熵\n",
    "    ent = \n",
    "    # 返回信息熵,精确到小数点后 4 位\n",
    "    return round(ent, 4)\n"
   ]
  },
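  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "If you get stuck on the `calc_entropy` exercise above, the cell below gives one possible reference implementation. The name `calc_entropy_reference` is our own, chosen so that it does not overwrite your answer; classes with a count of zero are skipped since they contribute nothing to the sum."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calc_entropy_reference(total_num, count_dict):\n",
    "    \"\"\"One possible implementation of the entropy formula (for reference only).\"\"\"\n",
    "    ent = -sum((n / total_num) * log(n / total_num, 2)\n",
    "               for n in count_dict.values() if n > 0)\n",
    "    return round(ent, 4)\n",
    "\n",
    "# Sanity check against the worked example below: 9 \"go\" samples vs 5 \"don't go\" samples\n",
    "calc_entropy_reference(14, {'前往': 9, '不前往': 5})   # should be about 0.9403\n"
   ]
  },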
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "\n",
    "现在用**信息熵**来构建决策树。数据中 14 个样本分为 “游客来游乐场 (9 个样本)” 和 “游客不来游乐场( 5 个样本)” 两个类别,即 K = 2。\n",
    "\n",
    " “游客来游乐场”  “游客不来游乐场” 的概率分别为 $p_1$  $p_2$ ,显然 $p_1=\\frac{9}{14}$,$p_1=\\frac{5}{14}$,则这 14 个样本所蕴含的信息熵:\n",
    "\n",
    "$$E(D)=-\\sum_{k=1}^{2}p_{k}log_{2}{p_k}=-(\\frac{9}{14}×log_{2}{\\frac{9}{14}}+\\frac{5}{14}×log_{2}{\\frac{5}{14}})=0.940$$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们可以用下面这种方式对 DataFrame 的数据按条件进行筛选。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 例如:按 是否前往游乐场 == 0 进行筛选\n",
    "df[df['是否前往游乐场']=='0']\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用上面的方法,可以得到计算信息熵所需的总样本数,以及每类样本及其对应数目的字典,然后计算信息熵。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 总样本数\n",
    "total_num = df.shape[0]\n",
    "# 每类样本及其对应数目的字典\n",
    "count_dict = {'前往': df[df['是否前往游乐场']=='1'].shape[0], '不前往': df[df['是否前往游乐场']=='1'].shape[1]}\n",
    "# 计算信息熵\n",
    "entropy = calc_entropy(total_num, count_dict)\n",
    "entropy\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<center><video src=\"http://files.momodel.cn/decision_tree_build.mp4\" controls=\"controls\" width=800px></center>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**计算天气状况所对应的信息熵**:  \n",
    "天气状况的三个属性记为 $a_0=“晴”$ ,$a_1=“多云”$ ,$a_2=“雨”$ ,  \n",
    "属性取值为 $a_i$ 对应分支节点所包含子样本集记为 $D_i$ ,该子样本集包含样本数量记为 $|D_i|$ 。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "|天气属性取值$a_i$|“晴”|“多云”|“雨”|\n",
    "|:--:|:--:|:--:|:--:|\n",
    "|对应样本数$|D_i|$|5|4|5|\n",
    "|正负样本数量|(2+,3-)|(4+,0-)|(3+,2-)|\n",
    "\n",
    "计算天气状况每个属性值的信息熵:\n",
    "\n",
    "$“晴”:E(D_0)=-(\\frac{2}{5}×log_{2}{\\frac{2}{5}}+\\frac{3}{5}×log_{2}{\\frac{3}{5}})=0.971$\n",
    "\n",
    "$“多云”:E(D_1)=-(\\frac{4}{4}×log_{2}{\\frac{4}{4}})=0$\n",
    "\n",
    "$“雨”:E(D_2)=-(\\frac{3}{5}×log_{2}{\\frac{3}{5}}+\\frac{2}{5}×log_{2}{\\frac{2}{5}})=0.971$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "现在,我们来编写代码进行完成上面的计算。\n",
    "\n",
    "首先,我们可以使用下面的写法,对 Dataframe 进行多个条件的筛选。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 筛选出 天气为晴并且去游乐场的样本数据\n",
    "df[(df['天气']=='晴') & (df['是否前往游乐场']=='1')]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 天气为晴的总天数\n",
    "total_num_sun = df[df['天气']=='晴'].shape[0]\n",
    "\n",
    "# 天气为晴时,去游乐场和不去游乐场的人数\n",
    "count_dict_sun = {'前往':df[(df['天气']=='晴') & (df['是否前往游乐场']=='1')].shape[0],\n",
    "                  '不前往':df[(df['天气']=='晴') & (df['是否前往游乐场']=='0')].shape[0]}\n",
    "print(count_dict_sun)\n",
    "\n",
    "# 计算天气-晴 的信息熵\n",
    "ent_sun = calc_entropy(total_num_sun, count_dict_sun)\n",
    "print('天气-晴 的信息熵为:%s' % ent_sun)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 天气为多云的总天数\n",
    "total_num_cloud = df[df['天气']=='多云'].shape[0]\n",
    "\n",
    "# 天气为多云时,去游乐场和不去游乐场的人数\n",
    "count_dict_cloud = {'前往':df[(df['天气']=='多云') & (df['是否前往游乐场']=='1')].shape[0],\n",
    "                    '不前往':df[(df['天气']=='多云') & (df['是否前往游乐场']=='0')].shape[0]}\n",
    "print(count_dict_cloud)\n",
    "\n",
    "# 计算天气-多云 的信息熵\n",
    "ent_cloud = calc_entropy(total_num_cloud, count_dict_cloud)\n",
    "print('天气-多云 的信息熵为:%s' % ent_cloud)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 天气为雨的总天数\n",
    "total_num_rain = df[df['天气']=='雨'].shape[0]\n",
    "\n",
    "# 天气为雨时,去游乐场和不去游乐场的人数\n",
    "count_dict_rain = {'前往':df[(df['天气']=='雨') & (df['是否前往游乐场']=='1')].shape[0],\n",
    "                   '不前往':df[(df['天气']=='雨') & (df['是否前往游乐场']=='0')].shape[0]}\n",
    "print(count_dict_rain)\n",
    "\n",
    "# 计算天气-雨 的信息熵\n",
    "ent_rain = calc_entropy(total_num_rain, count_dict_rain)\n",
    "print('天气-雨 的信息熵为:%s' % ent_rain)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "计算天气状况的信息增益:  \n",
    "$$Gain(D,A)=E(D)-\\sum_{i}^{n}\\frac{|D_i|}{D}E(D)$$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "其中,$A=“天气状况”$。于是天气状况这一气象特点的信息增益为:\n",
    "$$Gain(D,天气)=0.940-(\\frac{5}{14}×0.971+\\frac{4}{14}×0+\\frac{5}{14}×0.971=0.246$$\n",
    "\n",
    "同理可以计算温度高低、湿度大小、风力强弱三个气象特点的信息增益。  \n",
    "通常情况下,某个分支的信息增益越大,则该分支对样本集划分所获得的“纯度”越大,信息不确定性减少的程度越大。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**动手练**"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用上面的公式计算信息增益。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# todo 计算按天气状况分割的信息增益\n",
    "gain = \n",
    "gain\n"
   ]
  },
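  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a cross-check, one possible way to fill in the cell above is sketched below. It assumes the earlier cells have been run, so that `entropy`, `ent_sun`, `ent_cloud` and `ent_rain` already hold the values computed there (which in turn requires your `calc_entropy` to be working)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Weighted average of the per-value entropies, subtracted from the entropy of the whole set\n",
    "gain_weather_reference = entropy - (total_num_sun / total_num * ent_sun\n",
    "                                    + total_num_cloud / total_num * ent_cloud\n",
    "                                    + total_num_rain / total_num * ent_rain)\n",
    "# Roughly 0.246; small differences come from rounding the intermediate entropies\n",
    "round(gain_weather_reference, 3)\n"
   ]
  },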
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 扩展内容\n",
    "\n",
    "**基尼指数**\n",
    "\n",
    "除了使用信息增益以外,我们也可以使用基尼指数来构建决策树。\n",
    "\n",
    "分类问题中,假设有 $K$ 个类,样本点属于第 $k$ 类的概率为 $p_{k}$,则概率分布的基尼指数定义为:\n",
    "\n",
    "$$\\operatorname{Gini}(p)=\\sum_{k=1}^{K} p_{k}\\left(1-p_{k}\\right)=1-\\sum_{k=1}^{K} p_{k}^{2}$$\n",
    "\n",
    "\n",
    "对于给定的样本集合 $D$,其基尼指数为\n",
    "\n",
    "$$\\operatorname{Gini}(D)=1-\\sum_{k=1}^{K}\\left(\\frac{\\left|C_{k}\\right|}{|D|}\\right)^{2}$$\n",
    "\n",
    "这里,$C_{k}$  $D$ 中属于第 $k$ 类的样本子集,$K$ 是类的个数。\n",
    "\n",
    "如果样本集合 $D$ 根据特征 $A$ 是否取某一可能值 $a$ 被分割为 $D_{1}$   $D_{2}$ 两部分,即\n",
    "\n",
    "$$D_{1}=\\{(x, y) \\in D | A(x)=a\\}, \\quad D_{2}=D-D_{1}$$\n",
    "\n",
    "则在特征 $A$ 的条件下,集合 $D$ 的基尼指数定义为\n",
    "\n",
    "$$\\operatorname{Gini}(D, A)=\\frac{\\left|D_{1}\\right|}{|D|}\n",
    "\\operatorname{Gini}\\left(D_{1}\\right)+\\frac{\\left|D_{2}\\right|}{|D|} \\operatorname{Gini}\\left(D_{2}\\right)$$\n",
    "\n",
    "基尼指数 $Gini(D)$ 表示集合 $D$ 的不确定性,基尼指数 $Gini(D, A)$ 表示经过分割后集合 $D$ 的不确定性。基尼指数值越大,样本集合的不确定性也就越大,这一点与信息熵相似。"
   ]
  },
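  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The original material gives no code for the Gini index, so the cell below is our own illustration of the formulas above on the amusement-park data. It assumes `df` from earlier has been created; the helper names `gini` and `class_counts` are hypothetical."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gini(count_dict):\n",
    "    \"\"\"Gini index of a sample set described by its class counts (illustrative sketch).\"\"\"\n",
    "    total = sum(count_dict.values())\n",
    "    return 1 - sum((n / total) ** 2 for n in count_dict.values())\n",
    "\n",
    "def class_counts(sub_df):\n",
    "    \"\"\"Class counts of a subset of the amusement-park DataFrame.\"\"\"\n",
    "    return {'前往': (sub_df['是否前往游乐场'] == '1').sum(),\n",
    "            '不前往': (sub_df['是否前往游乐场'] == '0').sum()}\n",
    "\n",
    "# Gini index of the whole dataset: 9 positive vs 5 negative samples\n",
    "print('Gini(D) = %.3f' % gini({'前往': 9, '不前往': 5}))\n",
    "\n",
    "# Gini index of D split on whether the weather is overcast (多云)\n",
    "d1 = df[df['天气'] == '多云']\n",
    "d2 = df[df['天气'] != '多云']\n",
    "gini_split = (len(d1) / len(df)) * gini(class_counts(d1)) + (len(d2) / len(df)) * gini(class_counts(d2))\n",
    "print('Gini(D, weather = overcast) = %.3f' % gini_split)\n"
   ]
  },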
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**想一想**:"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "对于二分类问题,若样本点属于第 1 个类的概率是 $p$,则概率分布的基尼指数是多少?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 思考与练习    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "1. 分别将天气状况、温度高低、湿度大小、风力强弱作为分支点来构建决策树,查看信息增益。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# todo 以温度 26 为温度高低的分界线,计算信息增益\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# todo 以湿度 75 为湿度大小的分界线,计算信息增益\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# todo 以有风无风划分,计算信息增益\n"
   ]
  },
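  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "One possible generic helper for these exercises is sketched below. It computes the information gain of splitting `df` on any column directly; the function name `info_gain_reference` and the default target column are our own choices, so treat it as a way to check your answers rather than as the required solution."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def info_gain_reference(data, feature, target='是否前往游乐场'):\n",
    "    \"\"\"Information gain of splitting `data` on `feature` (reference sketch).\"\"\"\n",
    "    def entropy_of(sub):\n",
    "        counts = sub[target].value_counts()\n",
    "        total = counts.sum()\n",
    "        return -sum((n / total) * log(n / total, 2) for n in counts if n > 0)\n",
    "    total = len(data)\n",
    "    # Weighted average of the entropies of the subsets induced by `feature`\n",
    "    cond_ent = sum(len(sub) / total * entropy_of(sub) for _, sub in data.groupby(feature))\n",
    "    return round(entropy_of(data) - cond_ent, 4)\n",
    "\n",
    "# Gain of each candidate split (assumes df from the preprocessing cell above)\n",
    "for col in ['天气', '温度', '湿度', '是否有风']:\n",
    "    print(col, info_gain_reference(df, col))\n"
   ]
  },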
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "2. 每朵鸢尾花有萼片长度、萼片宽度、花瓣长度、花瓣宽度四个特征。现在需要根据这四个特征将鸢尾花分为杂色鸢尾花、维吉尼亚鸢尾和山鸢尾三类,试构造决策树进行分类。\n",
    "\n",
    "|序号|萼片长度|萼片宽度|花瓣长度|花瓣宽度|种类|\n",
    "|:--:|:--:|:--:|:--:|:--:|:--:|\n",
    "|1|5.0|2.0|3.5|1.0|杂色鸢尾|\n",
    "|2|6.0|2.2|5.0|1.5|维吉尼亚鸢尾|\n",
    "|3|6.0|2.2|4.0|1.0|杂色鸢尾|\n",
    "|4|6.2|2.2|4.5|1.5|杂色鸢尾|\n",
    "|5|4.5|2.3|1.3|0.3|山鸢尾|"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "观察上表中的五笔数据,我们可以看到 杂色鸢尾  维吉尼亚鸢尾 的花瓣宽度明显大于山鸢尾,所以可以通过判断花瓣宽度是否大于 0.7,来将山鸢尾区从其他两种鸢尾中区分出来。\n",
    "\n",
    "同时,杂色鸢尾  维吉尼亚鸢尾 的花瓣长度明显大于山鸢尾,所以也可以通过判断花瓣长度是否大于 2.4,来将山鸢尾区从其他两种鸢尾中区分出来。\n",
    "\n",
    "然后我们观察到 维吉尼亚鸢尾的花瓣长度明显大于杂色鸢尾,所以可以通过判断花瓣长度是否大于 4.75,来将杂色鸢尾和维吉尼亚鸢尾区分出来。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "实际上是否如此呢?你能否想到其他的切分方式?"
   ]
  },
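  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The cell below is a small illustration (not part of the original exercise) that rebuilds the five-row table as a DataFrame and checks the hand-picked thresholds; the `mini_iris` and `classify` names are hypothetical."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the five rows of the table above\n",
    "mini_iris = pd.DataFrame(\n",
    "    [[5.0, 2.0, 3.5, 1.0, 'versicolor'],\n",
    "     [6.0, 2.2, 5.0, 1.5, 'virginica'],\n",
    "     [6.0, 2.2, 4.0, 1.0, 'versicolor'],\n",
    "     [6.2, 2.2, 4.5, 1.5, 'versicolor'],\n",
    "     [4.5, 2.3, 1.3, 0.3, 'setosa']],\n",
    "    columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species'])\n",
    "\n",
    "def classify(row):\n",
    "    if row['petal_width'] <= 0.7:      # petal width <= 0.7 -> setosa\n",
    "        return 'setosa'\n",
    "    if row['petal_length'] <= 4.75:    # petal length <= 4.75 -> versicolor\n",
    "        return 'versicolor'\n",
    "    return 'virginica'                 # otherwise virginica\n",
    "\n",
    "# True if the two thresholds classify all five rows correctly\n",
    "(mini_iris.apply(classify, axis=1) == mini_iris['species']).all()\n"
   ]
  },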
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "上面的表格只是 Iris 数据集的一小部分,完整的数据集包含 150 个数据样本,分为 3 类,每类 50 个数据,每个数据包含 4 个属性。即花萼长度,花萼宽度,花瓣长度,花瓣宽度4个属性。\n",
    "\n",
    "我们使用 sklearn 工具包来构建决策树模型,先导入数据集。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.datasets import load_iris\n",
    "# 加载数据集\n",
    "iris = load_iris()\n",
    "# 查看 label\n",
    "print(list(iris.target_names))\n",
    "# 查看 feature\n",
    "print(iris.feature_names)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "setosa 是山鸢尾,versicolor是杂色鸢尾,virginica是维吉尼亚鸢尾。\n",
    "\n",
    "sepal length, sepal width,petal length,petal width 分别是萼片长度,萼片宽度,花瓣长度,花瓣宽度。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "然后进行训练集和测试集的切分。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "# 载入数据\n",
    "X, y = load_iris(return_X_y=True)\n",
    "# 切分训练集合测试集\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来,我们在训练集数据上训练决策树模型。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn import tree\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "# 初始化模型,可以调整 max_depth 来观察模型的表现\n",
    "# 也可以调整 criterion   gini 来使用 gini 指数构建决策树\n",
    "clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=2)\n",
    "# 训练模型\n",
    "clf = clf.fit(X_train, y_train)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们可以使用 graphviz 包来展示构建好的决策树。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import graphviz\n",
    "feature_names = ['萼片长度','萼片宽度','花瓣长度','花瓣宽度']\n",
    "target_names = ['山鸢尾', '杂色鸢尾', '维吉尼亚鸢尾']\n",
    "# 可视化生成的决策树\n",
    "dot_data = tree.export_graphviz(clf, out_file=None,\n",
    "                     feature_names=feature_names,\n",
    "                     class_names=target_names,\n",
    "                     filled=True, rounded=True,\n",
    "                     special_characters=True)\n",
    "graph = graphviz.Source(dot_data)\n",
    "graph\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们看模型在测试集上的表现。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import accuracy_score\n",
    "y_test_predict = clf.predict(X_test)\n",
    "accuracy_score(y_test,y_test_predict)\n"
   ]
  },
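  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As the training cell above suggests, `max_depth` and `criterion` are worth experimenting with. The cell below is a small add-on (not part of the original material) that loops over a few settings on the same train/test split; the exact accuracies may vary with your scikit-learn version."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compare a few criterion / max_depth settings on the same split\n",
    "for criterion in ['entropy', 'gini']:\n",
    "    for depth in [1, 2, 3, 4]:\n",
    "        model = DecisionTreeClassifier(criterion=criterion, max_depth=depth, random_state=0)\n",
    "        model.fit(X_train, y_train)\n",
    "        acc = accuracy_score(y_test, model.predict(X_test))\n",
    "        print('criterion=%s, max_depth=%d -> test accuracy %.3f' % (criterion, depth, acc))\n"
   ]
  },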
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 实践与体验\n",
    "\n",
    "**计算文章的信息熵**\n",
    "\n",
    "收集中英文对照的短文,在计算短文内中文单词和英文单词出现概率基础上,计算该两篇短文的信息熵,比较中文短文信息熵和英文短文信息熵的大小。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "首先定义一个方法来辅助读取文件的内容。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_file(path):\n",
    "    \"\"\"\n",
    "    读取某文件的内容\n",
    "    :param path: 文件的路径\n",
    "    :return: 文件的内容\n",
    "    \"\"\"\n",
    "    contents = \"\"\n",
    "    with open(path) as f:\n",
    "        # 读取每一行的内容\n",
    "        for line in f.readlines():\n",
    "            contents += line\n",
    "    return contents\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用上面定义的方法读取英文短文及其对应的中文短文。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 读取英文短文\n",
    "en_essay = read_file('essay3_en.txt')\n",
    "# 读取中文短文\n",
    "ch_essay = read_file('essay3_ch.txt')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "处理文本,统计单词出现的概率,并计算信息熵。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from collections import Counter\n",
    "import re\n",
    "\n",
    "\n",
    "def cal_essay_entropy(essay, split_by=None):\n",
    "    \"\"\"\n",
    "    计算文章的信息熵\n",
    "    :param essay: 文章内容\n",
    "    :param split_by: 切分方式,对于中文文章,不需传入,按字符切分,\n",
    "                     对于英文文章,需传入空格字符来进行切分\n",
    "    :return: 文章的信息熵\n",
    "    \"\"\"\n",
    "    # 把英文全部转为小写\n",
    "    essay = essay.lower()\n",
    "    # 去除标点符号\n",
    "    essay = re.sub(\n",
    "        \"[\\f+\\n+\\r+\\t+\\v+\\?\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#《》¥%……&*()]\", \"\",\n",
    "        essay)\n",
    "    # print(essay)\n",
    "    # 把文本分割为词\n",
    "    if split_by:\n",
    "        word_list = essay.split(split_by)\n",
    "    else:\n",
    "        word_list = list(essay)\n",
    "    # 统计总的单词数\n",
    "    word_number = len(word_list)\n",
    "    print('此文章共有 %s 个单词' % word_number)\n",
    "    # 得到每个单词出现的次数\n",
    "    word_counter = Counter(word_list)\n",
    "    # print('每个单词出现的次数为:%s' % word_counter)\n",
    "    # 使用信息熵公式计算信息熵\n",
    "    ent = -sum([(p / word_number) * log(p / word_number, 2) for p in\n",
    "                word_counter.values()])\n",
    "    print('信息熵为:%.2f' % ent)\n",
    "    return ent\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ent = cal_essay_entropy(ch_essay)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ent = cal_essay_entropy(en_essay, split_by = ' ')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**问题 1**: 你在上面的试验中观察到了什么?请在下面写下你观察到的现象,并尝试分析其原因。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**答案 1**:(在此处填写你的答案。)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 扩展阅读\n",
    "\n",
    "1. [决策树与随机森林](https://www.bilibili.com/video/av26086646?from=search&seid=6716049859412037731)\n",
    "2. [从决策树到随机森林:树型算法的原理与实现](https://www.jiqizhixin.com/articles/2017-07-31-3)\n",
    "3. [sklearn 决策树](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}