  • Experiment 2: K-Nearest Neighbors Algorithm and Its Application

    | Experiment name | K-Nearest Neighbors Algorithm and Its Application |
    | ---- | ---- |
    | Class | 计算机183 |
    | Completed by | 袁健 |
    | Student ID | 3180701334 |

    I. [Experiment Objectives]

    Understand the principle of the k-nearest neighbor algorithm and be able to implement it;
    Master common distance metrics;
    Master the kd-tree implementation of the k-nearest neighbor algorithm;
    Be able to apply k-nearest neighbors to solve practical problems for specific application scenarios and data.

    II. [Experiment Content]

    Implement the Manhattan, Euclidean, and Minkowski distance algorithms and test their correctness (the unifying Minkowski formula is shown after this list);
    Implement the kd-tree algorithm for k-nearest neighbors;
    Apply sklearn's k-nearest neighbor algorithm to predict classes on the iris dataset;
    Write a program that uses a kd-tree to predict classes on the iris dataset.
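
    The three metrics above are instances of a single family: for points $x, y \in \mathbf{R}^n$, the Minkowski distance of order $p$ is

    $$L_p(x, y) = \left( \sum_{i=1}^{n} |x_i - y_i|^p \right)^{1/p}$$

    which reduces to the Manhattan distance at $p = 1$ and the Euclidean distance at $p = 2$; the function L implemented below computes exactly this quantity.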

    III. [Experiment Report Requirements]

    Describe the experimental procedure, algorithms, and test results against each item of the experiment content;
    Standardize the code: naming conventions and comments;
    Analyze the complexity of the core algorithms;
    Consult the literature and discuss the advantages and disadvantages of k-nearest neighbors;
    Give examples of application scenarios for k-nearest neighbors.

    IV. [Experiment Results]

    import math
    
    def L(x, y, p=2):
        """Minkowski distance of order p between two equal-length points.
    
        p=1 gives the Manhattan distance, p=2 the Euclidean distance.
        """
        if len(x) == len(y) and len(x) > 1:
            total = 0
            for i in range(len(x)):
                total += math.pow(abs(x[i] - y[i]), p)
            return math.pow(total, 1 / p)
        else:
            return 0
    
    x1 = [1, 1]
    x2 = [5, 1]
    x3 = [4, 4]
    
    # For p = 1..4, find which of x2, x3 is nearer to x1 under L_p
    for i in range(1, 5):
        r = {'1-{}'.format(c): L(x1, c, p=i) for c in [x2, x3]}
        print(min(zip(r.values(), r.keys())))
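
    The loop confirms that the choice of metric changes the answer: $L(x_1, x_2, p) = 4$ for every $p$, while $L(x_1, x_3, p) = 3 \cdot 2^{1/p}$ shrinks as $p$ grows, so the printed nearest neighbor of x1 switches from x2 (at $p = 1, 2$) to x3 (at $p \ge 3$).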
    
    

    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    %matplotlib inline
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from collections import Counter
    
    # Load the iris data set into a DataFrame
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    
    # Keep the first 100 samples (classes 0 and 1) and the first two features,
    # then split into the train/test sets used by the classifiers below
    data = np.array(df.iloc[:100, [0, 1, -1]])
    X, y = data[:, :-1], data[:, -1]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    
    df
    




    # Scatter plot of the two classes over the first two features
    plt.scatter(df[:50]['sepal length'], df[:50]['sepal width'], label='0')
    plt.scatter(df[50:100]['sepal length'], df[50:100]['sepal width'], label='1')
    plt.xlabel('sepal length')
    plt.ylabel('sepal width')
    plt.legend()
    

    class KNN:
        def __init__(self, X_train, y_train, n_neighbors=3, p=2):
            """
            parameter: n_neighbors  number of neighbors that vote
            parameter: p  order of the Minkowski distance metric
            """
            self.n = n_neighbors
            self.p = p
            self.X_train = X_train
            self.y_train = y_train
            
        def predict(self, X):
            # Seed the candidate list with the first n training points
            knn_list = []
            for i in range(self.n):
                dist = np.linalg.norm(X - self.X_train[i], ord=self.p)
                knn_list.append((dist, self.y_train[i]))
                
            # Scan the remaining points, replacing the current farthest
            # candidate whenever a closer point is found
            for i in range(self.n, len(self.X_train)):
                max_index = knn_list.index(max(knn_list, key=lambda x: x[0]))
                dist = np.linalg.norm(X - self.X_train[i], ord=self.p)
                if knn_list[max_index][0] > dist:
                    knn_list[max_index] = (dist, self.y_train[i])
                    
            # Majority vote among the n nearest labels
            knn = [k[-1] for k in knn_list]
            count_pairs = Counter(knn)
            max_count = count_pairs.most_common(1)[0][0]
            return max_count
        
        def score(self, X_test, y_test):
            right_count = 0
            for X, y in zip(X_test, y_test):
                label = self.predict(X)
                if label == y:
                    right_count += 1
            return right_count / len(X_test)
    
    clf = KNN(X_train, y_train)
    clf.score(X_test, y_test)
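
    A note on complexity, per the report requirements: predict above computes a distance to every training sample, so one query costs $O(N(d + k))$ for $N$ training points of dimension $d$ and $k =$ n_neighbors, since each of the $N$ steps does an $O(d)$ norm plus an $O(k)$ scan of the candidate list. The kd-tree implemented below reduces the average query cost to $O(\log N)$ for low-dimensional data.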
    


    test_point = [6.0, 3.0]
    print('Predicted class of test point {}: {}'.format(test_point, clf.predict(test_point)))
    
    

    plt.scatter(df[:50]['sepal length'], df[:50]['sepal width'], label='0')
    plt.scatter(df[50:100]['sepal length'], df[50:100]['sepal width'], label='1')
    plt.plot(test_point[0], test_point[1], 'bo', label='test_point')
    plt.xlabel('sepal length')
    plt.ylabel('sepal width')
    plt.legend()
    

    from sklearn.neighbors import KNeighborsClassifier
    clf_sk = KNeighborsClassifier()
    clf_sk.fit(X_train, y_train)
    

    clf_sk.score(X_test, y_test)
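
    The classifier above uses sklearn's defaults (5 neighbors, uniform voting). It exposes the same knobs as the hand-written version; a minimal sketch with illustrative values that are not from the original experiment:

    clf_sk2 = KNeighborsClassifier(n_neighbors=3, p=2, weights='distance')  # distance-weighted vote, Euclidean metric
    clf_sk2.fit(X_train, y_train)
    clf_sk2.score(X_test, y_test)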
    

    # The main data structure contained in each node of the kd-tree
    class KdNode(object):
        def __init__(self, dom_elt, split, left, right):
            self.dom_elt = dom_elt  # a k-dimensional vector (a sample point in k-dimensional space)
            self.split = split      # integer: index of the splitting dimension
            self.left = left        # kd-tree formed by the left subspace of this node's splitting hyperplane
            self.right = right      # kd-tree formed by the right subspace
    
            
    class KdTree(object):
        def __init__(self, data):
            k = len(data[0])  # data dimensionality
            
            def CreateNode(split, data_set):  # create a KdNode by splitting data_set on dimension `split`
                if not data_set:  # empty data set
                    return None
                # Sort on the splitting dimension and take the median as the split point
                # (equivalently data_set.sort(key=itemgetter(split)) with operator.itemgetter)
                data_set.sort(key=lambda x: x[split])
                split_pos = len(data_set) // 2  # // is integer division in Python
                median = data_set[split_pos]    # median split point
                split_next = (split + 1) % k    # cycle coordinates
    
                # Recursively build the kd-tree
                return KdNode(median, split,
                              CreateNode(split_next, data_set[:split_pos]),      # build the left subtree
                              CreateNode(split_next, data_set[split_pos + 1:]))  # build the right subtree
            
            self.root = CreateNode(0, data)  # build the kd-tree starting from dimension 0; keep the root
    
    # Preorder traversal of the kd-tree
    def preorder(root):
        print(root.dom_elt)
        if root.left:  # node is not empty
            preorder(root.left)
        if root.right:
            preorder(root.right)
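
    On complexity: construction sorts the points once per level of recursion, so with $O(\log n)$ levels and $O(n \log n)$ total sorting work per level, building the tree costs about $O(n \log^2 n)$; a balanced tree then supports nearest-neighbor search that visits $O(\log n)$ nodes on average in low dimensions.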
    

    # Search the constructed kd-tree for the sample point nearest to the target point:
    from math import sqrt
    from collections import namedtuple
    
    # A namedtuple holding the nearest point, the nearest distance, and the number of nodes visited
    result = namedtuple("Result_tuple", "nearest_point nearest_dist nodes_visited")
    
    def find_nearest(tree, point):
        k = len(point)  # data dimensionality
    
        def travel(kd_node, target, max_dist):
            if kd_node is None:
                # float("inf") and float("-inf") denote positive and negative infinity in Python
                return result([0] * k, float("inf"), 0)
    
            nodes_visited = 1
    
            s = kd_node.split        # splitting dimension
            pivot = kd_node.dom_elt  # the splitting "axis" point
    
            if target[s] <= pivot[s]:  # the target's s-th coordinate is on the left of the split (target is closer to the left subtree)
                nearer_node = kd_node.left    # visit the left subtree first
                further_node = kd_node.right  # remember the right subtree
            else:  # the target is closer to the right subtree
                nearer_node = kd_node.right   # visit the right subtree first
                further_node = kd_node.left
    
            temp1 = travel(nearer_node, target, max_dist)  # descend into the region containing the target
    
            nearest = temp1.nearest_point  # take this leaf as the "current nearest point"
            dist = temp1.nearest_dist      # update the nearest distance
    
            nodes_visited += temp1.nodes_visited
    
            if dist < max_dist:
                max_dist = dist  # the nearest point lies inside the hypersphere centered at the target with radius max_dist
    
            temp_dist = abs(pivot[s] - target[s])  # distance from the target to the splitting hyperplane along dimension s
            if max_dist < temp_dist:  # does the hypersphere intersect the hyperplane?
                return result(nearest, dist, nodes_visited)  # if not, return directly; no further checks needed
            
            #----------------------------------------------------------------------
            # Compute the Euclidean distance between the target and the split point
            temp_dist = sqrt(sum((p1 - p2) ** 2 for p1, p2 in zip(pivot, target)))
    
            if temp_dist < dist:  # if it is closer
                nearest = pivot   # update the nearest point
                dist = temp_dist  # update the nearest distance
                max_dist = dist   # update the hypersphere radius
    
            # Check whether the other child's region contains a closer point
            temp2 = travel(further_node, target, max_dist)
            
            nodes_visited += temp2.nodes_visited
            if temp2.nearest_dist < dist:      # a closer point exists in the other child
                nearest = temp2.nearest_point  # update the nearest point
                dist = temp2.nearest_dist      # update the nearest distance
    
            return result(nearest, dist, nodes_visited)
    
        return travel(tree.root, point, float("inf"))  # recurse from the root
    

    data = [[2,3],[5,4],[9,6],[4,7],[8,1],[7,2]]
    kd = KdTree(data)
    preorder(kd.root)
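
    For this six-point data set the first split is on the x-coordinate with median [7, 2] at the root, so the preorder traversal prints [7, 2], [5, 4], [2, 3], [4, 7], [9, 6], [8, 1].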
    

    from time import perf_counter  # time.clock was removed in Python 3.8; use perf_counter instead
    from random import random
    
    # Generate one k-dimensional random vector with each component in [0, 1)
    def random_point(k):
        return [random() for _ in range(k)]
    # Generate n k-dimensional random vectors
    def random_points(k, n):
        return [random_point(k) for _ in range(n)]
    
    

    ret = find_nearest(kd, [3,4.5])
    print(ret)
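
    As a quick sanity check (not part of the original report), the kd-tree answer can be verified against a brute-force linear scan over the same six points; brute_nearest below is a hypothetical helper written only for this comparison:

    # Hypothetical verification helper: exhaustive nearest-neighbor search
    def brute_nearest(points, target):
        return min(points, key=lambda p: sqrt(sum((a - b) ** 2 for a, b in zip(p, target))))

    print(brute_nearest(data, [3, 4.5]))  # expected: [2, 3], matching ret.nearest_point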
    

    N = 400000
    t0 = perf_counter()
    kd2 = KdTree(random_points(3, N))          # build a kd-tree of 400,000 sample points in 3-dimensional space
    ret2 = find_nearest(kd2, [0.1, 0.5, 0.8])  # find the point nearest to the target among the 400,000 samples
    t1 = perf_counter()
    print("time: ", t1 - t0, "s")
    print(ret2)
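
    The nodes_visited field of ret2 shows the payoff of pruning: on a tree built from uniform random points the search typically touches only a tiny fraction of the 400,000 nodes, consistent with the average $O(\log N)$ query cost, and most of the measured time goes into building the tree rather than querying it.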
    

    V. Experiment Summary

    Through this experiment I came to understand the principle of the k-nearest neighbor algorithm and can now implement a basic version of it.
    I also mastered several common distance metrics and gained a working understanding of the kd-tree implementation of k-nearest neighbors.
