How to use nearest neighbours for Regression?

This recipe helps you use nearest neighbours for Regression
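
KNeighborsRegressor predicts a target by averaging the targets of the k training points nearest to the query. A minimal sketch of the idea (the toy data below is illustrative, not part of the recipe):

from sklearn.neighbors import KNeighborsRegressor
import numpy as np

X_train = np.array([[0.0], [1.0], [2.0], [3.0]])
y_train = np.array([0.0, 1.0, 2.0, 3.0])

knn = KNeighborsRegressor(n_neighbors=2)
knn.fit(X_train, y_train)
# 1.4 lies between 1.0 and 2.0, so the prediction is their mean: 1.5
print(knn.predict([[1.4]]))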
In [2]:
## How to use nearest neighbours for Regression
def Snippet_154():
    print()
    print(format('## How to use nearest neighbours for Regression','*^82'))

    import warnings
    warnings.filterwarnings("ignore")

    # load libraries
    from sklearn import decomposition, datasets
    from sklearn import neighbors
    from sklearn.pipeline import Pipeline
    from sklearn.model_selection import GridSearchCV, cross_val_score
    from sklearn.preprocessing import StandardScaler

    # Generate a synthetic regression dataset (random_state=None, so results vary run to run)
    dataset = datasets.make_regression(n_samples=1000, n_features=20, n_informative=10,
                n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0,
                shuffle=True, coef=False, random_state=None)
    X = dataset[0]
    y = dataset[1]

    # Create a standard scaler object
    sc = StandardScaler()

    # Create a PCA object
    pca = decomposition.PCA()

    # Create a k-nearest neighbours regressor object
    KNN = neighbors.KNeighborsRegressor()

    # Create a pipeline of three steps. First, standardize the data.
    # Second, transform the data with PCA.
    # Third, train a k-nearest neighbours regressor on the data.
    pipe = Pipeline(steps=[('sc', sc),
                           ('pca', pca),
                           ('KNN', KNN)])

    # Create Parameter Space
    # Create a list of integers from 1 to 20 (the number of features in X)
    n_components = list(range(1,X.shape[1]+1,1))

    # Create lists of parameter for KNeighborsRegressor()
    n_neighbors = [5, 10]
    algorithm = ['auto',  'ball_tree', 'kd_tree', 'brute']

    # Create a dictionary of all the parameter options
    # Note that you can access the parameters of a pipeline step by using '__'
    parameters = dict(pca__n_components=n_components,
                      KNN__n_neighbors=n_neighbors,
                      KNN__algorithm=algorithm)
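    # For example, 'pca__n_components' targets the n_components parameter of the
    # step named 'pca'; outside of a grid search, the equivalent setting would be
    # pipe.set_params(pca__n_components=5) (the value here is illustrative)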

    # Conduct Parameter Optimization With Pipeline
    # Create a grid search object
    clf = GridSearchCV(pipe, parameters)

    # Fit the grid search
    clf.fit(X, y)

    # View The Best Parameters
    print('Best Number Of Components:', clf.best_estimator_.get_params()['pca__n_components'])
    print(); print(clf.best_estimator_.get_params()['KNN'])
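    # The winning parameter combination and its mean CV score are also
    # available via clf.best_params_ and clf.best_score_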

    # Use Cross Validation To Evaluate Model
    CV_Result = cross_val_score(clf, X, y, cv=3, n_jobs=-1, scoring='r2')
    print(); print(CV_Result)
    print(); print(CV_Result.mean())
    print(); print(CV_Result.std())
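    # Any other regression scorer can be swapped in the same way, e.g.
    # scoring='neg_mean_squared_error' instead of 'r2'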

Snippet_154()
*****************## How to use nearest neighbours for Regression******************
Best Number Of Components: 17

KNeighborsRegressor(algorithm='auto', leaf_size=30, metric='minkowski',
          metric_params=None, n_jobs=None, n_neighbors=10, p=2,
          weights='uniform')

[0.61096235 0.62923981 0.58886741]

0.6096898558628526

0.016506503277788975
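
Once fitted, the grid search object behaves like a regressor and predicts with the best pipeline it found. A minimal usage sketch, assuming it is run inside Snippet_154() after clf.fit(X, y) (the slice of X below is illustrative):

    # Predict with the best estimator found during the search
    print(clf.predict(X[:5]))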