先说学习心得

通过学习这篇关于特征重要性的 baseline,我收获了以下三点:

1.feature_importance

2.一款GPU计算的开源框架rapids

3.回顾了xgb树模型的生成过程

资源搬运如下:

https://www.kaggle.com/aerdem4/m5-lofo-importance-on-gpu-via-rapids-xgboost

这位大佬在利物浦(Liverpool Ion Switching)的比赛中也复用了这个计算特征重要性的方法,推荐大家去看看他的其他分享。谢谢。

from  datetime import datetime, timedelta
import gc
import numpy as np, pandas as pd
import lightgbm as lgb

import cudf
import cu_utils.transform as cutran
下面的 cu_utils 模块是这位 kaggler 手写的轮子(一组 GPU 上的分组变换函数),源码如下:
from numba import cuda, float32
import math


def cu_mean_transform(x, y_out):
    """Fill every slot of ``y_out`` with the mean of ``x``.

    Numba-CUDA device routine: the threads of one block cooperate on a
    single group, each covering a strided slice of ``x``.  The running
    sum lives in one shared-memory cell updated atomically.
    """
    total = cuda.shared.array(1, dtype=float32)
    total[0] = 0
    cuda.syncthreads()  # make sure the accumulator is zeroed before use

    n = len(x)
    stride = cuda.blockDim.x
    # Phase 1: every thread folds its strided slice into the shared sum.
    for idx in range(cuda.threadIdx.x, n, stride):
        cuda.atomic.add(total, 0, x[idx])
    cuda.syncthreads()  # the sum must be complete before anyone reads it

    # Phase 2: broadcast the finished mean to every output position.
    for idx in range(cuda.threadIdx.x, n, stride):
        y_out[idx] = total[0] / n


def cu_max_transform(x, y_out):
    """Fill every slot of ``y_out`` with the maximum of ``x``.

    Numba-CUDA device routine: block threads reduce into one shared cell
    via ``cuda.atomic.max``, then broadcast the result.
    """
    block_max = cuda.shared.array(1, dtype=float32)
    block_max[0] = -math.inf  # identity element for max
    cuda.syncthreads()

    n = len(x)
    stride = cuda.blockDim.x
    # Reduce: each thread contributes its strided slice of x.
    for idx in range(cuda.threadIdx.x, n, stride):
        cuda.atomic.max(block_max, 0, x[idx])
    cuda.syncthreads()  # reduction must finish before the broadcast

    # Broadcast the block-wide maximum into every output position.
    for idx in range(cuda.threadIdx.x, n, stride):
        y_out[idx] = block_max[0]


def cu_min_transform(x, y_out):
    """Fill every slot of ``y_out`` with the minimum of ``x``.

    Numba-CUDA device routine: block threads reduce into one shared cell
    via ``cuda.atomic.min``, then broadcast the result.
    """
    block_min = cuda.shared.array(1, dtype=float32)
    block_min[0] = math.inf  # identity element for min
    cuda.syncthreads()

    n = len(x)
    stride = cuda.blockDim.x
    # Reduce: each thread contributes its strided slice of x.
    for idx in range(cuda.threadIdx.x, n, stride):
        cuda.atomic.min(block_min, 0, x[idx])
    cuda.syncthreads()  # reduction must finish before the broadcast

    # Broadcast the block-wide minimum into every output position.
    for idx in range(cuda.threadIdx.x, n, stride):
        y_out[idx] = block_min[0]


def get_cu_shift_transform(shift_by, null_val=-1):
    """Return a numba-CUDA device routine shifting ``x`` by ``shift_by``.

    ``y_out[i]`` becomes ``x[i - shift_by]`` when that index is in range,
    otherwise ``null_val`` (e.g. the first ``shift_by`` slots for a
    positive shift).
    """
    def cu_shift_transform(x, y_out):
        n = len(x)
        for idx in range(cuda.threadIdx.x, n, cuda.blockDim.x):
            src = idx - shift_by
            # Exactly one write per slot: shifted value or the sentinel.
            if 0 <= src < n:
                y_out[idx] = x[src]
            else:
                y_out[idx] = null_val
    return cu_shift_transform


def get_cu_rolling_mean_transform(window, null_val=-1):
    """Return a numba-CUDA device routine computing a trailing rolling mean.

    Output slot ``i`` gets the mean of ``x[i-window+1 .. i]``; positions
    with fewer than ``window`` prior values get ``null_val``.
    """
    def cu_rolling_mean_transform(x, y_out):
        # Threads stride over output positions along the x dimension.
        for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
            y_out[i] = 0
            if i >= window-1:
                # The window's elements are split across the y thread
                # dimension and summed atomically into y_out[i].
                # NOTE(review): with blockDim.y > 1 the zeroing above and
                # the divide below run once per y-thread and race with the
                # atomic adds — presumably this is launched with
                # blockDim.y == 1; confirm against the launch config.
                for j in range(cuda.threadIdx.y, window, cuda.blockDim.y):
                    cuda.atomic.add(y_out, i, x[i-j])
                y_out[i] /= window
            else:
                y_out[i] = null_val
    return cu_rolling_mean_transform


def get_cu_rolling_max_transform(window, null_val=-1):
    """Return a numba-CUDA device routine computing a trailing rolling max.

    Output slot ``i`` gets the max of ``x[i-window+1 .. i]``; positions
    with fewer than ``window`` prior values get ``null_val``.
    """
    def cu_rolling_max_transform(x, y_out):
        # Threads stride over output positions along the x dimension.
        for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
            y_out[i] = -math.inf  # identity element for max
            if i >= window-1:
                # Window elements split across y-threads, folded atomically.
                # NOTE(review): the -inf init above runs once per y-thread
                # when blockDim.y > 1 and can race with the atomic maxes —
                # presumably launched with blockDim.y == 1; confirm.
                for j in range(cuda.threadIdx.y, window, cuda.blockDim.y):
                    cuda.atomic.max(y_out, i, x[i-j])
            else:
                y_out[i] = null_val
    return cu_rolling_max_transform


def get_cu_rolling_min_transform(window, null_val=-1):
    """Return a numba-CUDA device routine computing a trailing rolling min.

    Output slot ``i`` gets the min of ``x[i-window+1 .. i]``; positions
    with fewer than ``window`` prior values get ``null_val``.
    """
    def cu_rolling_min_transform(x, y_out):
        # Threads stride over output positions along the x dimension.
        for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
            y_out[i] = math.inf  # identity element for min
            if i >= window-1:
                # Window elements split across y-threads, folded atomically.
                # NOTE(review): the +inf init above runs once per y-thread
                # when blockDim.y > 1 and can race with the atomic mins —
                # presumably launched with blockDim.y == 1; confirm.
                for j in range(cuda.threadIdx.y, window, cuda.blockDim.y):
                    cuda.atomic.min(y_out, i, x[i-j])
            else:
                y_out[i] = null_val
    return cu_rolling_min_transform
以上就是这套 GPU 变换"轮子"的全部代码。

相关文章: