// Author: wuzaipei
package classifierAlg
import config.conf.{sc, spark_session}
import config.conf.spark_session.implicits._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame}
object euclideanDist {

  /** Default location of the iris CSV (four numeric features plus a label column). */
  private val DefaultPath: String =
    "data/irsdf/part-00000-ca2d6ce7-bcd0-4c24-aba9-e8cb01dcc04c-c000.csv"

  /**
   * Loads the iris CSV, names its five columns, casts every column to
   * `Double` and prints the resulting schema.
   *
   * @param args optional; `args(0)` overrides the input CSV path.
   *             With no arguments the default path is used (original behavior).
   */
  def main(args: Array[String]): Unit = {
    // Allow the CSV path to be supplied on the command line; fall back to the default.
    val path: String = args.headOption.getOrElse(DefaultPath)
    val df: DataFrame = spark_session.read.csv(path).toDF("ft1", "ft2", "ft3", "ft4", "label")
    // csv() without schema inference reads every column as String,
    // so explicitly cast each column to Double before use.
    val colsd: Array[Column] = df.columns.map(c => df(c).cast(DoubleType))
    val df2: DataFrame = df.select(colsd: _*)
    df2.printSchema()
  }
}

  

 

/*
 * NOTE(review): blog-page footer captured alongside the code — kept for
 * provenance, commented out so the file compiles.
 *
 * 分类 (Category):
 * 技术点 (Topics):
 * 相关文章 (Related articles):
 *   - 2021-12-08
 *   - 2021-11-28
 *   - 2022-02-09
 *   - 2021-12-21
 *   - 2021-12-23
 *   - 2022-01-18
 *   - 2022-01-29
 * 猜你喜欢 (You may also like):
 *   - 2021-12-08
 *   - 2021-11-20
 *   - 2021-11-20
 * 相关资源 (Related resources)
 * 相似解决方案 (Similar solutions)
 */