Dropping a nested column from a Spark DataFrame

I have a DataFrame with the schema:

root
 |-- label: string (nullable = true)
 |-- features: struct (nullable = true)
 |    |-- feat1: string (nullable = true)
 |    |-- feat2: string (nullable = true)
 |    |-- feat3: string (nullable = true)

While I am able to filter the DataFrame using

  val data = rawData
     .filter( !(rawData("features.feat1") <=> "100") )

I am unable to drop the column using

  val data = rawData
       .drop("features.feat1")

Is there something I am doing wrong here? I also tried (unsuccessfully) to do drop(rawData("features.feat1")), though it doesn't make much sense to do so.

Thanks in advance,

Nikhil

22 votes, asked by Nikhil J Joshi
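
For context: drop only matches top-level column names, which is why the calls above have no effect on features.feat1. Before reaching for the helpers in the answers below, the classic workaround is to rebuild the struct by hand with select; a minimal sketch, assuming the schema from the question:

import org.apache.spark.sql.functions.struct

// Rebuild "features" from its remaining fields, leaving feat1 out.
val data = rawData.select(
  rawData("label"),
  struct(
    rawData("features.feat2").alias("feat2"),
    rawData("features.feat3").alias("feat3")
  ).alias("features")
)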

It is just a programming exercise, but you can try something like this:

import org.apache.spark.sql.{DataFrame, Column}
import org.apache.spark.sql.types.{StructType, StructField}
import org.apache.spark.sql.{functions => f}
import scala.util.Try

case class DFWithDropFrom(df: DataFrame) {
  def getSourceField(source: String): Try[StructField] = {
    Try(df.schema.fields.filter(_.name == source).head)
  }

  def getType(sourceField: StructField): Try[StructType] = {
    Try(sourceField.dataType.asInstanceOf[StructType])
  }

  def genOutputCol(names: Array[String], source: String): Column = {
    f.struct(names.map(x => f.col(source).getItem(x).alias(x)): _*)
  }

  def dropFrom(source: String, toDrop: Array[String]): DataFrame = {
    getSourceField(source)
      .flatMap(getType)
      .map(_.fieldNames.diff(toDrop))
      .map(genOutputCol(_, source))
      .map(df.withColumn(source, _))
      .getOrElse(df)
  }
}

Example usage:

scala> case class features(feat1: String, feat2: String, feat3: String)
defined class features

scala> case class record(label: String, features: features)
defined class record

scala> val df = sc.parallelize(Seq(record("a_label",  features("f1", "f2", "f3")))).toDF
df: org.apache.spark.sql.DataFrame = [label: string, features: struct<feat1:string,feat2:string,feat3:string>]

scala> DFWithDropFrom(df).dropFrom("features", Array("feat1")).show
+-------+--------+
|  label|features|
+-------+--------+
|a_label| [f2,f3]|
+-------+--------+


scala> DFWithDropFrom(df).dropFrom("foobar", Array("feat1")).show
+-------+----------+
|  label|  features|
+-------+----------+
|a_label|[f1,f2,f3]|
+-------+----------+


scala> DFWithDropFrom(df).dropFrom("features", Array("foobar")).show
+-------+----------+
|  label|  features|
+-------+----------+
|a_label|[f1,f2,f3]|
+-------+----------+

Add an implicit conversion and you're good to go.
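
A minimal sketch of such an implicit conversion (the object name is illustrative, not part of the original answer):

import scala.language.implicitConversions

object DFWithDropFromSyntax {
  // Brings dropFrom into scope on any DataFrame.
  implicit def toDFWithDropFrom(df: DataFrame): DFWithDropFrom = DFWithDropFrom(df)
}

// With the conversion imported: df.dropFrom("features", Array("feat1"))

Alternatively, DFWithDropFrom could be declared as an implicit class (dropping the case modifier) in the first place.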

25 votes, answered by zero323

This version allows you to remove nested columns at any level:

import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{StructType, DataType}

/**
  * Various Spark utilities and extensions of DataFrame
  */
object DataFrameUtils {

  private def dropSubColumn(col: Column, colType: DataType, fullColName: String, dropColName: String): Option[Column] = {
    if (fullColName.equals(dropColName)) {
      None
    } else {
      colType match {
        case colType: StructType =>
          if (dropColName.startsWith(s"${fullColName}.")) {
            Some(struct(
              colType.fields
                .flatMap(f =>
                  dropSubColumn(col.getField(f.name), f.dataType, s"${fullColName}.${f.name}", dropColName) match {
                    case Some(x) => Some(x.alias(f.name))
                    case None => None
                  })
                : _*))
          } else {
            Some(col)
          }
        case other => Some(col)
      }
    }
  }

  protected def dropColumn(df: DataFrame, colName: String): DataFrame = {
    df.schema.fields
      .flatMap(f => {
        if (colName.startsWith(s"${f.name}.")) {
          dropSubColumn(col(f.name), f.dataType, f.name, colName) match {
            case Some(x) => Some((f.name, x))
            case None => None
          }
        } else {
          None
        }
      })
      .foldLeft(df.drop(colName)) {
        case (df, (colName, column)) => df.withColumn(colName, column)
      }
  }

  /**
    * Extended version of DataFrame that allows to operate on nested fields
    */
  implicit class ExtendedDataFrame(df: DataFrame) extends Serializable {
    /**
      * Drops nested field from DataFrame
      *
      * @param colName Dot-separated nested field name
      */
    def dropNestedColumn(colName: String): DataFrame = {
      DataFrameUtils.dropColumn(df, colName)
    }
  }
}

Usage:

import DataFrameUtils._
df.dropNestedColumn("a.b.c.d")
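
Applied to the question's schema, the expected behavior looks like this (a sketch; note that the rebuilt struct comes back as non-nullable, as the Java example further down also shows):

import DataFrameUtils._

val pruned = rawData.dropNestedColumn("features.feat1")
pruned.printSchema

// root
//  |-- label: string (nullable = true)
//  |-- features: struct (nullable = false)
//  |    |-- feat2: string (nullable = true)
//  |    |-- feat3: string (nullable = true)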
16 votes, answered by Michael Spector

An extension of spektom's answer, with support for array types:

import org.apache.spark.sql.{Column, DataFrame}
import org.apache.spark.sql.functions.{col, struct}
import org.apache.spark.sql.types.{ArrayType, DataType, StructType}

object DataFrameUtils {

  private def dropSubColumn(col: Column, colType: DataType, fullColName: String, dropColName: String): Option[Column] = {
    if (fullColName.equals(dropColName)) {
      None
    } else if (dropColName.startsWith(s"$fullColName.")) {
      colType match {
        case colType: StructType =>
          Some(struct(
            colType.fields
              .flatMap(f =>
                dropSubColumn(col.getField(f.name), f.dataType, s"$fullColName.${f.name}", dropColName) match {
                  case Some(x) => Some(x.alias(f.name))
                  case None => None
                })
              : _*))
        case colType: ArrayType =>
          colType.elementType match {
            case innerType: StructType =>
              Some(struct(innerType.fields
                .flatMap(f =>
                  dropSubColumn(col.getField(f.name), f.dataType, s"$fullColName.${f.name}", dropColName) match {
                    case Some(x) => Some(x.alias(f.name))
                    case None => None
                  })
                : _*))
            // Leave arrays of non-struct elements untouched instead of
            // failing with a MatchError.
            case _ => Some(col)
          }

        case other => Some(col)
      }
    } else {
      Some(col)
    }
  }

  protected def dropColumn(df: DataFrame, colName: String): DataFrame = {
    df.schema.fields
      .flatMap(f => {
        if (colName.startsWith(s"${f.name}.")) {
          dropSubColumn(col(f.name), f.dataType, f.name, colName) match {
            case Some(x) => Some((f.name, x))
            case None => None
          }
        } else {
          None
        }
      })
      .foldLeft(df.drop(colName)) {
        case (df, (colName, column)) => df.withColumn(colName, column)
      }
  }

  /**
    * Extended version of DataFrame that allows to operate on nested fields
    */
  implicit class ExtendedDataFrame(df: DataFrame) extends Serializable {
    /**
      * Drops nested field from DataFrame
      *
      * @param colName Dot-separated nested field name
      */
    def dropNestedColumn(colName: String): DataFrame = {
      DataFrameUtils.dropColumn(df, colName)
    }
  }

}
4 votes, answered by mmendez.semantic

Following spektom's code snippet for Scala, I created similar code in Java. Since Java 8 doesn't have foldLeft, I used forEachOrdered. This code is suitable for Spark 2.x (I'm using 2.1). Also, dropping a column and adding it back with withColumn under the same name doesn't work, so I simply replace the column, and that seems to work.

The code is not fully tested; hopefully it works :-)
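
For reference, the Scala foldLeft that the accumulator below emulates would look like this (a sketch; replacements is an illustrative name for the (name, column) pairs that the code computes):

val result = replacements.foldLeft(df.drop(colName)) {
  // Each pair overwrites a top-level column with its pruned struct.
  case (acc, (name, column)) => acc.withColumn(name, column)
}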

import java.util.Arrays;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.stream.Stream;

import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.StructType;

import scala.Tuple2;

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.struct;

public class DataFrameUtils {

public static Dataset<Row> dropNestedColumn(Dataset<Row> dataFrame, String columnName) {
    final DataFrameFolder dataFrameFolder = new DataFrameFolder(dataFrame);
    Arrays.stream(dataFrame.schema().fields())
        .flatMap( f -> {
           if (columnName.startsWith(f.name() + ".")) {
               final Optional<Column> column = dropSubColumn(col(f.name()), f.dataType(), f.name(), columnName);
               if (column.isPresent()) {
                   return Stream.of(new Tuple2<>(f.name(), column));
               } else {
                   return Stream.empty();
               }
           } else {
               return Stream.empty();
           }
        }).forEachOrdered(colTuple -> dataFrameFolder.accept(colTuple));

    return dataFrameFolder.getDF();
}

private static Optional<Column> dropSubColumn(Column col, DataType colType, String fullColumnName, String dropColumnName) {
    Optional<Column> column = Optional.empty();
    if (!fullColumnName.equals(dropColumnName)) {
        if (colType instanceof StructType) {
            if (dropColumnName.startsWith(fullColumnName + ".")) {
                column = Optional.of(struct(getColumns(col, (StructType)colType, fullColumnName, dropColumnName)));
            }
        } else {
            column = Optional.of(col);
        }
    }

    return column;
}

private static Column[] getColumns(Column col, StructType colType, String fullColumnName, String dropColumnName) {
    return Arrays.stream(colType.fields())
        .flatMap(f -> {
                    final Optional<Column> column = dropSubColumn(col.getField(f.name()), f.dataType(),
                            fullColumnName + "." + f.name(), dropColumnName);
                    if (column.isPresent()) {
                        return Stream.of(column.get().alias(f.name()));
                    } else {
                        return Stream.empty();
                    }
                }
        ).toArray(Column[]::new);

}

private static class DataFrameFolder implements Consumer<Tuple2<String, Optional<Column>>> {
    private Dataset<Row> df;

    public DataFrameFolder(Dataset<Row> df) {
        this.df = df;
    }

    public Dataset<Row> getDF() {
        return df;
    }

    @Override
    public void accept(Tuple2<String, Optional<Column>> colTuple) {
        if (!colTuple._2().isPresent()) {
            df = df.drop(colTuple._1());
        } else {
            df = df.withColumn(colTuple._1(), colTuple._2().get());
        }
    }
}
}

Example usage:

private class Pojo {
    private String str;
    private Integer number;
    private List<String> strList;
    private Pojo2 pojo2;

    public String getStr() {
        return str;
    }

    public Integer getNumber() {
        return number;
    }

    public List<String> getStrList() {
        return strList;
    }

    public Pojo2 getPojo2() {
        return pojo2;
    }

}

private class Pojo2 {
    private String str;
    private Integer number;
    private List<String> strList;

    public String getStr() {
        return str;
    }

    public Integer getNumber() {
        return number;
    }

    public List<String> getStrList() {
        return strList;
    }

}

SQLContext context = new SQLContext(new SparkContext("local[1]", "test"));
Dataset<Row> df = context.createDataFrame(Collections.emptyList(), Pojo.class);
Dataset<Row> dfRes = DataFrameUtils.dropNestedColumn(df, "pojo2.str");

Original structure:

root
 |-- number: integer (nullable = true)
 |-- pojo2: struct (nullable = true)
 |    |-- number: integer (nullable = true)
 |    |-- str: string (nullable = true)
 |    |-- strList: array (nullable = true)
 |    |    |-- element: string (containsNull = true)
 |-- str: string (nullable = true)
 |-- strList: array (nullable = true)
 |    |-- element: string (containsNull = true)

After the drop:

root
 |-- number: integer (nullable = true)
 |-- pojo2: struct (nullable = false)
 |    |-- number: integer (nullable = true)
 |    |-- strList: array (nullable = true)
 |    |    |-- element: string (containsNull = true)
 |-- str: string (nullable = true)
 |-- strList: array (nullable = true)
 |    |-- element: string (containsNull = true)
2 votes, answered by Lior Chaga

Another way (in PySpark) would be to drop the features.feat1 column by recreating features:

from pyspark.sql.functions import col, arrays_zip

display(df
        .withColumn("features", arrays_zip("features.feat2", "features.feat3"))
        .withColumn("features", col("features").cast(schema))
)

where schema is the new schema (excluding features.feat1):

from pyspark.sql.types import StructType, StructField, StringType

schema = StructType(
    [
      StructField('feat2', StringType(), True), 
      StructField('feat3', StringType(), True), 
    ]
  )
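
The pruned struct type can also be derived from the existing schema rather than written out by hand. In Scala, a sketch of the same derivation:

import org.apache.spark.sql.types.StructType

// Take the current "features" struct type and filter out the dropped field.
val featuresType = df.schema("features").dataType.asInstanceOf[StructType]
val prunedSchema = StructType(featuresType.fields.filterNot(_.name == "feat1"))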
1 vote, answered by kiae

The Make Structs Easy* library makes it easy to perform operations like adding, dropping, and renaming fields inside nested data structures. The library is available in both Scala and Python.

Assuming you have the following data:

import org.apache.spark.sql.functions._

case class Features(feat1: String, feat2: String, feat3: String)
case class Record(features: Features, arrayOfFeatures: Seq[Features])

val df = Seq(
   Record(Features("hello", "world", "!"), Seq(Features("red", "orange", "yellow"), Features("green", "blue", "indigo")))
).toDF

df.printSchema

// root
//  |-- features: struct (nullable = true)
//  |    |-- feat1: string (nullable = true)
//  |    |-- feat2: string (nullable = true)
//  |    |-- feat3: string (nullable = true)
//  |-- arrayOfFeatures: array (nullable = true)
//  |    |-- element: struct (containsNull = true)
//  |    |    |-- feat1: string (nullable = true)
//  |    |    |-- feat2: string (nullable = true)
//  |    |    |-- feat3: string (nullable = true)

df.show(false)

// +-----------------+----------------------------------------------+
// |features         |arrayOfFeatures                               |
// +-----------------+----------------------------------------------+
// |[hello, world, !]|[[red, orange, yellow], [green, blue, indigo]]|
// +-----------------+----------------------------------------------+

Then dropping feat2 from features is as simple as:

import com.github.fqaiser94.mse.methods._

// drop feat2 from features
df.withColumn("features", $"features".dropFields("feat2")).show(false)

// +----------+----------------------------------------------+
// |features  |arrayOfFeatures                               |
// +----------+----------------------------------------------+
// |[hello, !]|[[red, orange, yellow], [green, blue, indigo]]|
// +----------+----------------------------------------------+

I noticed there were a lot of follow-up comments on other solutions asking whether there is a way to drop a nested column inside a struct that sits in an array. This can be done by combining the functions provided by the Make Structs Easy library with the functions provided by the spark-hofs library, as follows:

import za.co.absa.spark.hofs._

// drop feat2 in each element of arrayOfFeatures
df.withColumn("arrayOfFeatures", transform($"arrayOfFeatures", features => features.dropFields("feat2"))).show(false)

// +-----------------+--------------------------------+
// |features         |arrayOfFeatures                 |
// +-----------------+--------------------------------+
// |[hello, world, !]|[[red, yellow], [green, indigo]]|
// +-----------------+--------------------------------+
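
As a side note, since Spark 3.1 a dropFields method is available natively on Column, so on recent versions neither library is required:

// Spark 3.1+: drop a field from a struct column natively.
df.withColumn("features", $"features".dropFields("feat2"))

// Spark 3.1+: the transform higher-order function is also built in,
// covering the array-of-structs case.
import org.apache.spark.sql.functions.transform
df.withColumn("arrayOfFeatures", transform($"arrayOfFeatures", _.dropFields("feat2")))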

* Full disclosure: I am the author of the Make Structs Easy library referenced in this answer.

0 votes, answered by fqaiser94

Adding a Java version of the solution.

Utility class (pass your dataset and the nested column that needs to be dropped to the dropNestedColumn function).

(There were a few bugs in Lior Chaga's answer; I fixed them while trying to use it.)

import java.util.Arrays;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.stream.Stream;

import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.StructType;

import scala.Tuple2;

public class NestedColumnActions {

private static final String DOT = ".";

/*
dataset : dataset in which we want to drop columns
columnName : nested column that needs to be deleted
*/
public static Dataset<?> dropNestedColumn(Dataset<?> dataset, String columnName) {

    //Special case of top level column deletion
    if(!columnName.contains("."))
        return dataset.drop(columnName);

    final DataSetModifier dataFrameFolder = new DataSetModifier(dataset);
    Arrays.stream(dataset.schema().fields())
            .flatMap(f -> {
                //If the column name to be deleted starts with current top level column
                if (columnName.startsWith(f.name() + DOT)) {
                    //Get new column structure under f , expected after deleting the required column
                    final Optional<Column> column = dropSubColumn(functions.col(f.name()), f.dataType(), f.name(), columnName);
                    if (column.isPresent()) {
                        return Stream.of(new Tuple2<>(f.name(), column));
                    } else {
                        return Stream.empty();
                    }
                } else {
                    return Stream.empty();
                }
            })
            //Call accept function with Tuples of (top level column name, new column structure under it)
            .forEach(colTuple -> dataFrameFolder.accept(colTuple));

    return dataFrameFolder.getDataset();
}

private static Optional<Column> dropSubColumn(Column col, DataType colType, String fullColumnName, String dropColumnName) {
    Optional<Column> column = Optional.empty();
    if (!fullColumnName.equals(dropColumnName)) {
        if (colType instanceof StructType) {
            if (dropColumnName.startsWith(fullColumnName + DOT)) {
                column = Optional.of(functions.struct(getColumns(col, (StructType) colType, fullColumnName, dropColumnName)));
            }
            else {
                column = Optional.of(col);
            }
        } else {
            column = Optional.of(col);
        }
    }

    return column;
}

private static Column[] getColumns(Column col, StructType colType, String fullColumnName, String dropColumnName) {
    return Arrays.stream(colType.fields())
            .flatMap(f -> {
                        final Optional<Column> column = dropSubColumn(col.getField(f.name()), f.dataType(),
                                fullColumnName + "." + f.name(), dropColumnName);
                        if (column.isPresent()) {
                            return Stream.of(column.get().alias(f.name()));
                        } else {
                            return Stream.empty();
                        }
                    }
            ).toArray(Column[]::new);

}

private static class DataSetModifier implements Consumer<Tuple2<String, Optional<Column>>> {
    private Dataset<?> df;

    public DataSetModifier(Dataset<?> df) {
        this.df = df;
    }

    public Dataset<?> getDataset() {
        return df;
    }

    /*
    colTuple[0]:top level column name
    colTuple[1]:new column structure under it
   */
    @Override
    public void accept(Tuple2<String, Optional<Column>> colTuple) {
        if (!colTuple._2().isPresent()) {
            df = df.drop(colTuple._1());
        } else {
            df = df.withColumn(colTuple._1(), colTuple._2().get());
        }
    }
}

}

0 votes, answered by Suryakant Pandey

PySpark implementation:

from typing import List

import pyspark.sql.functions as sf
from pyspark.sql import Column, DataFrame
from pyspark.sql.types import StructType

def _drop_nested_field(
    schema: StructType,
    field_to_drop: str,
    parents: List[str] = None,
) -> Column:
    parents = list() if parents is None else parents
    src_col = lambda field_names: sf.col('.'.join(f'`{c}`' for c in field_names))

    if '.' in field_to_drop:
        root, subfield = field_to_drop.split('.', maxsplit=1)
        field_to_drop_from = next(f for f in schema.fields if f.name == root)

        return sf.struct(
            *[src_col(parents + [f.name]) for f in schema.fields if f.name != root],
            _drop_nested_field(
                schema=field_to_drop_from.dataType,
                field_to_drop=subfield,
                parents=parents + [root]
            ).alias(root)
        )

    else:
        # select all columns except the one to drop
        return sf.struct(
            *[src_col(parents + [f.name]) for f in schema.fields if f.name != field_to_drop],
        )


def drop_nested_field(
    df: DataFrame,
    field_to_drop: str,
) -> DataFrame:
    if '.' in field_to_drop:
        root, subfield = field_to_drop.split('.', maxsplit=1)
        field_to_drop_from = next(f for f in df.schema.fields if f.name == root)

        return df.withColumn(root, _drop_nested_field(
            schema=field_to_drop_from.dataType,
            field_to_drop=subfield,
            parents=[root]
        ))
    else:
        return df.drop(field_to_drop)


df = drop_nested_field(df, 'a.b.c.d')
0 votes, answered by M.Vanderlee