[spark-postgre] csv special character
The previous CSV writer uniformized newlines and handled special-case characters; I suspect those cases could break the current implementation:
def dataframeToPgCsv(spark: SparkSession, dfSimple: Dataset[Row], schemaQueryComplex: StructType): DataFrame = {
  // register the input under a random temporary view name
  val tableTmp = "table_" + randomUUID.toString.replaceAll(".*-", "")
  dfSimple.createOrReplaceTempView(tableTmp)
  // sanS is a helper from the library (not shown here) applied to column and table names
  val sqlQuery = "SELECT " + schemaQueryComplex.map(a => {
    if (a.dataType.simpleString.indexOf("array") == 0) {
      // rewrite Spark's "[...]" array rendering into PostgreSQL's "{...}" literal form
      "REGEXP_REPLACE(REGEXP_REPLACE(CAST(" + sanS(a.name) + " AS string), '^.', '{'), '.$', '}') AS " + sanS(a.name)
    } else if (a.dataType.simpleString.indexOf("string") == 0) {
      // strip the NUL character (it breaks the PostgreSQL parser) and fold \r\n and \r into \n
      "REGEXP_REPLACE(REGEXP_REPLACE(" + sanS(a.name) + ", '\\u0000', ''), '\r\n|\r', '\n') AS " + sanS(a.name)
    } else {
      sanS(a.name)
    }
  }).mkString(", ") + " FROM " + sanS(tableTmp)
  spark.sql(sqlQuery)
}
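
For reference, a minimal sketch of data that exercises those cases, run through the same rewrites via the DataFrame API (the object name, column names, and local session are made up for illustration; only the regex logic mirrors the function above):

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, regexp_replace}

object SpecialCharRepro {
  def main(args: Array[String]): Unit = {
    // hypothetical local session just for the repro
    val spark = SparkSession.builder().master("local[*]").appName("csv-special-chars").getOrCreate()
    import spark.implicits._

    // rows carrying the characters the previous writer rewrote before COPY:
    // an embedded NUL byte, a lone \r, a \r\n pair, plus an array column
    val df = Seq(
      ("nul\u0000byte", Seq(1, 2)),
      ("carriage\rreturn", Seq(3)),
      ("windows\r\nnewline", Seq(4, 5))
    ).toDF("text_col", "array_col")

    // same rewrites as the quoted function, spelled out for two known columns:
    // strip NUL, fold \r\n and \r into \n, and turn Spark's "[1, 2]" into PostgreSQL's "{1, 2}"
    val cleaned = df.select(
      regexp_replace(regexp_replace(col("text_col"), "\u0000", ""), "\r\n|\r", "\n").as("text_col"),
      regexp_replace(regexp_replace(col("array_col").cast("string"), "^.", "{"), ".$", "}").as("array_col")
    )

    cleaned.show(truncate = false)
    spark.stop()
  }
}

Writing such rows to CSV without these rewrites and handing the files to PostgreSQL COPY is the scenario I suspect breaks the current implementation.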