If I have a Spark DataFrame of JSON documents with the following schema, does the es-hadoop connector support indexing them into Elasticsearch?
scala> df.printSchema
root
|-- address: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- location: string (nullable = true)
| | |-- std_city: string (nullable = true)
| | |-- std_state: string (nullable = true)
| | |-- std_street_name: string (nullable = true)
| | |-- std_street_number: string (nullable = true)
| | |-- std_zip: string (nullable = true)
|-- date_of_birth: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- dob_day: string (nullable = true)
| | |-- dob_full: string (nullable = true)
| | |-- dob_month: string (nullable = true)
| | |-- dob_year: string (nullable = true)
|-- names: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- first_name: string (nullable = true)
| | |-- first_name_list: string (nullable = true)
| | |-- fn_formalname_assocs: string (nullable = true)
| | |-- fn_nickname_assocs: string (nullable = true)
| | |-- last_name: string (nullable = true)
| | |-- last_name_list: string (nullable = true)
| | |-- ln_formalname_assocs: string (nullable = true)
| | |-- ln_nickname_assocs: string (nullable = true)
| | |-- middle_name: string (nullable = true)
| | |-- middle_name_list: string (nullable = true)
| | |-- mn_formalname_assocs: string (nullable = true)
| | |-- mn_nickname_assocs: string (nullable = true)
|-- phone: array (nullable = true)
| |-- element: string (containsNull = true)
|-- pm_id: string (nullable = true)
|-- id: string (nullable = true)
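
For reference, something along these lines is what I have in mind, using the connector's Spark SQL support. This is only a minimal sketch: the index name "people", the localhost connection settings, and the variable df are placeholders, and I have not confirmed this handles the nested arrays of structs correctly.

import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.sql._   // brings saveToEs into scope on DataFrame

val spark = SparkSession.builder()
  .appName("index-to-es")
  .config("es.nodes", "localhost")   // placeholder Elasticsearch host
  .config("es.port", "9200")         // placeholder port
  .getOrCreate()

// df is the DataFrame with the schema shown above; the hope is that each row,
// including the arrays of structs, is serialized as a JSON document.
df.saveToEs("people")   // "people" is a hypothetical index name

An alternative would be the DataFrame writer API, e.g. df.write.format("org.elasticsearch.spark.sql").save("people"), if that is the recommended route for this kind of nested schema.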