Hi team,
I have below code, but run with error 'com.fasterxml.jackson.core.JsonParseException: Unrecognized token 'acknowledged': was expecting ('true', 'false' or 'null')'.
I can't figure out why this error happen? How to resolve this?
# load package
from elasticsearch import Elasticsearch
import pandas as pd
import time
root_path = "C:/elkstack/elasticsearch-7.0.1-windows-x86_64/data/"
raw_data_path = root_path + "testcase/"
csv_filename = "testcase0801.csv"
t0 = time.time()
# size of the bulk
chunksize = 5000
# open csv file
f = open(raw_data_path + csv_filename) # read csv
# parse csv with pandas
csvfile = pd.read_csv(f,iterator = True, chunksize = chunksize)
# init a new instance of the Elasticsearch client class
es = Elasticsearch('http://localhost:9200/')
# Init a mapping
mapping = {
"index_patterns": ["test*"],
"settings": {
"number_of_shards": 1,
"number_of_replicas": 1,
"refresh_interval": "5s"
},
"mappings": {
"_doc": {
"dynamic_templates": [
{
"All": {
"match": "*",
"match_mapping_type": "*",
"mapping": {
"type": "text",
"fields": {
"raw": {
"type": "keyword",
"ignore_above": 256}}}}}]
}}}
# put a template
mapping = es.indices.put_template(name = 'testcase', body = mapping, include_type_name = True )
# init index
try:
es.indices.delete("testcase")
except:
pass
es.indices.create(index = "testcase")
# start bulk indexing
print ("now indexing %s..."%(csv_filename))
for i, df in enumerate(csvfile):
print (i)
records = df.where(pd.notnull(df),None).T.to_dict()
list_records = [records[it] for it in records]
try:
es.bulk(index="testcase", doc_type="_doc", body=mapping)
except:
print ("error!, skip some test case sorry")
pass
i+=1
print ("done in %.3fs"%(time.time()-t0)).
The error is
[2019-09-02T16:15:31,721][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [testcase] for index patterns [test*] [2019-09-02T16:15:31,757][INFO ][o.e.c.m.MetaDataDeleteIndexService] [node-1] [testcase/q9RvOYpWSeSqX_aSflpO-A] deleting index [2019-09-02T16:15:31,885][INFO ][o.e.c.m.MetaDataCreateIndexService] [node-1] [testcase] creating index, cause [api], templates [testcase], shards [1]/[1], mappings [_doc] [2019-09-02T16:15:32,225][WARN ][r.suppressed ] [node-1] path: /testcase/_doc/_bulk, params: {index=testcase, type=_doc} com.fasterxml.jackson.core.JsonParseException: Unrecognized token 'acknowledged': was expecting ('true', 'false' or 'null') at [Source: org.elasticsearch.transport.netty4.ByteBufStreamInput@6ba3dd67; line: 1, column: 25] at com.fasterxml.jackson.core.JsonParser._constructError(JsonParser.java:1702) ~[jackson-core-2.8.11.jar:2.8.11] at com.fasterxml.jackson.core.base.ParserMinimalBase._reportError(ParserMinimalBase.java:558) ~[jackson-core-2.8.11.jar:2.8.11] at com.fasterxml.jackson.core.json.UTF8StreamJsonParser._reportInvalidToken(UTF8StreamJsonParser.java:3528) ~[jackson-core-2.8.11.jar:2.8.11] at com.fasterxml.jackson.core.json.UTF8StreamJsonParser._handleUnexpectedValue(UTF8StreamJsonParser.java:2686) ~[jackson-core-2.8.11.jar:2.8.11] at com.fasterxml.jackson.core.json.UTF8StreamJsonParser._nextTokenNotInObject(UTF8StreamJsonParser.java:878) ~[jackson-core-2.8.11.jar:2.8.11] at com.fasterxml.jackson.core.json.UTF8StreamJsonParser.nextToken(UTF8StreamJsonParser.java:772) ~[jackson-core-2.8.11.jar:2.8.11] at org.elasticsearch.common.xcontent.json.JsonXContentParser.nextToken(JsonXContentParser.java:52) ~[elasticsearch-x-content-7.0.1.jar:7.0.1] at org.elasticsearch.action.bulk.BulkRequestParser.parse(BulkRequestParser.java:159) ~[elasticsearch-7.0.1.jar:7.0.1] at org.elasticsearch.action.bulk.BulkRequest.add(BulkRequest.java:294) ~[elasticsearch-7.0.1.jar:7.0.1] at org.elasticsearch.rest.action.document.RestBulkAction.prepareRequest(RestBulkAction.java:97) ~[elasticsearch-7.0.1.jar:7.0.1] at org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:92) ~[elasticsearch-7.0.1.jar:7.0.1]