Потому что у вас не edge ngram, а обычный ngram. У вас вот что индексируется:
POST test/_analyze
{
"text": ["Рябов Артур Артур"],
"field": "localizedName.name"
}
{
"tokens" : [
{
"token" : "р",
"start_offset" : 0,
"end_offset" : 1,
"type" : "word",
"position" : 0
},
{
"token" : "ря",
"start_offset" : 0,
"end_offset" : 2,
"type" : "word",
"position" : 1
},
{
"token" : "ряб",
"start_offset" : 0,
"end_offset" : 3,
"type" : "word",
"position" : 2
},
{
"token" : "рябо",
"start_offset" : 0,
"end_offset" : 4,
"type" : "word",
"position" : 3
},
{
"token" : "рябов",
"start_offset" : 0,
"end_offset" : 5,
"type" : "word",
"position" : 4
},
{
"token" : "я",
"start_offset" : 1,
"end_offset" : 2,
"type" : "word",
"position" : 5
},
{
"token" : "яб",
"start_offset" : 1,
"end_offset" : 3,
"type" : "word",
"position" : 6
},
{
"token" : "ябо",
"start_offset" : 1,
"end_offset" : 4,
"type" : "word",
"position" : 7
},
{
"token" : "ябов",
"start_offset" : 1,
"end_offset" : 5,
"type" : "word",
"position" : 8
},
{
"token" : "б",
"start_offset" : 2,
"end_offset" : 3,
"type" : "word",
"position" : 9
},
{
"token" : "бо",
"start_offset" : 2,
"end_offset" : 4,
"type" : "word",
"position" : 10
},
{
"token" : "бов",
"start_offset" : 2,
"end_offset" : 5,
"type" : "word",
"position" : 11
},
{
"token" : "о",
"start_offset" : 3,
"end_offset" : 4,
"type" : "word",
"position" : 12
},
{
"token" : "ов",
"start_offset" : 3,
"end_offset" : 5,
"type" : "word",
"position" : 13
},
{
"token" : "в",
"start_offset" : 4,
"end_offset" : 5,
"type" : "word",
"position" : 14
},
{
"token" : "а",
"start_offset" : 6,
"end_offset" : 7,
"type" : "word",
"position" : 15
},
{
"token" : "ар",
"start_offset" : 6,
"end_offset" : 8,
"type" : "word",
"position" : 16
},
{
"token" : "арт",
"start_offset" : 6,
"end_offset" : 9,
"type" : "word",
"position" : 17
},
{
"token" : "арту",
"start_offset" : 6,
"end_offset" : 10,
"type" : "word",
"position" : 18
},
{
"token" : "артур",
"start_offset" : 6,
"end_offset" : 11,
"type" : "word",
"position" : 19
},
{
"token" : "р",
"start_offset" : 7,
"end_offset" : 8,
"type" : "word",
"position" : 20
},
{
"token" : "рт",
"start_offset" : 7,
"end_offset" : 9,
"type" : "word",
"position" : 21
},
{
"token" : "рту",
"start_offset" : 7,
"end_offset" : 10,
"type" : "word",
"position" : 22
},
{
"token" : "ртур",
"start_offset" : 7,
"end_offset" : 11,
"type" : "word",
"position" : 23
},
{
"token" : "т",
"start_offset" : 8,
"end_offset" : 9,
"type" : "word",
"position" : 24
},
{
"token" : "ту",
"start_offset" : 8,
"end_offset" : 10,
"type" : "word",
"position" : 25
},
{
"token" : "тур",
"start_offset" : 8,
"end_offset" : 11,
"type" : "word",
"position" : 26
},
{
"token" : "у",
"start_offset" : 9,
"end_offset" : 10,
"type" : "word",
"position" : 27
},
{
"token" : "ур",
"start_offset" : 9,
"end_offset" : 11,
"type" : "word",
"position" : 28
},
{
"token" : "р",
"start_offset" : 10,
"end_offset" : 11,
"type" : "word",
"position" : 29
},
{
"token" : "а",
"start_offset" : 12,
"end_offset" : 13,
"type" : "word",
"position" : 30
},
{
"token" : "ар",
"start_offset" : 12,
"end_offset" : 14,
"type" : "word",
"position" : 31
},
{
"token" : "арт",
"start_offset" : 12,
"end_offset" : 15,
"type" : "word",
"position" : 32
},
{
"token" : "арту",
"start_offset" : 12,
"end_offset" : 16,
"type" : "word",
"position" : 33
},
{
"token" : "артур",
"start_offset" : 12,
"end_offset" : 17,
"type" : "word",
"position" : 34
},
{
"token" : "р",
"start_offset" : 13,
"end_offset" : 14,
"type" : "word",
"position" : 35
},
{
"token" : "рт",
"start_offset" : 13,
"end_offset" : 15,
"type" : "word",
"position" : 36
},
{
"token" : "рту",
"start_offset" : 13,
"end_offset" : 16,
"type" : "word",
"position" : 37
},
{
"token" : "ртур",
"start_offset" : 13,
"end_offset" : 17,
"type" : "word",
"position" : 38
},
{
"token" : "т",
"start_offset" : 14,
"end_offset" : 15,
"type" : "word",
"position" : 39
},
{
"token" : "ту",
"start_offset" : 14,
"end_offset" : 16,
"type" : "word",
"position" : 40
},
{
"token" : "тур",
"start_offset" : 14,
"end_offset" : 17,
"type" : "word",
"position" : 41
},
{
"token" : "у",
"start_offset" : 15,
"end_offset" : 16,
"type" : "word",
"position" : 42
},
{
"token" : "ур",
"start_offset" : 15,
"end_offset" : 17,
"type" : "word",
"position" : 43
},
{
"token" : "р",
"start_offset" : 16,
"end_offset" : 17,
"type" : "word",
"position" : 44
}
]
}
Кроме того, вы фразу в поиске тоже через анализатор прогоняете. Я не знаю, чего вы этим хотите добиться, но результат, который вы получаете, вполне соответствует тому, что вы делаете.