[instance-0000000007] failing shard [failed shard, shard [apm-7.4.1-span-000220][0], node[Ze2d_-UqTHGSkDwCTy5jSA], [R], recovery_source[peer recovery], s[INITIALIZING], a[id=iFMoRIOVTomQk1tutry9gg], unassigned_info[[reason=MANUAL_ALLOCATION], at[2020-06-04T07:31:34.142Z], delayed=false, details[failed shard on node [Ze2d_-UqTHGSkDwCTy5jSA]: failed recovery, failure RecoveryFailedException[[apm-7.4.1-span-000220][0]: Recovery failed from {instance-0000000005}{mllz1SCYQ56TTXNCVjz1xg}{x6dj819tTcGpWWLd-LenLw}{10.46.32.111}{10.46.32.111:19348}{dim}{logical_availability_zone=zone-1, server_name=instance-0000000005.2bbbae81d213405dad0a72515dc00fb3, availability_zone=asia-northeast1-a, xpack.installed=true, region=unknown-region, instance_configuration=gcp.data.highio.1} into {instance-0000000007}{Ze2d_-UqTHGSkDwCTy5jSA}{O_NApsw1R--8TLgVLgkxIQ}{10.46.32.98}{10.46.32.98:19637}{dim}{logical_availability_zone=zone-0, server_name=instance-0000000007.2bbbae81d213405dad0a72515dc00fb3, availability_zone=asia-northeast1-b, xpack.installed=true, instance_configuration=gcp.data.highio.1, region=unknown-region}]; nested: RemoteTransportException[[instance-0000000005][172.17.0.7:19348][internal:index/shard/recovery/start_recovery]]; nested: CircuitBreakingException[[parent] Data too large, data for [<transport_request>] would be [396196036/377.8mb], which is larger than the limit of [394910105/376.6mb], real usage: [396195488/377.8mb], new bytes reserved: [548/548b], usages [request=82032/80.1kb, fielddata=626/626b, in_flight_requests=548/548b, accounting=67296/65.7kb]]; ], allocation_status[no_attempt]], message [failed recovery], failure [RecoveryFailedException[[apm-7.4.1-span-000220][0]: Recovery failed from {instance-0000000005}{mllz1SCYQ56TTXNCVjz1xg}{x6dj819tTcGpWWLd-LenLw}{10.46.32.111}{10.46.32.111:19348}{dim}{logical_availability_zone=zone-1, server_name=instance-0000000005.2bbbae81d213405dad0a72515dc00fb3, availability_zone=asia-northeast1-a, xpack.installed=true, region=unknown-region, instance_configuration=gcp.data.highio.1} into {instance-0000000007}{Ze2d_-UqTHGSkDwCTy5jSA}{O_NApsw1R--8TLgVLgkxIQ}{10.46.32.98}{10.46.32.98:19637}{dim}{logical_availability_zone=zone-0, server_name=instance-0000000007.2bbbae81d213405dad0a72515dc00fb3, availability_zone=asia-northeast1-b, xpack.installed=true, instance_configuration=gcp.data.highio.1, region=unknown-region}]; nested: RemoteTransportException[[instance-0000000005][172.17.0.7:19348][internal:index/shard/recovery/start_recovery]]; nested: RecoveryEngineException[Phase[1] prepare target for translog failed]; nested: RemoteTransportException[[instance-0000000007][172.17.0.5:19637][internal:index/shard/recovery/prepare_translog]]; nested: CircuitBreakingException[[parent] Data too large, data for [<transport_request>] would be [401691300/383mb], which is larger than the limit of [394910105/376.6mb], real usage: [401690832/383mb], new bytes reserved: [468/468b], usages [request=65592/64kb, fielddata=626/626b, in_flight_requests=468/468b, accounting=67296/65.7kb]]; ], markAsStale [true]]
org.elasticsearch.indices.recovery.RecoveryFailedException: [apm-7.4.1-span-000220][0]: Recovery failed from {instance-0000000005}{mllz1SCYQ56TTXNCVjz1xg}{x6dj819tTcGpWWLd-LenLw}{10.46.32.111}{10.46.32.111:19348}{dim}{logical_availability_zone=zone-1, server_name=instance-0000000005.2bbbae81d213405dad0a72515dc00fb3, availability_zone=asia-northeast1-a, xpack.installed=true, region=unknown-region, instance_configuration=gcp.data.highio.1} into {instance-0000000007}{Ze2d_-UqTHGSkDwCTy5jSA}{O_NApsw1R--8TLgVLgkxIQ}{10.46.32.98}{10.46.32.98:19637}{dim}{logical_availability_zone=zone-0, server_name=instance-0000000007.2bbbae81d213405dad0a72515dc00fb3, availability_zone=asia-northeast1-b, xpack.installed=true, instance_configuration=gcp.data.highio.1, region=unknown-region}
    at org.elasticsearch.indices.recovery.PeerRecoveryTargetService.lambda$doRecovery$2(PeerRecoveryTargetService.java:245) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.indices.recovery.PeerRecoveryTargetService$1.handleException(PeerRecoveryTargetService.java:290) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.PlainTransportFuture.handleException(PlainTransportFuture.java:97) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleException(TransportService.java:1120) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleException(TransportService.java:1120) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.InboundHandler.lambda$handleException$2(InboundHandler.java:243) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:703) ~[elasticsearch-7.4.1.jar:7.4.1]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
    at java.lang.Thread.run(Thread.java:830) [?:?]
Caused by: org.elasticsearch.transport.RemoteTransportException: [instance-0000000005][172.17.0.7:19348][internal:index/shard/recovery/start_recovery]
Caused by: org.elasticsearch.index.engine.RecoveryEngineException: Phase[1] prepare target for translog failed
    at org.elasticsearch.indices.recovery.RecoverySourceHandler.lambda$prepareTargetForTranslog$34(RecoverySourceHandler.java:635) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.action.ActionListener$1.onFailure(ActionListener.java:70) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.action.ActionListener$1.onFailure(ActionListener.java:70) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.action.ActionListenerResponseHandler.handleException(ActionListenerResponseHandler.java:59) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.PlainTransportFuture.handleException(PlainTransportFuture.java:97) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleException(TransportService.java:1120) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleException(TransportService.java:1120) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.InboundHandler.lambda$handleException$2(InboundHandler.java:243) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:703) ~[elasticsearch-7.4.1.jar:7.4.1]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:830) ~[?:?]
Caused by: org.elasticsearch.transport.RemoteTransportException: [instance-0000000007][172.17.0.5:19637][internal:index/shard/recovery/prepare_translog]
Caused by: org.elasticsearch.common.breaker.CircuitBreakingException: [parent] Data too large, data for [<transport_request>] would be [401691300/383mb], which is larger than the limit of [394910105/376.6mb], real usage: [401690832/383mb], new bytes reserved: [468/468b], usages [request=65592/64kb, fielddata=626/626b, in_flight_requests=468/468b, accounting=67296/65.7kb]
    at org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.checkParentLimit(HierarchyCircuitBreakerService.java:343) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker.addEstimateBytesAndMaybeBreak(ChildMemoryCircuitBreaker.java:128) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.InboundHandler.handleRequest(InboundHandler.java:170) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.InboundHandler.messageReceived(InboundHandler.java:118) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.InboundHandler.inboundMessage(InboundHandler.java:102) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.TcpTransport.inboundMessage(TcpTransport.java:663) ~[elasticsearch-7.4.1.jar:7.4.1]
    at org.elasticsearch.transport.netty4.Netty4MessageChannelHandler.channelRead(Netty4MessageChannelHandler.java:62) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:352) ~[?:?]
    at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:328) ~[?:?]
    at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:302) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:352) ~[?:?]
    at io.netty.handler.logging.LoggingHandler.channelRead(LoggingHandler.java:241) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:352) ~[?:?]
    at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1475) ~[?:?]
    at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1224) ~[?:?]
    at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1271) ~[?:?]
    at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:505) ~[?:?]
    at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:444) ~[?:?]
    at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:283) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:352) ~[?:?]
    at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1421) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) ~[?:?]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) ~[?:?]
    at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:930) ~[?:?]
    at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163) ~[?:?]
    at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:697) ~[?:?]
    at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:597) ~[?:?]
    at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:551) ~[?:?]
    at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:511) ~[?:?]
    at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:918) ~[?:?]
    at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[?:?]
    at java.lang.Thread.run(Thread.java:830) ~[?:?]
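For context, both nested chains above end in the same root cause: the parent circuit breaker on the target node rejects the recovery's <transport_request> because heap usage (~383mb real usage) is already above the ~376.6mb parent limit, so the replica stays unassigned (reason=MANUAL_ALLOCATION, allocation_status[no_attempt]). Below is a minimal troubleshooting sketch, not part of the log and not a fix for the underlying heap pressure: it polls per-node parent-breaker usage and, once there is headroom, asks the cluster to retry the failed allocation. The endpoint URL and credentials (ES_URL, AUTH) are assumptions; adjust them for your deployment.

# Hypothetical sketch: inspect parent circuit-breaker usage per node and
# retry previously failed shard allocations once heap pressure has eased.
import requests

ES_URL = "http://localhost:9200"   # assumption: reachable cluster endpoint
AUTH = ("elastic", "changeme")     # assumption: basic-auth credentials

def parent_breaker_usage():
    """Return {node_name: (estimated_bytes, limit_bytes)} for the parent breaker."""
    stats = requests.get(f"{ES_URL}/_nodes/stats/breaker", auth=AUTH).json()
    usage = {}
    for node in stats["nodes"].values():
        parent = node["breakers"]["parent"]
        usage[node["name"]] = (parent["estimated_size_in_bytes"],
                               parent["limit_size_in_bytes"])
    return usage

def retry_failed_allocations():
    """Ask the cluster to retry shards whose allocation previously failed."""
    resp = requests.post(f"{ES_URL}/_cluster/reroute?retry_failed=true", auth=AUTH)
    resp.raise_for_status()
    return resp.json()["acknowledged"]

if __name__ == "__main__":
    for name, (used, limit) in parent_breaker_usage().items():
        print(f"{name}: parent breaker {used}/{limit} bytes ({used / limit:.0%} of limit)")
    # Uncomment only after the target node is comfortably below its limit;
    # otherwise the recovery will trip the same CircuitBreakingException again.
    # print(retry_failed_allocations())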