[elasticsearch] elasticsearch.yml configuration properties...

Elastic/Elasticsearch 2014. 2. 17. 17:30

This is based on elasticsearch 1.0.0.

The defaults alone will get you by, but I'm sharing this in case anyone is curious.

Most of the values below are the default settings, so you will still need to tune them yourself.

Depending on which values you tune, performance can get better or worse. :)


#[SERVER]
bootstrap.mlockall: true            # avoid swapping; requires "ulimit -l unlimited"

#[CLUSTER]
#[ClusterName, NodeBuilder, TribeService settings]
cluster.name: gsshop_genie_cluster

#[ConcurrentRebalanceAllocationDecider]
cluster.routing.allocation.cluster_concurrent_rebalance: 2

#[ThrottlingAllocationDecider]
cluster.routing.allocation.node_initial_primaries_recoveries: 4
cluster.routing.allocation.node_concurrent_recoveries: 2

#[ClusterRebalanceAllocationDecider]
cluster.routing.allocation.allow_rebalance: "indices_all_active"

#[EnableAllocationDecider]
cluster.routing.allocation.enable: "all"
index.routing.allocation.enable: "all"
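
The cluster-level allocation settings above are also dynamic, so they are usually adjusted at runtime through the cluster update settings API rather than only in elasticsearch.yml. As a minimal sketch (illustrative values only, not part of the config above), the EnableAllocationDecider accepts "all", "primaries", "new_primaries" and "none", so shard allocation can be switched off temporarily, e.g. for a rolling restart:

# example only: stop shard allocation during maintenance, restore to "all" afterwards
cluster.routing.allocation.enable: "none"
index.routing.allocation.enable: "none"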

#[NODE]
#[DiscoveryNode, DiscoveryNodeService]
node.name: NODE_NAME
node.master: true
node.data: true
node.mode: network
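
In larger clusters the roles are often split instead of running every node as both master and data. A minimal sketch (example values, not part of the config above):

# dedicated master-eligible node
node.master: true
node.data: false

# data-only node
node.master: false
node.data: true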

#[INDEX - IndexDynamicSettingsModule]
#[IndexMetaData settings]
index.number_of_shards: 5
index.number_of_replicas: 1
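
These two only take effect when an index is created: number_of_replicas can still be changed on a live index, but number_of_shards cannot, so it is worth deciding it up front. A minimal sketch for a single-node development box (example values):

# example only: single-node development setup
index.number_of_shards: 1
index.number_of_replicas: 0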

#[settings]
index.mapper.dynamic: true

#[IndexStoreModule settings]
index.store.type: "mmapfs"

#[IndexDynamicSettingsModule settings]
index.compound_format: false
index.compound_on_flush: true
index.shard.check_on_startup: false        # true/fix/false

#[IndexFieldDataService, IndicesFilterCache settings]
index.fielddata.cache: "node"            # resident(in memory), soft(OOM control), node(default)
index.cache.filter.size: -1            # -1 = unbounded
index.cache.filter.expire: -1            # -1 = never expire

#[InternalIndexShard settings]
index.refresh_interval: "1s"

#[IndexDynamicSettingsModule, TieredMergePolicyProvider settings]
index.merge.async: true
index.merge.policy.expunge_deletes_allowed: 10
index.merge.policy.floor_segment: 2mb
index.merge.policy.max_merge_at_once: 10
index.merge.policy.max_merge_at_once_explicit: 30
index.merge.policy.max_merged_segment: 5gb
index.merge.policy.segments_per_tier: 10
index.merge.policy.reclaim_deletes_weight: 2.0

#[TranslogModule, TranslogService, FsTranslog settings]
index.translog.fs.type: simple
index.translog.flush_threshold_ops: 5000
index.translog.flush_threshold_size: 200mb
index.translog.flush_threshold_period: 30m
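
A flush is triggered as soon as any one of the three thresholds above is reached. For bulk-heavy indexing the thresholds are sometimes raised so flushes happen less often; a sketch with illustrative values only, not a recommendation:

# example only: flush less frequently during heavy bulk indexing
index.translog.flush_threshold_ops: 50000
index.translog.flush_threshold_size: 512mb
index.translog.flush_threshold_period: 30m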

#[LocalGatewayAllocator settings]
index.recovery.list_timeout: "30s"
index.recovery.initial_shards: "quorum"

#[InternalEngine, IndexingMemoryController settings]
indices.memory.index_buffer_size: 10%

#[RecoverySettings settings]
index.shard.recovery.file_chunk_size: 512kb
index.shard.recovery.translog_ops: 1000
index.shard.recovery.translog_size: 512kb
indices.recovery.max_bytes_per_sec: 0            # 0 = recovery throttling disabled
indices.recovery.concurrent_streams: 3
index.shard.recovery.concurrent_small_file_streams: 2

#[IndicesFieldDataCache settings]
indices.fielddata.cache.size: 20%
indices.fielddata.cache.expire: 15m

#[IndicesFilterCache settings]
indices.cache.filter.size: "20%"
indices.cache.filter.expire: 15m
indices.cache.clean_interval: "60s"

#[AutoCreateIndex settings]
action.auto_create_index: true

#[TransportNodesShutdownAction settings]
action.disable_shutdown: false        # REST API parameter: delay (default 200ms)

#[TransportShardReplicationOperationAction settings]
action.replication_type: async
action.write_consistency: quorum
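
replication_type can be "sync" or "async", and write_consistency "one", "quorum" or "all". With async the request is acknowledged once the primary shard has indexed it, without waiting for replicas, which trades durability for latency. A stricter sketch (example only):

# example only: wait for replicas before acknowledging writes
action.replication_type: sync
action.write_consistency: quorum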

#[NETWORK]
#[NetworkService settings]
network.host: NODE_IP
network.tcp.no_delay: true
network.tcp.reuse_address: true

#[Transport, NettyTransport settings]
transport.host: NODE_IP
transport.tcp.port: 9300
transport.tcp.connect_timeout: 30s
transport.tcp.compress: true

#[NettyHttpServerTransport settings]
http.port: 9200
http.max_content_length: 100mb
http.compression: true
http.compression_level: 6

#[InternalNode, TribeService settings]
http.enabled: true
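
A node that is never queried over REST (for example a pure data node sitting behind dedicated client nodes) can switch HTTP off entirely; a sketch, example only:

# example only: node that should not serve HTTP/REST traffic
http.enabled: false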

#[ThreadPool default settings]
threadpool.generic.type: cached
threadpool.generic.keep_alive: "30s"
threadpool.index.type: fixed
threadpool.index.size: 2                # availableProcessors
threadpool.index.queue_size: 200
threadpool.bulk.type: fixed
threadpool.bulk.size: 2                 # availableProcessors
threadpool.bulk.queue_size: 50
threadpool.get.type: fixed
threadpool.get.size: 2                    # availableProcessors
threadpool.get.queue_size: 1000
threadpool.search.type: fixed
threadpool.search.size: 6                # availableProcessors x 3
threadpool.search.queue_size: 1000
threadpool.suggest.type: fixed
threadpool.suggest.size: 2                    # availableProcessors
threadpool.suggest.queue_size: 1000
threadpool.percolate.type: fixed
threadpool.percolate.size: 2                    # availableProcessors
threadpool.percolate.queue_size: 1000
threadpool.management.type: scaling
threadpool.management.keep_alive: "5m"
threadpool.management.size: 5
threadpool.flush.type: scaling
threadpool.flush.keep_alive: "5m"
threadpool.flush.size: 2                # Math.min( ( ( availableProcessors + 1 ) / 2 ), 5 )
threadpool.merge.type: scaling
threadpool.merge.keep_alive: "5m"
threadpool.merge.size: 2                # Math.min( ( ( availableProcessors + 1 ) / 2 ), 5 )
threadpool.refresh.type: scaling
threadpool.refresh.keep_alive: "5m"
threadpool.refresh.size: 2                # Math.min( ( ( availableProcessors + 1 ) / 2 ), 10 )
threadpool.warmer.type: scaling
threadpool.warmer.keep_alive: "5m"
threadpool.warmer.size: 5
threadpool.snapshot.type: scaling
threadpool.snapshot.keep_alive: "5m"
threadpool.snapshot.size: 5
threadpool.optimize.type: fixed
threadpool.optimize.size: 1
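
The fixed pool sizes above follow the formulas in the comments, which are derived from availableProcessors. As a concrete sketch, on an 8-core machine those same formulas would give roughly:

# example only: values implied by the comments above on an 8-core machine
threadpool.index.size: 8                 # availableProcessors
threadpool.bulk.size: 8                  # availableProcessors
threadpool.get.size: 8                   # availableProcessors
threadpool.search.size: 24               # availableProcessors x 3
threadpool.flush.size: 4                 # Math.min( ( ( 8 + 1 ) / 2 ), 5 )
threadpool.refresh.size: 4               # Math.min( ( ( 8 + 1 ) / 2 ), 10 )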

#[GATEWAY]
#[GatewayModule, GatewayService settings]
gateway.type: local
gateway.recover_after_nodes: 1
gateway.recover_after_time: 1m
gateway.recover_after_data_nodes: 1
gateway.recover_after_master_nodes: 1
gateway.expected_nodes: 1
gateway.expected_data_nodes: 1
gateway.expected_master_nodes: 1
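
recover_after_* delays state recovery until at least that many nodes have joined (plus the grace period), while expected_* lets recovery start immediately once the whole cluster is back. The values above fit a single node; for a three-node cluster a sketch might look like this (illustrative values):

# example only: 3-node cluster
gateway.recover_after_nodes: 2
gateway.recover_after_time: 5m
gateway.expected_nodes: 3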

#[ZenDiscovery, ZenPingService, UnicastZenPing settings]
discovery.zen.ping.multicast.enabled: false
discovery.zen.minimum_master_nodes: 2            # with N master-eligible nodes: (N / 2 + 1)
discovery.zen.ping.timeout: 3s
discovery.zen.ping.unicast.hosts: ["NODE_IP:NODE_PORT", ..., "NODE_IP:NODE_PORT"]
discovery.zen.ping.unicast.concurrent_connects: 10
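
With multicast disabled, the unicast host list is the only way nodes can find each other, and minimum_master_nodes should follow the (N/2 + 1) rule from the comment above to avoid split brain. A sketch for three master-eligible nodes (hypothetical addresses):

# example only: 3 master-eligible nodes
discovery.zen.minimum_master_nodes: 2                 # (3 / 2) + 1
discovery.zen.ping.unicast.hosts: ["10.0.0.1:9300", "10.0.0.2:9300", "10.0.0.3:9300"]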

