Note: This was written using elasticsearch 0.9.
Elasticsearch will automatically create an index (with default settings and mappings) for you when you POST a first document, for example (the "name" field is just an illustrative value):

$ curl -X POST 'http://localhost:9200/thegame/weapons/1' -d '{
    "_id": 1,
    "name": "longsword"
}'
"_id": 1,| If you want, I can try and help with pointers as to how to improve the indexing speed you get. Its quite easy to really increase it by using some simple guidelines, for example: | |
| - Use create in the index API (assuming you can). | |
| - Relax the real time aspect from 1 second to something a bit higher (index.engine.robin.refresh_interval). | |
| - Increase the indexing buffer size (indices.memory.index_buffer_size), it defaults to the value 10% which is 10% of the heap. | |
| - Increase the number of dirty operations that trigger automatic flush (so the translog won't get really big, even though its FS based) by setting index.translog.flush_threshold (defaults to 5000). | |
| - Increase the memory allocated to elasticsearch node. By default its 1g. | |
| - Start with a lower replica count (even 0), and then once the bulk loading is done, increate it to the value you want it to be using the update_settings API. This will improve things as possibly less shards will be allocated to each machine. | |
| - Increase the number of machines you have so |
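As a rough sketch of the last points, both the refresh interval and the replica count can be changed at runtime through the update settings API (the index name "thegame" and the values are illustrative, and the exact setting keys differ between elasticsearch versions):

$ curl -X PUT 'http://localhost:9200/thegame/_settings' -d '{
    "index": {
        "refresh_interval": "30s",
        "number_of_replicas": 0
    }
}'

Once the bulk load is finished, the same call can be used to restore the refresh interval and raise number_of_replicas to the value you actually want.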
#!/bin/bash
# Script used to setup elasticsearch. Can be run as a regular user (needs sudo).
ES_USER="elasticsearch"
ES_GROUP="$ES_USER"
ES_HOME="/usr/local/share/elasticsearch"
ES_CLUSTER="clustername"
ES_DATA_PATH="/var/data/elasticsearch"
ES_LOG_PATH="/var/log/elasticsearch"
ES_HEAP_SIZE=1024
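The snippet stops at the variable declarations; a hypothetical continuation (not part of the original script) would create the service account and the directories those variables point at:

# create the elasticsearch user/group and the data/log directories (illustrative)
sudo groupadd "$ES_GROUP"
sudo useradd -g "$ES_GROUP" -d "$ES_HOME" -s /sbin/nologin "$ES_USER"
sudo mkdir -p "$ES_DATA_PATH" "$ES_LOG_PATH"
sudo chown -R "$ES_USER:$ES_GROUP" "$ES_DATA_PATH" "$ES_LOG_PATH"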
cd ~
sudo yum update
sudo yum install java-1.7.0-openjdk.i686 -y
wget https://github.com/downloads/elasticsearch/elasticsearch/elasticsearch-0.19.9.tar.gz -O elasticsearch.tar.gz
tar -xf elasticsearch.tar.gz
rm elasticsearch.tar.gz
mv elasticsearch-* elasticsearch
sudo mv elasticsearch /usr/local/share
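With the tarball unpacked, the node can be started with the bundled launcher; a quick sanity check might look like this (-f kept the 0.x releases in the foreground, omit it to daemonize):

# start the node in the foreground
/usr/local/share/elasticsearch/bin/elasticsearch -f

# from another shell, check that the node answers
curl 'http://localhost:9200/_cluster/health?pretty=true'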
# stacktrace java as one message
multiline {
  #type => "all" # no type means for all inputs
  pattern => "(^.+Exception: .+)|(^\s+at .+)|(^\s+... \d+ more)|(^\s*Caused by:.+)"
  what => "previous"
}
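For context, this filter sits inside the filter section of a logstash configuration; a minimal sketch (the input path and type are illustrative, option names follow the 1.x-era multiline filter) could look like:

input {
  file {
    path => "/var/log/myapp/app.log"  # illustrative path
    type => "java"
  }
}

filter {
  # merge stacktrace continuation lines into the preceding log event
  multiline {
    pattern => "(^.+Exception: .+)|(^\s+at .+)|(^\s+... \d+ more)|(^\s*Caused by:.+)"
    what => "previous"
  }
}

output {
  # ship the merged events to elasticsearch (connection options omitted)
  elasticsearch { }
}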