
Commit

chore: give example and ref on using ES env that requires basic auth (#489)

* chore: rm incomplete example

* chore: add basic auth example

* chore: add auth support

* docs: add ref to basic auth example

* Update installation.md
mmta authored Feb 6, 2024
1 parent fb7bd32 commit 07b3718
Showing 10 changed files with 393 additions and 136 deletions.
190 changes: 190 additions & 0 deletions deployments/docker/conf/logstash/conf-auth.d/80_siem.conf
@@ -0,0 +1,190 @@
#########################################
# From Dsiem plugins #
#########################################

filter {
  if [@metadata][siem_data_type] == "normalizedEvent" {
    uuid {
      target => "event_id"
      overwrite => true
    }
  }
}

output {
  if [@metadata][siem_data_type] == "normalizedEvent" {
    # to dsiem
    http {
      format => "json"
      http_method => "post"
      url => "http://dsiem:8080/events"
    }
    # to elasticsearch
    elasticsearch {
      hosts => "elasticsearch:9200"
      index => "siem_events-%{+YYYY.MM.dd}"
      document_id => "%{[event_id]}"
      action => "index"
      template => "/etc/logstash/index-template.d/siem_events-template.json"
      template_name => "siem_events"
      template_overwrite => true
      user => "${ELASTICSEARCH_USERNAME}"
      password => "${ELASTICSEARCH_PASSWORD}"
    }
  }
}

#########################################
# From Dsiem's Filebeat #
#########################################

filter {
  if [siem_data_type] == "alarm_events" {
    mutate {
      add_field => {
        "[@metadata][siem_data_type]" => "alarm_events"
      }
    }
    prune {
      whitelist_names => [ "@metadata", "@timestamp", "alarm_id", "event_id", "stage" ]
    }
  }

  if [siem_data_type] == "alarms" {
    date {
      match => [ "created_time", "UNIX" ]
      target => "timestamp"
    }
    date {
      match => [ "update_time", "UNIX" ]
      target => "updated_time"
    }
    mutate {
      add_field => {
        "[@metadata][alarm_id]" => "%{[alarm_id]}"
        "[@metadata][siem_data_type]" => "alarms"
      }
    }

    # set target_index to the actual index (perm_index) for an existing ID.
    # the lookup is done against the siem_alarms_id_lookup alias, which is assigned to all
    # new indices by default. This alias can then be managed separately to cover, for
    # example, only the last 3 indices.

    elasticsearch {
      hosts => ["elasticsearch:9200"]
      index => "siem_alarms_id_lookup"
      query => "_id:%{[alarm_id]}"
      fields => {
        "perm_index" => "[@metadata][target_index]"
      }
      user => "${ELASTICSEARCH_USERNAME}"
      password => "${ELASTICSEARCH_PASSWORD}"
    }

    # if the previous step failed or couldn't find a match (i.e. a new ID), use today's date
    if ![@metadata][target_index] {
      mutate {
        add_field => {
          "[@metadata][target_index]" => "siem_alarms-%{+YYYY.MM.dd}"
        }
      }
    }

    # the elasticsearch filter plugin only searches within _source, so the following
    # extra perm_index field is necessary
    mutate {
      add_field => {
        "perm_index" => "%{[@metadata][target_index]}"
      }
    }
    prune {
      whitelist_names => [ "timestamp", "@metadata", "title", "status", "kingdom", "category",
        "updated_time", "risk", "risk_class", "tag$", "src_ips", "dst_ips", "intel_hits", "vulnerabilities",
        "networks", "rules", "custom_data", "^perm_index$" ]
    }

    # debugging only:
    # mutate { add_field => { "alarm_id" => "%{[@metadata][alarm_id]}" }}
    # ruby { code => 'logger.info("Dsiem alarm processing: ready to output ID ", "value" => event.get("[@metadata][alarm_id]"))' }
  }
}

output {
  if [@metadata][siem_data_type] == "alarm_events" {
    elasticsearch {
      hosts => "elasticsearch:9200"
      index => "siem_alarm_events-%{+YYYY.MM.dd}"
      template => "/etc/logstash/index-template.d/siem_alarm_events-template.json"
      template_name => "siem_alarm_events"
      template_overwrite => true
      user => "${ELASTICSEARCH_USERNAME}"
      password => "${ELASTICSEARCH_PASSWORD}"
    }
  }

  # This one uses the update action and doc_as_upsert to allow partial updates
  if [@metadata][siem_data_type] == "alarms" {

    # debugging only:
    # elasticsearch { hosts => "elasticsearch:9200" index => "siem_alarms_debug" }

    elasticsearch {
      hosts => "elasticsearch:9200"
      index => "%{[@metadata][target_index]}"
      document_id => "%{[@metadata][alarm_id]}"
      template => "/etc/logstash/index-template.d/siem_alarms-template.json"
      template_name => "siem_alarms"
      template_overwrite => true
      user => "${ELASTICSEARCH_USERNAME}"
      password => "${ELASTICSEARCH_PASSWORD}"
      action => "update"
      # use doc_as_upsert and a script so that:
      # - an incoming doc is automatically indexed when document_id doesn't yet exist
      # - for existing docs, we can selectively discard out-of-order updates and status/tag
      #   updates, without having to use external versioning
      doc_as_upsert => true
      script_lang => "painless"
      script_type => "inline"
      # a lower risk value on an incoming update means it's out of order;
      # the same goes for updated_time, which is only checked when the incoming
      # update doesn't have a higher risk
      script => '
        int incoming_risk = params.event.get("risk");
        int existing_risk = ctx._source.risk;

        if (incoming_risk < existing_risk) {
          ctx.op = "none";
          return
        } else if (incoming_risk == existing_risk) {
          ZonedDateTime old_tm = ZonedDateTime.parse(ctx._source.updated_time);
          ZonedDateTime new_tm = ZonedDateTime.parse(params.event.get("updated_time"));
          if (new_tm.isBefore(old_tm)) {
            ctx.op = "none";
            return
          }
        }
        ctx._source.timestamp = params.event.get("timestamp");
        ctx._source.updated_time = params.event.get("updated_time");
        ctx._source.risk = incoming_risk;
        ctx._source.risk_class = params.event.get("risk_class");
        ctx._source.src_ips = params.event.get("src_ips");
        ctx._source.dst_ips = params.event.get("dst_ips");
        ctx._source.rules = params.event.get("rules");
        ctx._source.networks = params.event.get("networks");

        if (params.event.get("intel_hits") != null) {
          ctx._source.intel_hits = params.event.get("intel_hits")
        }

        if (params.event.get("vulnerabilities") != null) {
          ctx._source.vulnerabilities = params.event.get("vulnerabilities")
        }

        if (params.event.get("custom_data") != null) {
          ctx._source.custom_data = params.event.get("custom_data")
        }
      '
      retry_on_conflict => 5
    }
  }
}
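
The elasticsearch filter above resolves an existing alarm ID through the siem_alarms_id_lookup alias, so the alias membership determines how far back ID lookups reach. A hedged sketch of inspecting and trimming that alias with curl, assuming Elasticsearch is reachable on localhost:9200 with the same basic-auth credentials; the index name in the second call is a hypothetical example:

# list the indices currently behind the lookup alias
curl -s -u "$ES_USERNAME:$ES_PASSWORD" "http://localhost:9200/_alias/siem_alarms_id_lookup?pretty"

# drop an old index from the alias so ID lookups no longer cover it
# (siem_alarms-2024.01.01 is a hypothetical index name)
curl -s -u "$ES_USERNAME:$ES_PASSWORD" -H 'Content-Type: application/json' \
  -X POST "http://localhost:9200/_aliases" -d '
{
  "actions": [
    { "remove": { "index": "siem_alarms-2024.01.01", "alias": "siem_alarms_id_lookup" } }
  ]
}'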
13 changes: 13 additions & 0 deletions deployments/docker/conf/logstash/conf-auth.d/99_output.conf
@@ -0,0 +1,13 @@
output {
  if [application] == "suricata" {
    elasticsearch {
      hosts => ["elasticsearch:9200"]
      index => "suricata-%{+YYYY.MM.dd}"
      template => "/etc/logstash/index-template.d/suricata-template.json"
      template_name => "suricata"
      template_overwrite => true
      user => "${ELASTICSEARCH_USERNAME}"
      password => "${ELASTICSEARCH_PASSWORD}"
    }
  }
}
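
With both pipelines loaded, a quick end-to-end check that Logstash can authenticate against Elasticsearch is to list the indices these outputs create. A minimal sketch, assuming the stack runs locally with the same credentials exported for docker-compose:

curl -s -u "$ES_USERNAME:$ES_PASSWORD" \
  "http://localhost:9200/_cat/indices/suricata-*,siem_events-*,siem_alarms*?v"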
142 changes: 142 additions & 0 deletions deployments/docker/docker-compose-basic-auth.yml
@@ -0,0 +1,142 @@
# this requires ES_USERNAME and ES_PASSWORD to be set in the environment.
# you can set them in a .env file in the same directory as this compose file,
# or just export them in the shell before running docker-compose up:
#
# export ES_USERNAME=elastic
# export ES_PASSWORD=changeme
# export PROMISC_INTERFACE=eth0
# docker-compose -f docker-compose-basic-auth.yml up

version: "3"
services:

  elasticsearch:
    container_name: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
    environment:
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
      - cluster.routing.allocation.disk.threshold_enabled=false
      - xpack.monitoring.enabled=false
      - xpack.ml.enabled=false
      - xpack.graph.enabled=false
      - xpack.watcher.enabled=false
      - xpack.security.enabled=true
      - ELASTIC_PASSWORD=${ES_PASSWORD}
      - http.cors.enabled=true
      - http.cors.allow-credentials=true
      - http.cors.allow-headers=Content-Type,Content-Length,Authorization
      - http.cors.allow-origin=/https?:\/\/localhost(:[0-9]+)?/
    ports:
      - 9200:9200
    networks:
      - siemnet
    volumes:
      - es-data:/usr/share/elasticsearch/data

  logstash:
    container_name: logstash
    image: defenxor/docker-logstash:7.11.0
    command:
      - -f/etc/logstash/conf.d
    environment:
      - xpack.monitoring.enabled=false
      - ELASTICSEARCH_USERNAME=${ES_USERNAME}
      - ELASTICSEARCH_PASSWORD=${ES_PASSWORD}
    networks:
      - siemnet
    volumes:
      - ./conf/logstash/conf.d:/etc/logstash/conf.d
      - ./conf/logstash/conf-auth.d/80_siem.conf:/etc/logstash/conf.d/80_siem.conf
      - ./conf/logstash/conf-auth.d/99_output.conf:/etc/logstash/conf.d/99_output.conf
      - ./conf/logstash/index-template.d/es7:/etc/logstash/index-template.d
    depends_on:
      - elasticsearch

  kibana:
    container_name: kibana
    image: docker.elastic.co/kibana/kibana:7.11.0
    environment:
      - xpack.monitoring.ui.container.elasticsearch.enabled=false
      - ELASTICSEARCH_USERNAME=${ES_USERNAME}
      - ELASTICSEARCH_PASSWORD=${ES_PASSWORD}
    ports:
      - 5601:5601
    networks:
      - siemnet
    depends_on:
      - elasticsearch

  # use the dsiem name for the frontend to avoid changing logstash configuration
  dsiem:
    container_name: dsiem-frontend
    image: defenxor/dsiem:latest
    environment:
      - DSIEM_MODE=cluster-frontend
      - DSIEM_NODE=dsiem-frontend-0
      - DSIEM_MSQ=nats://dsiem-nats:4222
      - DSIEM_PORT=8080
      - DSIEM_DEBUG=true
      - DSIEM_WEB_ESURL=http://${ES_USERNAME}:${ES_PASSWORD}@localhost:9200
      - DSIEM_WEB_KBNURL=http://localhost:5601
    ports:
      - "8080:8080"
    networks:
      - siemnet

  dsiem-backend:
    container_name: dsiem-backend
    image: defenxor/dsiem:latest
    environment:
      - DSIEM_MODE=cluster-backend
      - DSIEM_NODE=dsiem-backend-0
      - DSIEM_DEBUG=true
      - DSIEM_FRONTEND=http://dsiem:8080
      - DSIEM_MSQ=nats://dsiem-nats:4222
    networks:
      - siemnet
    volumes:
      - dsiem-log:/dsiem/logs

  nats:
    container_name: dsiem-nats
    image: nats:1.3.0-linux
    networks:
      - siemnet

  filebeat:
    container_name: filebeat
    image: docker.elastic.co/beats/filebeat:7.11.0
    user: root
    networks:
      - siemnet
    volumes:
      - filebeat-data:/usr/share/filebeat/data
      - ./conf/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml
      - dsiem-log:/var/log/dsiem
      - suricata-log:/var/log/suricata

  suricata:
    container_name: suricata
    image: defenxor/suricata:1710
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    command:
      [
        "/bin/bash",
        "-c",
        "chown -R suri /var/log/suricata && /usr/bin/suricata -v -i ${PROMISC_INTERFACE}"
      ]
    volumes:
      - suricata-log:/var/log/suricata

volumes:
  filebeat-data:
  es-data:
  dsiem-log:
  suricata-log:

networks:
  siemnet:
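
As the header comment notes, this compose file expects ES_USERNAME, ES_PASSWORD, and PROMISC_INTERFACE to come from the shell environment or from a .env file next to it. A minimal sketch using the example values from that comment (changeme is a placeholder, not a recommended password):

cat > .env <<'EOF'
ES_USERNAME=elastic
ES_PASSWORD=changeme
PROMISC_INTERFACE=eth0
EOF

docker-compose -f docker-compose-basic-auth.yml up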