Cheat Sheet
# list ports that ip was denied
"SRC=192.168.1.175" Denied | stats values(DPT) by SRC
# list destinations that IP went to on denied port
SRC=192.168.1.175" Denied "DPT=5229"| stats values(DST)
# All denied ports to this IP sorted by SRC IP
"DST=108.128.109.46" Denied | stats values(DPT) by SRC
# every destination trying to use this port, denied
"DPT=5229" Denied | stats values(DPT) by DST
# all ports that all ips were denied (my favorite)
Denied | stats values(DPT) by SRC
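# variant (sketch): rank each SRC by how many distinct ports it was denied on
Denied
| stats dc(DPT) as distinct_ports values(DPT) as ports by SRC
| sort - distinct_ports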
# all denied services and their destination IPs
Denied | stats values(DST) by DPT
# password attempts
fail* AND pass*
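# tighter variant (sketch; assumes sshd-style syslog where failures read "Failed password for <user> from <ip>")
sourcetype=syslog "Failed password"
| rex "Failed password for (?:invalid user )?(?<user>\S+) from (?<src_ip>\S+)"
| stats count by user src_ip
| sort - count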
# just highlight stuff
Denied | highlight DPT, DST
# how much happened per day?
Denied | stats count(DPT) by date_wday
# which denied ports are clogging your logs?
Denied | stats count(DPT) by DPT
# which SRC is clogging your logs?
Denied | stats count(DPT) by SRC
# Top 50 ip
| top limit=50 src_ip, dest_ip
# How Many 403 Errors
sourcetype="access*" status=403 | stats count
# AVG Kbps on hosts
sourcetype="access*"" | stats avg(kbps) by host
# Sparklines
* | stats sparkline count by host
# Email Query
sourcetype=sendmail_syslog
| rex "From: (?<from>.*) To: (?<to>.*)"
# Stats Overall
sourcetype="access*"| stats count by eventtype
# Web Access Time
sourcetype=access*
| stats sparkline(avg(spent),5m), count by file
| sort - count
# Data Trends
sourcetype=access* | timechart avg(bytes) as avg_bytes
| trendline sma5(avg_bytes) as moving_avg_bytes
| eval spike=if(avg_bytes > 2 * moving_avg_bytes, 10000, 0)
# Silent Hosts
| metadata type=hosts
| sort recentTime
| convert ctime(recentTime) as Latest_Time
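# variant (sketch): only hosts that have been silent for more than 24 hours
| metadata type=hosts
| where recentTime < now() - 86400
| sort recentTime
| convert ctime(recentTime) as Latest_Time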
# Concurrent Users
<your search here> sourcetype=login_data
| concurrency duration=ReqTime
| timechart max(concurrency)
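# if events only carry start and end times, derive the duration first (sketch; field names are assumptions)
sourcetype=login_data
| eval ReqTime = logout_time - login_time
| concurrency duration=ReqTime
| timechart max(concurrency) as peak_concurrent_users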
# Server Load
sourcetype=top load_avg>80
| stats max(load_avg) by host
# Servers Slow Response
sourcetype=weblog
| stats perc95(response_time) AS resp_time_95 by uri_path
| where resp_time_95>200
# Servers not being used very much
sourcetype=weblog
| stats count by host
| where count<10000
# Syslog Top Errors
sourcetype=syslog ERROR | top host | fields - percent
# User Logins
sourcetype=syslog [search login error | return user]
index=_audit action="login attempt"
| stats count by user info action _time
| sort - info
# denied traffic summary for a subnet
Denied 192.168.7.*
| stats count(SRC) values(DST) values(DPT) by SRC
# dashboards and when they were last accessed
| rest /servicesNS/-/-/data/ui/views splunk_server=*
| search isDashboard=1
| rename eai:acl.app as app
| fields title app
| join type=left title
[| search index=_internal sourcetype=splunk_web_access host=* user=*
| rex field=uri_path ".*/(?<title>[^/]*)$"
| stats latest(_time) as Time latest(user) as user by title
]
| where isnotnull(Time)
| eval Now=now()
| eval "Days since last accessed"=round((Now-Time)/86400,2)
| sort - "Days since last accessed"
| convert ctime(Time)
| fields - Now
# scheduled search performance
index=_internal sourcetype=scheduler result_count
| extract pairdelim=",", kvdelim="=", auto=f
| stats avg(result_count) min(result_count) max(result_count), sparkline avg(run_time) min(run_time) max(run_time) sum(run_time) values(host) AS hosts count AS execution_count by savedsearch_name, app
| join savedsearch_name type=outer
[| rest /servicesNS/-/-/saved/searches
| fields title eai:acl.owner cron_schedule dispatch.earliest_time dispatch.latest_time search
| rename title AS savedsearch_name eai:acl.app AS App eai:acl.owner AS Owner cron_schedule AS "Cron Schedule" dispatch.earliest_time AS "Dispatch Earliest Time" dispatch.latest_time AS "Dispatch Latest Time"]
| rename savedsearch_name AS "Saved Search Name" search AS "SPL Query" app AS App
| makemv delim="," values(host)
| sort - avg(run_time)
| table "Saved Search Name", App, Owner, "SPL Query" "Cron Schedule" hosts, execution_count, sparkline, *(result_count), sum(run_time) *(run_time)
# Search History
index=_audit action=search sourcetype=audittrail search_id=* NOT (user=splunk-system-user) search!="'typeahead*"
| rex "search\=\'(search|\s+)\s(?P<search>[\n\S\s]+?(?=\'))"
| rex field=search "sourcetype\s*=\s*\"*(?<SourcetypeUsed>[^\s\"]+)"
| rex field=search "index\s*=\s*\"*(?<IndexUsed>[^\s\"]+)"
| stats latest(_time) as Latest by user search SourcetypeUsed IndexUsed
| convert ctime(Latest)
# User Search Time
index=_audit splunk_server=local action=search (id=* OR search_id=*)
| eval search_id = if(isnull(search_id), id, search_id)
| replace '*' with * in search_id
| rex "search='search\s(?<search>.*?)',\sautojoin"
| search search_id!=scheduler_*
| convert num(total_run_time)
| eval user = if(user="n/a", null(), user)
| stats min(_time) as _time first(user) as user max(total_run_time) as total_run_time first(search) as search by search_id
| search search!=*_internal* search!=*_audit*
| chart sum(total_run_time) as "Total search time" count as "Search count" max(_time) as "Last use" by user
| fieldformat "Last use" = strftime('Last use', "%F %T.%Q")
# List of Forwarders
index="_internal" sourcetype=splunkd group=tcpin_connections NOT eventType=*
| eval Hostname=if(isnull(hostname), sourceHost,hostname),version=if(isnull(version),"pre 4.2",version),architecture=if(isnull(arch),"n/a",arch)
| stats count by Hostname version architecture
| sort + version
# License Consumption
index=_internal source=*license_usage.log type="Usage" splunk_server=*
| eval Date=strftime(_time, "%Y/%m/%d")
| eventstats sum(b) as volume by idx, Date
| eval MB=round(volume/1024/1024,5)
| timechart first(MB) AS volume by idx
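# daily total across all indexes (sketch; same log source, reported in GB)
index=_internal source=*license_usage.log type="Usage"
| timechart span=1d sum(b) as bytes
| eval GB=round(bytes/1024/1024/1024,3)
| fields - bytes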
# who to where
index=*
|stats count by DST,SRC
|sort -count
|stats list(DST) as Destination, list(count) as count by SRC
# top 5 by count
index=*
|stats count by DST,SRC
|sort -count
|stats list(DST) as Destination, list(count) as count, sum(count) as total by SRC
| sort -total
| head 5
| fields - total
# top 5 by count alternate
index=*
|stats count by DST,SRC,DPT
|sort -count
|stats list(DST) as DESTINATION, list(count) as TOTAL, list(DPT) as DPORT, sum(count) as total by SRC
| sort -total
| head 5
| fields - total
# anomalies
192.168.7.* denied
| cluster showcount=true | sort - cluster_count | head 20
# Pattern
192.168.7.*
| timechart avg(BYTES_SENT) as SENT
| trendline sma5(SENT) as moving_avg
| eval spike=if(SENT > 2 * moving_avg, 10000, 0)
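# alert-style variant (sketch): keep only the intervals where traffic exceeds twice the moving average
192.168.7.*
| timechart avg(BYTES_SENT) as SENT
| trendline sma5(SENT) as moving_avg
| where SENT > 2 * moving_avg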
# Daily License Usage
index=_internal source=*license_usage.log type="Usage"
| eval h=if(len(h)=0 OR isnull(h),"(SQUASHED)",h)
| eval s=if(len(s)=0 OR isnull(s),"(SQUASHED)",s)
| eval idx=if(len(idx)=0 OR isnull(idx),"(UNKNOWN)",idx)
| bin _time span=1d
| stats sum(b) as b by _time, pool, s, st, h, idx
| timechart span=1d sum(b) AS volumeB by idx fixedrange=false
| join type=outer _time [search index=_internal source=*license_usage.log type="RolloverSummary" earliest=-30d@d
| eval _time=_time - 43200
| bin _time span=1d
| stats latest(stacksz) AS "stack size" by _time]
| fields - _timediff
| foreach * [eval <<FIELD>>=round('<<FIELD>>'/1024/1024, 3)]
| fields - "stack size"
# Daily License Usage 2
index=_internal source=*license_usage.log type="Usage" splunk_server=* earliest=-1w@d
| eval Date=strftime(_time, "%Y/%m/%d")
| eventstats sum(b) as volume by idx, Date
| eval MB=round(volume/1024/1024,5)
| timechart first(MB) AS volume by idx
# scans between zones
index=xxx_paloalto sourcetype="pan:traffic" type=TRAFFIC
(src_zone!="Inet-WAN1" OR src_zone!="Inet-WAN2")
(dest_zone!="Inet-WAN1" OR dest_zone!="Inet-WAN2")
`Users_Subnets` `Not_Common_Ports_xxx`
| bin _time span=1s
| stats count as secCount, values(dest_zone) as dest_zone, values(dest_port) as dest_port,
values(user) as User by _time src_zone src_ip dest_ip
| where (src_zone != dest_zone)
| streamstats time_window=3s sum(secCount) as sumCount,
values(dest_port) as all_dest_ports by src_zone src_ip dest_ip
| where (mvcount(all_dest_ports) >= 100)
# port scan detection: 15-minute buckets rolled up by day
index=*
| bin _time span=15m
| stats count, values(DST) as dest, values(DPT) as dest_port, values(SRC) as source by _time SRC DST
| bin _time as Day span=1d
| eventstats values(dest_port) as all_dest_ports by Day source dest
| where (mvcount(dest_port) >= 5) OR (mvcount(all_dest_ports) >= 25)
# port and ip usage aggregate
index=*
| stats dc(DPT) as num_dest_port dc(DST) as num_dest_ip by SRC
| where num_dest_port > 20 or num_dest_ip > 20
| sort -num_dest_port
# Detect Port Scans
index=*
| bucket _time span=30s
| eventstats dc(DPT) AS PortsScanned by SRC, _time
| where PortsScanned > 20
| dedup SRC, PortsScanned
| table SRC, PortsScanned, _time
# show sources
index=_internal | chart count by sourcetype
index=_internal | chart sparkline count by sourcetype
| sort -count
# table of port scan count by IP
index=*
| where NOT cidrmatch("192.168.0.0/16",SRC)
| bucket span=1h _time
| eventstats dc(DPT) AS port_scan by SRC, _time
| where port_scan > 20
| timechart dc(DPT) by SRC
Visualization: choose a chart type for these searches and save them as reports.