Remove backends subsystem (#12146)

parent 908d369195
commit 52456f5baf

.github/CODEOWNERS (vendored): 5 changes
@@ -8,11 +8,6 @@
 .travis/ @Ferroin @iigorkarpov @maneamarius @kaskavel
 .github/ @Ferroin @iigorkarpov @maneamarius @kaskavel
 aclk/ @stelfrag @underhood
-backends/ @thiagoftsm @vlvkobal
-backends/graphite/ @thiagoftsm @vlvkobal
-backends/json/ @thiagoftsm @vlvkobal
-backends/opentsdb/ @thiagoftsm @vlvkobal
-backends/prometheus/ @vlvkobal @thiagoftsm
 build/ @Ferroin @iigorkarpov @maneamarius
 contrib/debian @Ferroin @iigorkarpov @maneamarius
 collectors/ @vlvkobal
.github/labeler.yml (vendored): 4 changes

@@ -15,10 +15,6 @@ ACLK:
   - aclk/**/*
   - mqtt_websockets

-area/backends:
-  - backends/*
-  - backends/**/*
-
 area/claim:
   - claim/*

CMakeLists.txt:

@@ -745,19 +745,6 @@ set(STREAMING_PLUGIN_FILES
         streaming/sender.c
         )

-set(BACKENDS_PLUGIN_FILES
-        backends/backends.c
-        backends/backends.h
-        backends/graphite/graphite.c
-        backends/graphite/graphite.h
-        backends/json/json.c
-        backends/json/json.h
-        backends/opentsdb/opentsdb.c
-        backends/opentsdb/opentsdb.h
-        backends/prometheus/backend_prometheus.c
-        backends/prometheus/backend_prometheus.h
-        )
-
 set(CLAIM_PLUGIN_FILES
         claim/claim.c
         claim/claim.h

@@ -884,23 +871,6 @@ set(MONGODB_EXPORTING_FILES
         exporting/mongodb/mongodb.h
         )

-set(KINESIS_BACKEND_FILES
-        backends/aws_kinesis/aws_kinesis.c
-        backends/aws_kinesis/aws_kinesis.h
-        backends/aws_kinesis/aws_kinesis_put_record.cc
-        backends/aws_kinesis/aws_kinesis_put_record.h
-        )
-
-set(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES
-        backends/prometheus/remote_write/remote_write.cc
-        backends/prometheus/remote_write/remote_write.h
-        )
-
-set(MONGODB_BACKEND_FILES
-        backends/mongodb/mongodb.c
-        backends/mongodb/mongodb.h
-        )
-
 set(DAEMON_FILES
         daemon/buildinfo.c
         daemon/buildinfo.h

@@ -960,7 +930,6 @@ set(NETDATA_FILES
         collectors/all.h
         ${DAEMON_FILES}
         ${API_PLUGIN_FILES}
-        ${BACKENDS_PLUGIN_FILES}
         ${EXPORTING_ENGINE_FILES}
         ${CHECKS_PLUGIN_FILES}
         ${HEALTH_PLUGIN_FILES}
@@ -997,25 +966,25 @@ add_definitions(
 )

 # -----------------------------------------------------------------------------
-# kinesis backend
+# kinesis exporting connector

 IF(KINESIS_LIBRARIES AND AWS_CORE_LIBRARIES AND HAVE_AWS_EVENT_STREAM AND HAVE_AWS_COMMON AND HAVE_AWS_CHECKSUMS AND
    CRYPTO_LIBRARIES AND SSL_LIBRARIES AND CURL_LIBRARIES)
-    SET(ENABLE_BACKEND_KINESIS True)
+    SET(ENABLE_EXPORTING_KINESIS True)
 ELSE()
-    SET(ENABLE_BACKEND_KINESIS False)
+    SET(ENABLE_EXPORTING_KINESIS False)
 ENDIF()

-IF(ENABLE_BACKEND_KINESIS)
-    message(STATUS "kinesis backend: enabled")
-    list(APPEND NETDATA_FILES ${KINESIS_BACKEND_FILES} ${KINESIS_EXPORTING_FILES})
+IF(ENABLE_EXPORTING_KINESIS)
+    message(STATUS "kinesis exporting: enabled")
+    list(APPEND NETDATA_FILES ${KINESIS_EXPORTING_FILES})
     list(APPEND NETDATA_COMMON_LIBRARIES ${KINESIS_LIBRARIES} ${AWS_CORE_LIBRARIES}
          ${CRYPTO_LIBRARIES} ${SSL_LIBRARIES} ${CURL_LIBRARIES})
     list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${KINESIS_INCLUDE_DIRS} ${AWS_CORE_INCLUDE_DIRS}
          ${CRYPTO_INCLUDE_DIRS} ${SSL_INCLUDE_DIRS} ${CURL_INCLUDE_DIRS})
     list(APPEND NETDATA_COMMON_CFLAGS ${CRYPTO_CFLAGS_OTHER} ${SSL_CFLAGS_OTHER} ${CURL_CFLAGS_OTHER})
 ELSE()
-    message(STATUS "kinesis backend: disabled (requires AWS SDK for C++)")
+    message(STATUS "kinesis exporting: disabled (requires AWS SDK for C++)")
 ENDIF()

 # -----------------------------------------------------------------------------

@@ -1038,16 +1007,16 @@ ELSE()
 ENDIF()

 # -----------------------------------------------------------------------------
-# prometheus remote write backend
+# prometheus remote write exporting connector

 IF(PROTOBUF_LIBRARIES AND SNAPPY_LIBRARIES)
-    SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE True)
+    SET(ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE True)
 ELSE()
-    SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE False)
+    SET(ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE False)
 ENDIF()

-IF(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
-    message(STATUS "prometheus remote write backend: enabled")
+IF(ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE)
+    message(STATUS "prometheus remote write exporting: enabled")

     find_package(Protobuf REQUIRED)

@@ -1083,26 +1052,26 @@ IF(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)

     protobuf_remote_write_generate_cpp(PROTO_SRCS PROTO_HDRS exporting/prometheus/remote_write/remote_write.proto)

-    list(APPEND NETDATA_FILES ${PROMETHEUS_REMOTE_WRITE_BACKEND_FILES} ${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
+    list(APPEND NETDATA_FILES ${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
     list(APPEND NETDATA_COMMON_LIBRARIES ${PROTOBUF_LIBRARIES} ${SNAPPY_LIBRARIES})
     list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS} ${SNAPPY_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
     list(APPEND NETDATA_COMMON_CFLAGS ${PROTOBUF_CFLAGS_OTHER} ${SNAPPY_CFLAGS_OTHER})
 ELSE()
-    message(STATUS "prometheus remote write backend: disabled (requires protobuf and snappy libraries)")
+    message(STATUS "prometheus remote write exporting: disabled (requires protobuf and snappy libraries)")
 ENDIF()

 # -----------------------------------------------------------------------------
-# mongodb backend
+# mongodb exporting connector

 IF(MONGOC_LIBRARIES)
-    message(STATUS "mongodb backend: enabled")
+    message(STATUS "mongodb exporting: enabled")

-    list(APPEND NETDATA_FILES ${MONGODB_BACKEND_FILES} ${MONGODB_EXPORTING_FILES})
+    list(APPEND NETDATA_FILES ${MONGODB_EXPORTING_FILES})
     list(APPEND NETDATA_COMMON_LIBRARIES ${MONGOC_LIBRARIES})
     list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${MONGOC_INCLUDE_DIRS})
     list(APPEND NETDATA_COMMON_CFLAGS ${MONGOC_CFLAGS_OTHER})
 ELSE()
-    message(STATUS "mongodb backend: disabled (requires mongoc library)")
+    message(STATUS "mongodb exporting: disabled (requires mongoc library)")
 ENDIF()

 set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} m ${CMAKE_THREAD_LIBS_INIT})

@@ -1223,7 +1192,7 @@ ELSEIF(MACOS)

 ENDIF()

-IF(ENABLE_BACKEND_KINESIS OR ENABLE_EXPORTING_PUBSUB OR ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
+IF(ENABLE_EXPORTING_KINESIS OR ENABLE_EXPORTING_PUBSUB OR ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE)
     set_property(TARGET netdata PROPERTY CXX_STANDARD 11)
     set_property(TARGET netdata PROPERTY CMAKE_CXX_STANDARD_REQUIRED ON)
 ENDIF()

@@ -1397,7 +1366,7 @@ if(BUILD_TESTING)
 set(KINESIS_LINK_OPTIONS)
 set(PUBSUB_LINK_OPTIONS)
 set(MONGODB_LINK_OPTIONS)
-if(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
+if(ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE)
     list(APPEND EXPORTING_ENGINE_FILES ${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
     list(
         APPEND PROMETHEUS_REMOTE_WRITE_LINK_OPTIONS

@@ -1407,7 +1376,7 @@ if(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
         -Wl,--wrap=add_metric
     )
 endif()
-if(ENABLE_BACKEND_KINESIS)
+if(ENABLE_EXPORTING_KINESIS)
     list(APPEND EXPORTING_ENGINE_FILES ${KINESIS_EXPORTING_FILES})
     list(
         APPEND KINESIS_LINK_OPTIONS
Makefile.am: 58 changes

@@ -99,7 +99,6 @@ dist_noinst_SCRIPTS = \
 # Compile netdata binaries

 SUBDIRS += \
-    backends \
     collectors \
     daemon \
     database \

@@ -580,19 +579,6 @@ WEB_PLUGIN_FILES = \
     web/server/static/static-threaded.h \
     $(NULL)

-BACKENDS_PLUGIN_FILES = \
-    backends/backends.c \
-    backends/backends.h \
-    backends/graphite/graphite.c \
-    backends/graphite/graphite.h \
-    backends/json/json.c \
-    backends/json/json.h \
-    backends/opentsdb/opentsdb.c \
-    backends/opentsdb/opentsdb.h \
-    backends/prometheus/backend_prometheus.c \
-    backends/prometheus/backend_prometheus.h \
-    $(NULL)
-
 CLAIM_FILES = \
     claim/claim.c \
     claim/claim.h \

@@ -831,23 +817,6 @@ MONGODB_EXPORTING_FILES = \
     exporting/mongodb/mongodb.h \
     $(NULL)

-KINESIS_BACKEND_FILES = \
-    backends/aws_kinesis/aws_kinesis.c \
-    backends/aws_kinesis/aws_kinesis.h \
-    backends/aws_kinesis/aws_kinesis_put_record.cc \
-    backends/aws_kinesis/aws_kinesis_put_record.h \
-    $(NULL)
-
-PROMETHEUS_REMOTE_WRITE_BACKEND_FILES = \
-    backends/prometheus/remote_write/remote_write.cc \
-    backends/prometheus/remote_write/remote_write.h \
-    $(NULL)
-
-MONGODB_BACKEND_FILES = \
-    backends/mongodb/mongodb.c \
-    backends/mongodb/mongodb.h \
-    $(NULL)
-
 DAEMON_FILES = \
     daemon/buildinfo.c \
     daemon/buildinfo.h \

@@ -877,7 +846,6 @@ NETDATA_FILES = \
     $(DAEMON_FILES) \
     $(LIBNETDATA_FILES) \
     $(API_PLUGIN_FILES) \
-    $(BACKENDS_PLUGIN_FILES) \
     $(EXPORTING_ENGINE_FILES) \
     $(CHECKS_PLUGIN_FILES) \
     $(HEALTH_PLUGIN_FILES) \
@@ -1065,8 +1033,8 @@ if ENABLE_PLUGIN_SLABINFO
     $(NULL)
 endif

-if ENABLE_BACKEND_KINESIS
-    netdata_SOURCES += $(KINESIS_BACKEND_FILES) $(KINESIS_EXPORTING_FILES)
+if ENABLE_EXPORTING_KINESIS
+    netdata_SOURCES += $(KINESIS_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
 endif

@@ -1075,17 +1043,17 @@ if ENABLE_EXPORTING_PUBSUB
     netdata_LDADD += $(OPTIONAL_PUBSUB_LIBS)
 endif

-if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
-    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES) $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
+if ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE
+    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS) \
         $(OPTIONAL_PROTOBUF_LIBS) \
         $(NULL)
-    BACKEND_PROMETHEUS_BUILT_SOURCES = \
+    EXPORTING_PROMETHEUS_BUILT_SOURCES = \
         exporting/prometheus/remote_write/remote_write.pb.cc \
         exporting/prometheus/remote_write/remote_write.pb.h \
         $(NULL)
-    BUILT_SOURCES += $(BACKEND_PROMETHEUS_BUILT_SOURCES)
-    nodist_netdata_SOURCES += $(BACKEND_PROMETHEUS_BUILT_SOURCES)
+    BUILT_SOURCES += $(EXPORTING_PROMETHEUS_BUILT_SOURCES)
+    nodist_netdata_SOURCES += $(EXPORTING_PROMETHEUS_BUILT_SOURCES)

 exporting/prometheus/remote_write/remote_write.pb.cc \
 exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote_write/remote_write.proto

@@ -1093,8 +1061,8 @@ exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote

 endif

-if ENABLE_BACKEND_MONGODB
-    netdata_SOURCES += $(MONGODB_BACKEND_FILES) $(MONGODB_EXPORTING_FILES)
+if ENABLE_EXPORTING_MONGODB
+    netdata_SOURCES += $(MONGODB_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
 endif

@@ -1217,7 +1185,7 @@ if ENABLE_UNITTESTS
     $(TEST_LDFLAGS) \
     $(NULL)
 exporting_tests_exporting_engine_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
-if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
+if ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE
 exporting_tests_exporting_engine_testdriver_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
 exporting_tests_exporting_engine_testdriver_LDADD += \
     $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS) \

@@ -1229,9 +1197,9 @@ if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
     -Wl,--wrap=add_label \
     -Wl,--wrap=add_metric \
     $(NULL)
-nodist_exporting_tests_exporting_engine_testdriver_SOURCES = $(BACKEND_PROMETHEUS_BUILT_SOURCES)
+nodist_exporting_tests_exporting_engine_testdriver_SOURCES = $(EXPORTING_PROMETHEUS_BUILT_SOURCES)
 endif
-if ENABLE_BACKEND_KINESIS
+if ENABLE_EXPORTING_KINESIS
 exporting_tests_exporting_engine_testdriver_SOURCES += $(KINESIS_EXPORTING_FILES)
 exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_KINESIS_LIBS)
 exporting_tests_exporting_engine_testdriver_LDFLAGS += \

@@ -1251,7 +1219,7 @@ if ENABLE_EXPORTING_PUBSUB
     -Wl,--wrap=pubsub_get_result \
     $(NULL)
 endif
-if ENABLE_BACKEND_MONGODB
+if ENABLE_EXPORTING_MONGODB
 exporting_tests_exporting_engine_testdriver_SOURCES += $(MONGODB_EXPORTING_FILES)
 exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_MONGOC_LIBS)
 exporting_tests_exporting_engine_testdriver_LDFLAGS += \
Deleted file: backends/Makefile.am
@@ -1,22 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

SUBDIRS = \
    graphite \
    json \
    opentsdb \
    prometheus \
    aws_kinesis \
    mongodb \
    $(NULL)

dist_noinst_DATA = \
    README.md \
    WALKTHROUGH.md \
    $(NULL)

dist_noinst_SCRIPTS = \
    nc-backend.sh \
    $(NULL)
Deleted file: backends/README.md
@@ -1,236 +0,0 @@
<!--
title: "Metrics long term archiving"
custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/README.md
-->

# Metrics long term archiving

> ⚠️ The backends system is now deprecated in favor of the [exporting engine](/exporting/README.md).

Netdata supports backends for archiving the metrics, or providing long term dashboards, using Grafana or other tools,
like this:



Since Netdata collects thousands of metrics per server per second, which would easily congest any backend server when
several Netdata servers are sending data to it, Netdata allows sending metrics at a lower frequency, by resampling them.

So, although Netdata collects metrics every second, it can send to the backend servers averages or sums every X seconds
(though it can send them per second if you need it to).

## features

1. Supported backends

   - **graphite** (`plaintext interface`, used by **Graphite**, **InfluxDB**, **KairosDB**, **Blueflood**,
     **ElasticSearch** via logstash tcp input and the graphite codec, etc)

     metrics are sent to the backend server as `prefix.hostname.chart.dimension`. `prefix` is configured below,
     `hostname` is the hostname of the machine (it can also be configured).

   - **opentsdb** (`telnet or HTTP interfaces`, used by **OpenTSDB**, **InfluxDB**, **KairosDB**, etc)

     metrics are sent to opentsdb as `prefix.chart.dimension` with tag `host=hostname`.

   - **json** document DBs

     metrics are sent to a document db, `JSON` formatted.

   - **prometheus** is described at the [prometheus page](/backends/prometheus/README.md) since it pulls data from
     Netdata.

   - **prometheus remote write** (a binary snappy-compressed protocol buffer encoding over HTTP used by
     **Elasticsearch**, **Gnocchi**, **Graphite**, **InfluxDB**, **Kafka**, **OpenTSDB**, **PostgreSQL/TimescaleDB**,
     **Splunk**, **VictoriaMetrics**, and a lot of other [storage
     providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage))

     metrics are labeled in the format Netdata uses for the [plaintext prometheus
     protocol](/backends/prometheus/README.md). Notes on using the remote write backend are [here](/backends/prometheus/remote_write/README.md).

   - **TimescaleDB** via a [community-built connector](/backends/TIMESCALE.md) that takes JSON streams from a Netdata
     client and writes them to a TimescaleDB table.

   - **AWS Kinesis Data Streams**

     metrics are sent to the service in `JSON` format.

   - **MongoDB**

     metrics are sent to the database in `JSON` format.

2. Only one backend may be active at a time.

3. Netdata can filter metrics (at the chart level), to send only a subset of the collected metrics.

4. Netdata supports three modes of operation for all backends:

   - `as-collected` sends to backends the metrics as they are collected, in the units they are collected. So,
     counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For example, to
     calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.

   - `average` sends to backends normalized metrics from the Netdata database. In this mode, all metrics are sent as
     gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but you will not
     be able to copy and paste queries from other sources to convert units. For example, CPU utilization percentage
     is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage to the
     backend.

   - `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the backend. So, if
     Netdata is configured to send data to the backend every 10 seconds, the sum of the 10 values shown on the
     Netdata charts will be used (a small worked example follows this list).

   Time-series databases suggest collecting the raw values (`as-collected`). If you plan to invest in building your
   monitoring around a time-series database and you already know (or you will invest in learning) how to convert units
   and normalize the metrics in Grafana or other visualization tools, we suggest using `as-collected`.

   If, on the other hand, you just need long term archiving of Netdata metrics and you plan to mainly work with
   Netdata, we suggest using `average`. It decouples visualization from data collection, so it will generally be a lot
   simpler. Furthermore, if you use `average`, the charts shown in the back-end will match exactly what you see in
   Netdata, which is not necessarily true for the other modes of operation.

5. This code is smart enough not to slow down Netdata, independently of the speed of the backend server.

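As a small worked example of the difference between these modes: with `update every = 10` and a dimension that is collected once per second with the values 1 through 10, `average` sends a single value of 5.5 for that interval, `sum` (or `volume`) sends 55, and `as-collected` sends the raw collected values unchanged.
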
## configuration

In `/etc/netdata/netdata.conf` you should have something like this (if not, download the latest version of `netdata.conf`
from your Netdata):

```conf
[backend]
    enabled = yes | no
    type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | prometheus_remote_write | json | kinesis | mongodb
    host tags = list of TAG=VALUE
    destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used, or a region for kinesis
    data source = average | sum | as collected
    prefix = Netdata
    hostname = my-name
    update every = 10
    buffer on failures = 10
    timeout ms = 20000
    send charts matching = *
    send hosts matching = localhost *
    send names instead of ids = yes
```

- `enabled = yes | no`, enables or disables sending data to a backend

- `type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | json | kinesis | mongodb`, selects the backend
  type

- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
  ports to connect to. Netdata will use the **first available** to send the metrics.

  The format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.

  `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and the only one supported by the current backends.

  `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you can enclose the IP in `[]` to
  separate it from the port.

  `PORT` can be a number or a service name. If omitted, the default port for the backend will be used
  (graphite = 2003, opentsdb = 4242).

  Example IPv4:

  ```conf
  destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
  ```

  Example IPv6 and IPv4 together:

  ```conf
  destination = [ffff:...:0001]:2003 10.11.12.1:2003
  ```

  When multiple servers are defined, Netdata will try the next one when the first one fails. This allows you to
  load-balance different servers: give your backend servers in a different order on each Netdata.

  Netdata also ships `nc-backend.sh`, a script that can be used as a fallback backend to save the
  metrics to disk and push them to the time-series database when it becomes available again. It can also be used to
  monitor / trace / debug the metrics Netdata generates.

  For the kinesis backend, `destination` should be set to an AWS region (for example, `us-east-1`).

  The MongoDB backend doesn't use the `destination` option for its configuration. It uses the `mongodb.conf`
  [configuration file](/backends/mongodb/README.md) instead.

- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
  be sent to the backend.

- `hostname = my-name`, is the hostname to be used for sending data to the backend server. By default this is
  `[global].hostname`.

- `prefix = Netdata`, is the prefix to add to all metrics.

- `update every = 10`, is the number of seconds between sending data to the backend. Netdata will add some randomness
  to this number, to prevent stressing the backend server when many Netdata servers send data to the same backend.
  This randomness does not affect the quality of the data, only the time they are sent.

- `buffer on failures = 10`, is the number of iterations (each iteration is `[backend].update every` seconds) to
  buffer data, when the backend is not available. If the backend fails to receive the data after that many failures,
  data loss on the backend is expected (Netdata will also log it).

- `timeout ms = 20000`, is the timeout in milliseconds to wait for the backend server to process the data. By default
  this is `2 * update_every * 1000`.

- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
  of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as
  `localhost`), allowing us to filter which hosts will be sent to the backend when this Netdata is a central Netdata
  aggregating multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named `*db*`
  except hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first pattern
  matching the hostname will be used - positive or negative).

- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
  within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
  gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads
  apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used -
  positive or negative). A combined example of both matching options follows this list.

- `send names instead of ids = yes | no` controls the metric names Netdata should send to the backend. Netdata supports
  names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are
  human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they
  are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.

- `host tags = list of TAG=VALUE` defines tags that should be appended on all metrics for the given host. These are
  currently only sent to graphite, json, opentsdb and prometheus. Please use the appropriate format for each
  time-series db. For example opentsdb likes them like `TAG1=VALUE1 TAG2=VALUE2`, but prometheus likes `tag1="value1",
  tag2="value2"`. Host tags are mirrored with database replication (streaming of metrics between Netdata servers).

  Starting from Netdata v1.20 the host tags are parsed in accordance with the configured backend type and stored as
  host labels so that they can be reused in API responses and exporting connectors. The parsing is supported for the
  graphite, json, opentsdb, and prometheus (default) backend types. You can check how the host tags were parsed using
  the `/api/v1/info` API call.

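As a concrete sketch of the two pattern options above, combining the examples already given in this text (adapt the host and chart names to your own setup):

```conf
[backend]
    # send only hosts whose names contain "db", but skip any host containing "child"
    send hosts matching = !*child* *db*
    # send all charts named apps.* except those ending in "reads"
    send charts matching = !*reads apps.*
```
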
## monitoring operation

Netdata provides 5 charts:

1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
   backend server.

2. **Buffered data size**, the amount of data (in KB) Netdata added to the buffer.

3. ~~**Backend latency**, the time the backend server needed to process the data Netdata sent. If there was a
   re-connection involved, this includes the connection time.~~ (this chart has been removed, because it only measures
   the time Netdata needs to give the data to the O/S - since the backend servers do not ack the reception, Netdata
   does not have any means to measure this properly).

4. **Backend operations**, the number of operations performed by Netdata.

5. **Backend thread CPU usage**, the CPU resources consumed by the Netdata thread that is responsible for sending the
   metrics to the backend server.



## alarms

Netdata adds 4 alarms:

1. `backend_last_buffering`, number of seconds since the last successful buffering of backend data
2. `backend_metrics_sent`, percentage of metrics sent to the backend server
3. `backend_metrics_lost`, number of metrics lost due to repeating failures to contact the backend server
4. ~~`backend_slow`, the percentage of time between iterations needed by the backend time to process the data sent by
   Netdata~~ (this was misleading and has been removed).



Deleted file: backends/TIMESCALE.md
@@ -1,57 +0,0 @@
<!--
title: "Writing metrics to TimescaleDB"
custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/TIMESCALE.md
-->

# Writing metrics to TimescaleDB

Thanks to Netdata's community of developers and system administrators, and Mahlon Smith
([GitHub](https://github.com/mahlonsmith)/[Website](http://www.martini.nu/)) in particular, Netdata now supports
archiving metrics directly to TimescaleDB.

What's TimescaleDB? Here's how their team defines the project on their [GitHub page](https://github.com/timescale/timescaledb):

> TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is engineered up from
> PostgreSQL, providing automatic partitioning across time and space (partitioning key), as well as full SQL support.

## Quickstart

To get started archiving metrics to TimescaleDB right away, check out Mahlon's [`netdata-timescale-relay`
repository](https://github.com/mahlonsmith/netdata-timescale-relay) on GitHub.

This small program takes JSON streams from a Netdata client and writes them to a PostgreSQL (aka TimescaleDB) table.
You'll run this program in parallel with Netdata, and after a short [configuration
process](https://github.com/mahlonsmith/netdata-timescale-relay#configuration), your metrics should start populating
TimescaleDB.

Finally, another member of Netdata's community has built a project that quickly launches Netdata, TimescaleDB, and
Grafana in easy-to-manage Docker containers. Rune Juhl Jacobsen's
[project](https://github.com/runejuhl/grafana-timescaledb) uses a `Makefile` to create everything, which makes it
perfect for testing and experimentation.

## Netdata↔TimescaleDB in action

Aside from creating incredible contributions to Netdata, Mahlon works at [LAIKA](https://www.laika.com/), an
Oregon-based animation studio that's helped create acclaimed films like _Coraline_ and _Kubo and the Two Strings_.

As part of his work to maintain the company's infrastructure of render farms, workstations, and virtual machines, he's
using Netdata, `netdata-timescale-relay`, and TimescaleDB to store Netdata metrics alongside other data from other
sources.

> LAIKA is a long-time PostgreSQL user and added TimescaleDB to their infrastructure in 2018 to help manage and store
> their IT metrics and time-series data. So far, the tool has been in production at LAIKA for over a year and helps them
> with their use case of time-based logging, where they record over 8 million metrics an hour for netdata content alone.

By archiving Netdata metrics to a backend like TimescaleDB, LAIKA can consolidate metrics data from distributed machines
efficiently. Mahlon can then correlate Netdata metrics with other sources directly in TimescaleDB.

And, because LAIKA will soon be storing years' worth of Netdata metrics data in TimescaleDB, they can analyze long-term
metrics as their films move from concept to final cut.

Read the full blog post from LAIKA at the [TimescaleDB
blog](https://blog.timescale.com/blog/writing-it-metrics-from-netdata-to-timescaledb/amp/).

Thank you to Mahlon, Rune, TimescaleDB, and the members of the Netdata community that requested and then built this
backend connection between Netdata and TimescaleDB!

Deleted file: backends/WALKTHROUGH.md
@@ -1,258 +0,0 @@
<!--
title: "Netdata, Prometheus, Grafana stack"
custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/WALKTHROUGH.md
-->

# Netdata, Prometheus, Grafana stack

## Intro

In this article I will walk you through the basics of getting Netdata, Prometheus and Grafana all working together and
monitoring your application servers. This article will be using docker on your local workstation. We will be working
with docker in an ad-hoc way, launching containers that run ‘/bin/bash’ and attaching a TTY to them. I use docker here
in a purely academic fashion and do not condone running Netdata in a container. I pick this method so individuals
without cloud accounts or access to VMs can try this out, and for its speed of deployment.

## Why Netdata, Prometheus, and Grafana

Some time ago I was introduced to Netdata by a coworker. We were attempting to troubleshoot python code which seemed to
be bottlenecked. I was instantly impressed by the amount of metrics Netdata exposes to you. I quickly added Netdata to
my set of go-to tools when troubleshooting systems performance.

Some time ago, even later, I was introduced to Prometheus. Prometheus is a monitoring application which flips the normal
architecture around and polls rest endpoints for its metrics. This architectural change greatly simplifies and decreases
the time necessary to begin monitoring your applications. Compared to current monitoring solutions the time spent on
designing the infrastructure is greatly reduced. Running a single Prometheus server per application becomes feasible
with the help of Grafana.

Grafana has been the go-to graphing tool for… some time now. It’s awesome, anyone that has used it knows it’s awesome.
We can point Grafana at Prometheus and use Prometheus as a data source. This allows a pretty simple overall monitoring
architecture: Install Netdata on your application servers, point Prometheus at Netdata, and then point Grafana at
Prometheus.

I’m omitting an important ingredient in this stack in order to keep this tutorial simple and that is service discovery.
My personal preference is to use Consul. Prometheus can plug into consul and automatically begin to scrape new hosts
that register a Netdata client with Consul.

At the end of this tutorial you will understand how each technology fits together to create a modern monitoring stack.
This stack will offer you visibility into your application and systems performance.

## Getting Started - Netdata

To begin let’s create our container which we will install Netdata on. We need to run a container, forward the necessary
port that Netdata listens on, and attach a tty so we can interact with the bash shell on the container. But before we do
this we want name resolution between the two containers to work. In order to accomplish this we will create a
user-defined network and attach both containers to this network. The first command we should run is:

```sh
docker network create --driver bridge netdata-tutorial
```

With this user-defined network created we can now launch our container we will install Netdata on and point it to this
network.

```sh
docker run -it --name netdata --hostname netdata --network=netdata-tutorial -p 19999:19999 centos:latest '/bin/bash'
```

This command creates an interactive tty session (-it), gives the container both a name in relation to the docker daemon
and a hostname (this is so you know what container is which when working in the shells and docker maps hostname
resolution to this container), forwards the local port 19999 to the container’s port 19999 (-p 19999:19999), sets the
command to run (/bin/bash) and then chooses the base container image (centos:latest). After running this you should be
sitting inside the shell of the container.

After we have entered the shell we can install Netdata. This process could not be easier. If you take a look at [this
link](/packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
any issues with these one liners and their bootstrapping scripts so far (if you run into anything, do share). Run
the following command in your container.

```sh
bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait
```

After the install completes you should be able to hit the Netdata dashboard at <http://localhost:19999/> (replace
localhost if you’re doing this on a VM or have the docker container hosted on a machine not on your local system). If
this is your first time using Netdata I suggest you take a look around. The amount of time I’ve spent digging through
/proc and calculating my own metrics has been greatly reduced by this tool. Take it all in.

Next I want to draw your attention to a particular endpoint. Navigate to
<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> in your browser. This is the endpoint which
publishes all the metrics in a format which Prometheus understands. Let’s take a look at one of these metrics.
`netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000` This
metric represents several things which I will go into in more detail in the section on Prometheus. For now understand
that this metric: `netdata_system_cpu_percentage_average` has several labels: (chart, family, dimension). This
corresponds with the first cpu chart you see on the Netdata dashboard.



This CHART is called ‘system.cpu’, the FAMILY is cpu, and the DIMENSION we are observing is “system”. You can begin to
draw links between the charts in Netdata and the prometheus metrics format in this manner.

## Prometheus

We will be installing prometheus in a container for the purpose of demonstration. While prometheus does have an official
container I would like to walk through the install process and setup on a fresh container. This will allow anyone
reading to migrate this tutorial to a VM or server of any sort.

Let’s start another container in the same fashion as we did the Netdata container.

```sh
docker run -it --name prometheus --hostname prometheus \
    --network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'
```

This should drop you into a shell once again. Once there quickly install your favorite editor as we will be editing
files later in this tutorial.

```sh
yum install vim -y
```

Prometheus provides a tarball of their latest stable versions [here](https://prometheus.io/download/).

Let’s download the latest version and install it into your container.

```sh
cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
    | grep "browser_download_url.*linux-amd64.tar.gz" \
    | cut -d '"' -f 4 \
    | wget -qi -

mkdir /opt/prometheus

sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
```

This should get prometheus installed into the container. Let’s test that we can run prometheus and connect to its web
interface.

```sh
/opt/prometheus/prometheus
```

Now attempt to go to <http://localhost:9090/>. You should be presented with the prometheus homepage. This is a good
point to talk about Prometheus’s data model, which can be viewed here: <https://prometheus.io/docs/concepts/data_model/>
As explained, we have two key elements in Prometheus metrics. We have the ‘metric’ and its ‘labels’. Labels allow for
granularity between metrics. Let’s use our previous example to further explain.

```conf
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000
```

Here our metric is ‘netdata_system_cpu_percentage_average’ and our labels are ‘chart’, ‘family’, and ‘dimension’. The
last two values constitute the actual metric value for the metric type (gauge, counter, etc…). We can begin graphing
system metrics with this information, but first we need to hook up Prometheus to poll Netdata stats.

Let’s move our attention to Prometheus’s configuration. Prometheus gets its config from the file located (in our example)
at `/opt/prometheus/prometheus.yml`. I won’t spend an extensive amount of time going over the configuration values
documented here: <https://prometheus.io/docs/operating/configuration/>. We will be adding a new “job” under
“scrape_configs”. Let’s make the “scrape_configs” section look like this (we can use the dns name netdata due to the
custom user-defined network we created in docker beforehand).

```yaml
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'netdata'

    metrics_path: /api/v1/allmetrics
    params:
      format: [ prometheus ]

    static_configs:
      - targets: ['netdata:19999']
```

Let’s start prometheus once again by running `/opt/prometheus/prometheus`. If we now navigate to
<http://localhost:9090/targets> we should see our target being successfully scraped. If we now go back to the
Prometheus homepage and begin to type ‘netdata\_’ Prometheus should autocomplete metrics it is now scraping.



Let’s now start exploring how we can graph some metrics. Back in our Netdata container let’s get the CPU spinning with a
pointless busy loop. On the shell do the following:

```sh
[root@netdata /]# while true; do echo "HOT HOT HOT CPU"; done
```

Our Netdata cpu graph should be showing some activity. Let’s represent this in Prometheus. In order to do this let’s
keep our metrics page open for reference: <http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> We are
setting out to graph the data in the CPU chart so let’s search for “system.cpu” in the metrics page above. We come across
a section of metrics with the first comments `# COMMENT homogeneous chart "system.cpu", context "system.cpu", family
"cpu", units "percentage"` followed by the metrics. This is a good start; now let us drill down to the specific metric we
would like to graph.

```conf
# COMMENT
netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1501275951 to 1501275951 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0000000 1501275951000
```

Here we learn that the metric name we care about is ‘netdata_system_cpu_percentage_average’, so throw this into Prometheus
and see what we get. We should see something similar to this (I shut off my busy loop):



This is a good step toward what we want. Also make note that Prometheus will tag on an ‘instance’ label for us which
corresponds to our statically defined job in the configuration file. This allows us to tailor our queries to specific
instances. Now we need to isolate the dimension we want in our query. To do this let us refine the query slightly. Let’s
query the dimension also. Place this into our query text box:
`netdata_system_cpu_percentage_average{dimension="system"}` We now wind up with the following graph.



Awesome, this is exactly what we wanted. If you haven’t caught on yet, we can emulate entire charts from Netdata by using
the `chart` label. If you’d like, you can combine the ‘chart’ and ‘instance’ labels to create per-instance charts.
Let’s give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`

This is the basics of using Prometheus to query Netdata. I’d advise everyone at this point to read [this
page](/backends/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
its internal DB or can send metrics “as-collected” by specifying the ‘source=as-collected’ url parameter like so:
<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected> If you choose to use
this method you will need to use Prometheus's set of functions here: <https://prometheus.io/docs/querying/functions/> to
obtain useful metrics, as you are now dealing with raw counters from the system. For example, you will have to use the
`irate()` function over a counter to get that metric's rate per second. If your graphing needs are met by using the
metrics returned by Netdata's internal database (not specifying any source= url parameter) then use that. If you find
limitations then consider re-writing your queries using the raw data and using Prometheus functions to get the desired
chart.

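As a sketch of what such a query could look like, assuming an as-collected CPU counter is exposed (the exact metric names depend on your Netdata version, so check the `/api/v1/allmetrics` page above and treat the name below as a placeholder):

```conf
# per-second rate of a raw, as-collected CPU counter (hypothetical metric name)
irate(netdata_system_cpu_total{chart="system.cpu",dimension="system"}[1m])
```
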
## Grafana

Finally we make it to grafana. This is the easiest part in my opinion. This time we will actually run the official
grafana docker container, as all the configuration we need to do is done via the GUI. Let’s run the following command:

```sh
docker run -i -p 3000:3000 --network=netdata-tutorial grafana/grafana
```

This will get grafana running at <http://localhost:3000/>. Let’s go there and
login using the credentials Admin:Admin.

The first thing we want to do is click ‘Add data source’. Let’s make it look like the following screenshot.



With this completed let’s graph! Create a new Dashboard by clicking on the top left Grafana Icon and create a new graph
in that dashboard. Fill in the query like we did above and save.



## Conclusion

There you have it, a complete systems monitoring stack which is very easy to deploy. From here I would begin to
understand how Prometheus and a service discovery mechanism such as Consul can play together nicely. My current prod
deployments automatically register Netdata services into Consul and Prometheus automatically begins to scrape them. Once
achieved you do not have to think about the monitoring system until Prometheus cannot keep up with your scale. Once this
happens there are options presented in the Prometheus documentation for solving this. Hope this was helpful, happy
monitoring.

Deleted file: backends/aws_kinesis/Makefile.am
@@ -1,12 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

dist_libconfig_DATA = \
    aws_kinesis.conf \
    $(NULL)
@ -1,53 +0,0 @@
<!--
title: "Using Netdata with AWS Kinesis Data Streams"
custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/aws_kinesis/README.md
-->

# Using Netdata with AWS Kinesis Data Streams

## Prerequisites

To use AWS Kinesis as a backend, the AWS SDK for C++ should be
[installed](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) first. `libcrypto`, `libssl`,
and `libcurl` are also required to compile Netdata with Kinesis support enabled. Next, Netdata should be re-installed
from source. The installer will detect that the required libraries are now available.

If the AWS SDK for C++ is being installed from source, it is useful to set `-DBUILD_ONLY="kinesis"`. Otherwise, the
build could take a very long time. Note that the default installation path for the libraries is
`/usr/local/lib64`. Many Linux distributions don't include this path in the default library search path, so it is
advisable to pass the following options to `cmake` while building the AWS SDK:

```sh
cmake -DCMAKE_INSTALL_LIBDIR=/usr/lib -DCMAKE_INSTALL_INCLUDEDIR=/usr/include -DBUILD_SHARED_LIBS=OFF -DBUILD_ONLY=kinesis <aws-sdk-cpp sources>
```

## Configuration

To enable data sending to the Kinesis backend, set the following options in `netdata.conf`:

```conf
[backend]
    enabled = yes
    type = kinesis
    destination = us-east-1
```

Set the `destination` option to an AWS region.

In the Netdata configuration directory run `./edit-config aws_kinesis.conf` and set AWS credentials and stream name:

```yaml
# AWS credentials
aws_access_key_id = your_access_key_id
aws_secret_access_key = your_secret_access_key

# destination stream
stream name = your_stream_name
```

Alternatively, AWS credentials can be set for the `netdata` user using AWS SDK for C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
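
For example, the credentials can live in the `netdata` user's standard AWS credentials file. A minimal sketch, assuming
the default profile and that the `netdata` user's home directory is `/var/lib/netdata` (adjust the path to your setup):

```conf
# /var/lib/netdata/.aws/credentials  (path assumed; use the netdata user's actual home directory)
[default]
aws_access_key_id = your_access_key_id
aws_secret_access_key = your_secret_access_key
```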

A partition key for every record is computed automatically by Netdata in order to distribute records evenly across the
available shards.
@ -1,94 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#define BACKENDS_INTERNALS
|
|
||||||
#include "aws_kinesis.h"
|
|
||||||
|
|
||||||
#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// kinesis backend
|
|
||||||
|
|
||||||
// read the aws_kinesis.conf file
|
|
||||||
int read_kinesis_conf(const char *path, char **access_key_id_p, char **secret_access_key_p, char **stream_name_p)
|
|
||||||
{
|
|
||||||
char *access_key_id = *access_key_id_p;
|
|
||||||
char *secret_access_key = *secret_access_key_p;
|
|
||||||
char *stream_name = *stream_name_p;
|
|
||||||
|
|
||||||
if(unlikely(access_key_id)) freez(access_key_id);
|
|
||||||
if(unlikely(secret_access_key)) freez(secret_access_key);
|
|
||||||
if(unlikely(stream_name)) freez(stream_name);
|
|
||||||
access_key_id = NULL;
|
|
||||||
secret_access_key = NULL;
|
|
||||||
stream_name = NULL;
|
|
||||||
|
|
||||||
int line = 0;
|
|
||||||
|
|
||||||
char filename[FILENAME_MAX + 1];
|
|
||||||
snprintfz(filename, FILENAME_MAX, "%s/aws_kinesis.conf", path);
|
|
||||||
|
|
||||||
char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
|
|
||||||
|
|
||||||
debug(D_BACKEND, "BACKEND: opening config file '%s'", filename);
|
|
||||||
|
|
||||||
FILE *fp = fopen(filename, "r");
|
|
||||||
if(!fp) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
|
|
||||||
buffer[CONFIG_FILE_LINE_MAX] = '\0';
|
|
||||||
line++;
|
|
||||||
|
|
||||||
s = trim(buffer);
|
|
||||||
if(!s || *s == '#') {
|
|
||||||
debug(D_BACKEND, "BACKEND: ignoring line %d of file '%s', it is empty.", line, filename);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
char *name = s;
|
|
||||||
char *value = strchr(s, '=');
|
|
||||||
if(unlikely(!value)) {
|
|
||||||
error("BACKEND: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
*value = '\0';
|
|
||||||
value++;
|
|
||||||
|
|
||||||
name = trim(name);
|
|
||||||
value = trim(value);
|
|
||||||
|
|
||||||
if(unlikely(!name || *name == '#')) {
|
|
||||||
error("BACKEND: ignoring line %d of file '%s', name is empty.", line, filename);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(!value)
|
|
||||||
value = "";
|
|
||||||
else
|
|
||||||
value = strip_quotes(value);
|
|
||||||
|
|
||||||
if(name[0] == 'a' && name[4] == 'a' && !strcmp(name, "aws_access_key_id")) {
|
|
||||||
access_key_id = strdupz(value);
|
|
||||||
}
|
|
||||||
else if(name[0] == 'a' && name[4] == 's' && !strcmp(name, "aws_secret_access_key")) {
|
|
||||||
secret_access_key = strdupz(value);
|
|
||||||
}
|
|
||||||
else if(name[0] == 's' && !strcmp(name, "stream name")) {
|
|
||||||
stream_name = strdupz(value);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fclose(fp);
|
|
||||||
|
|
||||||
if(unlikely(!stream_name || !*stream_name)) {
|
|
||||||
error("BACKEND: stream name is a mandatory Kinesis parameter but it is not configured");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
*access_key_id_p = access_key_id;
|
|
||||||
*secret_access_key_p = secret_access_key;
|
|
||||||
*stream_name_p = stream_name;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
@ -1,10 +0,0 @@
# AWS Kinesis Data Streams backend configuration
#
# All options in this file are mandatory

# AWS credentials
aws_access_key_id =
aws_secret_access_key =

# destination stream
stream name =
@ -1,14 +0,0 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_BACKEND_KINESIS_H
#define NETDATA_BACKEND_KINESIS_H

#include "backends/backends.h"
#include "aws_kinesis_put_record.h"

#define KINESIS_PARTITION_KEY_MAX 256
#define KINESIS_RECORD_MAX 1024 * 1024

extern int read_kinesis_conf(const char *path, char **auth_key_id_p, char **secure_key_p, char **stream_name_p);

#endif //NETDATA_BACKEND_KINESIS_H
@ -1,87 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#include <aws/core/Aws.h>
|
|
||||||
#include <aws/core/client/ClientConfiguration.h>
|
|
||||||
#include <aws/core/auth/AWSCredentials.h>
|
|
||||||
#include <aws/core/utils/Outcome.h>
|
|
||||||
#include <aws/kinesis/KinesisClient.h>
|
|
||||||
#include <aws/kinesis/model/PutRecordRequest.h>
|
|
||||||
#include "aws_kinesis_put_record.h"
|
|
||||||
|
|
||||||
using namespace Aws;
|
|
||||||
|
|
||||||
static SDKOptions options;
|
|
||||||
|
|
||||||
static Kinesis::KinesisClient *client;
|
|
||||||
|
|
||||||
struct request_outcome {
|
|
||||||
Kinesis::Model::PutRecordOutcomeCallable future_outcome;
|
|
||||||
size_t data_len;
|
|
||||||
};
|
|
||||||
|
|
||||||
static Vector<request_outcome> request_outcomes;
|
|
||||||
|
|
||||||
void backends_kinesis_init(const char *region, const char *access_key_id, const char *secret_key, const long timeout) {
|
|
||||||
InitAPI(options);
|
|
||||||
|
|
||||||
Client::ClientConfiguration config;
|
|
||||||
|
|
||||||
config.region = region;
|
|
||||||
config.requestTimeoutMs = timeout;
|
|
||||||
config.connectTimeoutMs = timeout;
|
|
||||||
|
|
||||||
if(access_key_id && *access_key_id && secret_key && *secret_key) {
|
|
||||||
client = New<Kinesis::KinesisClient>("client", Auth::AWSCredentials(access_key_id, secret_key), config);
|
|
||||||
} else {
|
|
||||||
client = New<Kinesis::KinesisClient>("client", config);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void backends_kinesis_shutdown() {
|
|
||||||
Delete(client);
|
|
||||||
|
|
||||||
ShutdownAPI(options);
|
|
||||||
}
|
|
||||||
|
|
||||||
int backends_kinesis_put_record(const char *stream_name, const char *partition_key,
|
|
||||||
const char *data, size_t data_len) {
|
|
||||||
Kinesis::Model::PutRecordRequest request;
|
|
||||||
|
|
||||||
request.SetStreamName(stream_name);
|
|
||||||
request.SetPartitionKey(partition_key);
|
|
||||||
request.SetData(Utils::ByteBuffer((unsigned char*) data, data_len));
|
|
||||||
|
|
||||||
request_outcomes.push_back({client->PutRecordCallable(request), data_len});
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int backends_kinesis_get_result(char *error_message, size_t *sent_bytes, size_t *lost_bytes) {
|
|
||||||
Kinesis::Model::PutRecordOutcome outcome;
|
|
||||||
*sent_bytes = 0;
|
|
||||||
*lost_bytes = 0;
|
|
||||||
|
|
||||||
for(auto request_outcome = request_outcomes.begin(); request_outcome != request_outcomes.end(); ) {
|
|
||||||
std::future_status status = request_outcome->future_outcome.wait_for(std::chrono::microseconds(100));
|
|
||||||
|
|
||||||
if(status == std::future_status::ready || status == std::future_status::deferred) {
|
|
||||||
outcome = request_outcome->future_outcome.get();
|
|
||||||
*sent_bytes += request_outcome->data_len;
|
|
||||||
|
|
||||||
if(!outcome.IsSuccess()) {
|
|
||||||
*lost_bytes += request_outcome->data_len;
|
|
||||||
outcome.GetError().GetMessage().copy(error_message, ERROR_LINE_MAX);
|
|
||||||
}
|
|
||||||
|
|
||||||
request_outcomes.erase(request_outcome);
|
|
||||||
} else {
|
|
||||||
++request_outcome;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if(*lost_bytes) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
@ -1,25 +0,0 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_BACKEND_KINESIS_PUT_RECORD_H
#define NETDATA_BACKEND_KINESIS_PUT_RECORD_H

#define ERROR_LINE_MAX 1023

#ifdef __cplusplus
extern "C" {
#endif

void backends_kinesis_init(const char *region, const char *access_key_id, const char *secret_key, const long timeout);

void backends_kinesis_shutdown();

int backends_kinesis_put_record(const char *stream_name, const char *partition_key,
                                const char *data, size_t data_len);

int backends_kinesis_get_result(char *error_message, size_t *sent_bytes, size_t *lost_bytes);

#ifdef __cplusplus
}
#endif

#endif //NETDATA_BACKEND_KINESIS_PUT_RECORD_H
backends/backends.c (1251 lines) — file diff suppressed because it is too large.
@ -1,98 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#ifndef NETDATA_BACKENDS_H
|
|
||||||
#define NETDATA_BACKENDS_H 1
|
|
||||||
|
|
||||||
#include "daemon/common.h"
|
|
||||||
|
|
||||||
typedef enum backend_options {
|
|
||||||
BACKEND_OPTION_NONE = 0,
|
|
||||||
|
|
||||||
BACKEND_SOURCE_DATA_AS_COLLECTED = (1 << 0),
|
|
||||||
BACKEND_SOURCE_DATA_AVERAGE = (1 << 1),
|
|
||||||
BACKEND_SOURCE_DATA_SUM = (1 << 2),
|
|
||||||
|
|
||||||
BACKEND_OPTION_SEND_NAMES = (1 << 16)
|
|
||||||
} BACKEND_OPTIONS;
|
|
||||||
|
|
||||||
typedef enum backend_types {
|
|
||||||
BACKEND_TYPE_UNKNOWN, // Invalid type
|
|
||||||
BACKEND_TYPE_GRAPHITE, // Send plain text to Graphite
|
|
||||||
BACKEND_TYPE_OPENTSDB_USING_TELNET, // Send data to OpenTSDB using telnet API
|
|
||||||
BACKEND_TYPE_OPENTSDB_USING_HTTP, // Send data to OpenTSDB using HTTP API
|
|
||||||
BACKEND_TYPE_JSON, // Stores the data using JSON.
|
|
||||||
BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE, // The user selected to use Prometheus backend
|
|
||||||
BACKEND_TYPE_KINESIS, // Send message to AWS Kinesis
|
|
||||||
BACKEND_TYPE_MONGODB, // Send data to MongoDB collection
|
|
||||||
BACKEND_TYPE_NUM // Number of backend types
|
|
||||||
} BACKEND_TYPE;
|
|
||||||
|
|
||||||
typedef int (**backend_response_checker_t)(BUFFER *);
|
|
||||||
typedef int (**backend_request_formatter_t)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS);
|
|
||||||
|
|
||||||
#define BACKEND_OPTIONS_SOURCE_BITS (BACKEND_SOURCE_DATA_AS_COLLECTED|BACKEND_SOURCE_DATA_AVERAGE|BACKEND_SOURCE_DATA_SUM)
|
|
||||||
#define BACKEND_OPTIONS_DATA_SOURCE(backend_options) (backend_options & BACKEND_OPTIONS_SOURCE_BITS)
|
|
||||||
|
|
||||||
extern int global_backend_update_every;
|
|
||||||
extern BACKEND_OPTIONS global_backend_options;
|
|
||||||
extern const char *global_backend_source;
|
|
||||||
extern const char *global_backend_prefix;
|
|
||||||
extern const char *global_backend_send_charts_matching;
|
|
||||||
|
|
||||||
extern void *backends_main(void *ptr);
|
|
||||||
BACKEND_TYPE backend_select_type(const char *type);
|
|
||||||
|
|
||||||
extern BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options);
|
|
||||||
|
|
||||||
#ifdef BACKENDS_INTERNALS
|
|
||||||
|
|
||||||
extern int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st);
|
|
||||||
extern calculated_number backend_calculate_value_from_stored_data(
|
|
||||||
RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
, time_t *first_timestamp // the timestamp of the first point used in this response
|
|
||||||
, time_t *last_timestamp // the timestamp that should be reported to backend
|
|
||||||
);
|
|
||||||
|
|
||||||
extern size_t backend_name_copy(char *d, const char *s, size_t usable);
|
|
||||||
extern int discard_response(BUFFER *b, const char *backend);
|
|
||||||
|
|
||||||
static inline char *strip_quotes(char *str) {
|
|
||||||
if(*str == '"' || *str == '\'') {
|
|
||||||
char *s;
|
|
||||||
|
|
||||||
str++;
|
|
||||||
|
|
||||||
s = str;
|
|
||||||
while(*s) s++;
|
|
||||||
if(s != str) s--;
|
|
||||||
|
|
||||||
if(*s == '"' || *s == '\'') *s = '\0';
|
|
||||||
}
|
|
||||||
|
|
||||||
return str;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif // BACKENDS_INTERNALS
|
|
||||||
|
|
||||||
#include "backends/prometheus/backend_prometheus.h"
|
|
||||||
#include "backends/graphite/graphite.h"
|
|
||||||
#include "backends/json/json.h"
|
|
||||||
#include "backends/opentsdb/opentsdb.h"
|
|
||||||
|
|
||||||
#if HAVE_KINESIS
|
|
||||||
#include "backends/aws_kinesis/aws_kinesis.h"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if ENABLE_PROMETHEUS_REMOTE_WRITE
|
|
||||||
#include "backends/prometheus/remote_write/remote_write.h"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if HAVE_MONGOC
|
|
||||||
#include "backends/mongodb/mongodb.h"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* NETDATA_BACKENDS_H */
|
|
||||||
@ -1,4 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
@ -1,90 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#define BACKENDS_INTERNALS
|
|
||||||
#include "graphite.h"
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// graphite backend
|
|
||||||
|
|
||||||
int backends_format_dimension_collected_graphite_plaintext(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
) {
|
|
||||||
(void)host;
|
|
||||||
(void)after;
|
|
||||||
(void)before;
|
|
||||||
|
|
||||||
char chart_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
char dimension_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
|
|
||||||
backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
|
|
||||||
|
|
||||||
buffer_sprintf(
|
|
||||||
b
|
|
||||||
, "%s.%s.%s.%s%s%s " COLLECTED_NUMBER_FORMAT " %llu\n"
|
|
||||||
, prefix
|
|
||||||
, hostname
|
|
||||||
, chart_name
|
|
||||||
, dimension_name
|
|
||||||
, (host->tags)?";":""
|
|
||||||
, (host->tags)?host->tags:""
|
|
||||||
, rd->last_collected_value
|
|
||||||
, (unsigned long long)rd->last_collected_time.tv_sec
|
|
||||||
);
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int backends_format_dimension_stored_graphite_plaintext(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
) {
|
|
||||||
(void)host;
|
|
||||||
|
|
||||||
char chart_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
char dimension_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
|
|
||||||
backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
|
|
||||||
|
|
||||||
time_t first_t = after, last_t = before;
|
|
||||||
calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
|
|
||||||
|
|
||||||
if(!isnan(value)) {
|
|
||||||
|
|
||||||
buffer_sprintf(
|
|
||||||
b
|
|
||||||
, "%s.%s.%s.%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n"
|
|
||||||
, prefix
|
|
||||||
, hostname
|
|
||||||
, chart_name
|
|
||||||
, dimension_name
|
|
||||||
, (host->tags)?";":""
|
|
||||||
, (host->tags)?host->tags:""
|
|
||||||
, value
|
|
||||||
, (unsigned long long) last_t
|
|
||||||
);
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int process_graphite_response(BUFFER *b) {
|
|
||||||
return discard_response(b, "graphite");
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@ -1,35 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
|
|
||||||
#ifndef NETDATA_BACKEND_GRAPHITE_H
|
|
||||||
#define NETDATA_BACKEND_GRAPHITE_H
|
|
||||||
|
|
||||||
#include "backends/backends.h"
|
|
||||||
|
|
||||||
extern int backends_format_dimension_collected_graphite_plaintext(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
);
|
|
||||||
|
|
||||||
extern int backends_format_dimension_stored_graphite_plaintext(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
);
|
|
||||||
|
|
||||||
extern int process_graphite_response(BUFFER *b);
|
|
||||||
|
|
||||||
#endif //NETDATA_BACKEND_GRAPHITE_H
|
|
||||||
@ -1,4 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
@ -1,152 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#define BACKENDS_INTERNALS
|
|
||||||
#include "json.h"
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// json backend
|
|
||||||
|
|
||||||
int backends_format_dimension_collected_json_plaintext(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
) {
|
|
||||||
(void)host;
|
|
||||||
(void)after;
|
|
||||||
(void)before;
|
|
||||||
(void)backend_options;
|
|
||||||
|
|
||||||
const char *tags_pre = "", *tags_post = "", *tags = host->tags;
|
|
||||||
if(!tags) tags = "";
|
|
||||||
|
|
||||||
if(*tags) {
|
|
||||||
if(*tags == '{' || *tags == '[' || *tags == '"') {
|
|
||||||
tags_pre = "\"host_tags\":";
|
|
||||||
tags_post = ",";
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
tags_pre = "\"host_tags\":\"";
|
|
||||||
tags_post = "\",";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer_sprintf(b, "{"
|
|
||||||
"\"prefix\":\"%s\","
|
|
||||||
"\"hostname\":\"%s\","
|
|
||||||
"%s%s%s"
|
|
||||||
|
|
||||||
"\"chart_id\":\"%s\","
|
|
||||||
"\"chart_name\":\"%s\","
|
|
||||||
"\"chart_family\":\"%s\","
|
|
||||||
"\"chart_context\": \"%s\","
|
|
||||||
"\"chart_type\":\"%s\","
|
|
||||||
"\"units\": \"%s\","
|
|
||||||
|
|
||||||
"\"id\":\"%s\","
|
|
||||||
"\"name\":\"%s\","
|
|
||||||
"\"value\":" COLLECTED_NUMBER_FORMAT ","
|
|
||||||
|
|
||||||
"\"timestamp\": %llu}\n",
|
|
||||||
prefix,
|
|
||||||
hostname,
|
|
||||||
tags_pre, tags, tags_post,
|
|
||||||
|
|
||||||
st->id,
|
|
||||||
st->name,
|
|
||||||
st->family,
|
|
||||||
st->context,
|
|
||||||
st->type,
|
|
||||||
st->units,
|
|
||||||
|
|
||||||
rd->id,
|
|
||||||
rd->name,
|
|
||||||
rd->last_collected_value,
|
|
||||||
|
|
||||||
(unsigned long long) rd->last_collected_time.tv_sec
|
|
||||||
);
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int backends_format_dimension_stored_json_plaintext(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
) {
|
|
||||||
(void)host;
|
|
||||||
|
|
||||||
time_t first_t = after, last_t = before;
|
|
||||||
calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
|
|
||||||
|
|
||||||
if(!isnan(value)) {
|
|
||||||
const char *tags_pre = "", *tags_post = "", *tags = host->tags;
|
|
||||||
if(!tags) tags = "";
|
|
||||||
|
|
||||||
if(*tags) {
|
|
||||||
if(*tags == '{' || *tags == '[' || *tags == '"') {
|
|
||||||
tags_pre = "\"host_tags\":";
|
|
||||||
tags_post = ",";
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
tags_pre = "\"host_tags\":\"";
|
|
||||||
tags_post = "\",";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer_sprintf(b, "{"
|
|
||||||
"\"prefix\":\"%s\","
|
|
||||||
"\"hostname\":\"%s\","
|
|
||||||
"%s%s%s"
|
|
||||||
|
|
||||||
"\"chart_id\":\"%s\","
|
|
||||||
"\"chart_name\":\"%s\","
|
|
||||||
"\"chart_family\":\"%s\","
|
|
||||||
"\"chart_context\": \"%s\","
|
|
||||||
"\"chart_type\":\"%s\","
|
|
||||||
"\"units\": \"%s\","
|
|
||||||
|
|
||||||
"\"id\":\"%s\","
|
|
||||||
"\"name\":\"%s\","
|
|
||||||
"\"value\":" CALCULATED_NUMBER_FORMAT ","
|
|
||||||
|
|
||||||
"\"timestamp\": %llu}\n",
|
|
||||||
prefix,
|
|
||||||
hostname,
|
|
||||||
tags_pre, tags, tags_post,
|
|
||||||
|
|
||||||
st->id,
|
|
||||||
st->name,
|
|
||||||
st->family,
|
|
||||||
st->context,
|
|
||||||
st->type,
|
|
||||||
st->units,
|
|
||||||
|
|
||||||
rd->id,
|
|
||||||
rd->name,
|
|
||||||
value,
|
|
||||||
|
|
||||||
(unsigned long long) last_t
|
|
||||||
);
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int process_json_response(BUFFER *b) {
|
|
||||||
return discard_response(b, "json");
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@ -1,34 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#ifndef NETDATA_BACKEND_JSON_H
|
|
||||||
#define NETDATA_BACKEND_JSON_H
|
|
||||||
|
|
||||||
#include "backends/backends.h"
|
|
||||||
|
|
||||||
extern int backends_format_dimension_collected_json_plaintext(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
);
|
|
||||||
|
|
||||||
extern int backends_format_dimension_stored_json_plaintext(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
);
|
|
||||||
|
|
||||||
extern int process_json_response(BUFFER *b);
|
|
||||||
|
|
||||||
#endif //NETDATA_BACKEND_JSON_H
|
|
||||||
@ -1,8 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)
@ -1,41 +0,0 @@
<!--
title: "MongoDB backend"
custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/mongodb/README.md
-->

# MongoDB backend

## Prerequisites

To use MongoDB as a backend, `libmongoc` 1.7.0 or higher should be
[installed](http://mongoc.org/libmongoc/current/installing.html) first. Next, Netdata should be re-installed from
source. The installer will detect that the required libraries are now available.

## Configuration

To enable data sending to the MongoDB backend, set the following options in `netdata.conf`:

```conf
[backend]
    enabled = yes
    type = mongodb
```

In the Netdata configuration directory run `./edit-config mongodb.conf` and set the [MongoDB
URI](https://docs.mongodb.com/manual/reference/connection-string/), database name, and collection name:

```yaml
# URI
uri = mongodb://<hostname>

# database name
database = your_database_name

# collection name
collection = your_collection_name
```

The default socket timeout depends on the backend update interval. The timeout is 500 ms shorter than the interval (but
not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.
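
For example, to pin the socket timeout to 2000 ms regardless of the update interval, the option can be set directly in
the URI. A minimal sketch with a placeholder hostname:

```yaml
# URI with an explicit socket timeout
uri = mongodb://your_mongodb_host:27017/?sockettimeoutms=2000
```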
@ -1,189 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#define BACKENDS_INTERNALS
|
|
||||||
#include "mongodb.h"
|
|
||||||
#include <mongoc.h>
|
|
||||||
|
|
||||||
#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
|
|
||||||
|
|
||||||
static mongoc_client_t *mongodb_client;
|
|
||||||
static mongoc_collection_t *mongodb_collection;
|
|
||||||
|
|
||||||
int backends_mongodb_init(const char *uri_string,
|
|
||||||
const char *database_string,
|
|
||||||
const char *collection_string,
|
|
||||||
int32_t default_socket_timeout) {
|
|
||||||
mongoc_uri_t *uri;
|
|
||||||
bson_error_t error;
|
|
||||||
|
|
||||||
mongoc_init();
|
|
||||||
|
|
||||||
uri = mongoc_uri_new_with_error(uri_string, &error);
|
|
||||||
if(unlikely(!uri)) {
|
|
||||||
error("BACKEND: failed to parse URI: %s. Error message: %s", uri_string, error.message);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t socket_timeout = mongoc_uri_get_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, default_socket_timeout);
|
|
||||||
if(!mongoc_uri_set_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout)) {
|
|
||||||
error("BACKEND: failed to set %s to the value %d", MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout);
|
|
||||||
return 1;
|
|
||||||
};
|
|
||||||
|
|
||||||
mongodb_client = mongoc_client_new_from_uri(uri);
|
|
||||||
if(unlikely(!mongodb_client)) {
|
|
||||||
error("BACKEND: failed to create a new client");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(!mongoc_client_set_appname(mongodb_client, "netdata")) {
|
|
||||||
error("BACKEND: failed to set client appname");
|
|
||||||
};
|
|
||||||
|
|
||||||
mongodb_collection = mongoc_client_get_collection(mongodb_client, database_string, collection_string);
|
|
||||||
|
|
||||||
mongoc_uri_destroy(uri);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void backends_free_bson(bson_t **insert, size_t n_documents) {
|
|
||||||
size_t i;
|
|
||||||
|
|
||||||
for(i = 0; i < n_documents; i++)
|
|
||||||
bson_destroy(insert[i]);
|
|
||||||
|
|
||||||
free(insert);
|
|
||||||
}
|
|
||||||
|
|
||||||
int backends_mongodb_insert(char *data, size_t n_metrics) {
|
|
||||||
bson_t **insert = calloc(n_metrics, sizeof(bson_t *));
|
|
||||||
bson_error_t error;
|
|
||||||
char *start = data, *end = data;
|
|
||||||
size_t n_documents = 0;
|
|
||||||
|
|
||||||
while(*end && n_documents <= n_metrics) {
|
|
||||||
while(*end && *end != '\n') end++;
|
|
||||||
|
|
||||||
if(likely(*end)) {
|
|
||||||
*end = '\0';
|
|
||||||
end++;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
insert[n_documents] = bson_new_from_json((const uint8_t *)start, -1, &error);
|
|
||||||
|
|
||||||
if(unlikely(!insert[n_documents])) {
|
|
||||||
error("BACKEND: %s", error.message);
|
|
||||||
backends_free_bson(insert, n_documents);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
start = end;
|
|
||||||
|
|
||||||
n_documents++;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(unlikely(!mongoc_collection_insert_many(mongodb_collection, (const bson_t **)insert, n_documents, NULL, NULL, &error))) {
|
|
||||||
error("BACKEND: %s", error.message);
|
|
||||||
backends_free_bson(insert, n_documents);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
backends_free_bson(insert, n_documents);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void backends_mongodb_cleanup() {
|
|
||||||
mongoc_collection_destroy(mongodb_collection);
|
|
||||||
mongoc_client_destroy(mongodb_client);
|
|
||||||
mongoc_cleanup();
|
|
||||||
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
int read_mongodb_conf(const char *path, char **uri_p, char **database_p, char **collection_p) {
|
|
||||||
char *uri = *uri_p;
|
|
||||||
char *database = *database_p;
|
|
||||||
char *collection = *collection_p;
|
|
||||||
|
|
||||||
if(unlikely(uri)) freez(uri);
|
|
||||||
if(unlikely(database)) freez(database);
|
|
||||||
if(unlikely(collection)) freez(collection);
|
|
||||||
uri = NULL;
|
|
||||||
database = NULL;
|
|
||||||
collection = NULL;
|
|
||||||
|
|
||||||
int line = 0;
|
|
||||||
|
|
||||||
char filename[FILENAME_MAX + 1];
|
|
||||||
snprintfz(filename, FILENAME_MAX, "%s/mongodb.conf", path);
|
|
||||||
|
|
||||||
char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
|
|
||||||
|
|
||||||
debug(D_BACKEND, "BACKEND: opening config file '%s'", filename);
|
|
||||||
|
|
||||||
FILE *fp = fopen(filename, "r");
|
|
||||||
if(!fp) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
|
|
||||||
buffer[CONFIG_FILE_LINE_MAX] = '\0';
|
|
||||||
line++;
|
|
||||||
|
|
||||||
s = trim(buffer);
|
|
||||||
if(!s || *s == '#') {
|
|
||||||
debug(D_BACKEND, "BACKEND: ignoring line %d of file '%s', it is empty.", line, filename);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
char *name = s;
|
|
||||||
char *value = strchr(s, '=');
|
|
||||||
if(unlikely(!value)) {
|
|
||||||
error("BACKEND: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
*value = '\0';
|
|
||||||
value++;
|
|
||||||
|
|
||||||
name = trim(name);
|
|
||||||
value = trim(value);
|
|
||||||
|
|
||||||
if(unlikely(!name || *name == '#')) {
|
|
||||||
error("BACKEND: ignoring line %d of file '%s', name is empty.", line, filename);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(!value)
|
|
||||||
value = "";
|
|
||||||
else
|
|
||||||
value = strip_quotes(value);
|
|
||||||
|
|
||||||
if(name[0] == 'u' && !strcmp(name, "uri")) {
|
|
||||||
uri = strdupz(value);
|
|
||||||
}
|
|
||||||
else if(name[0] == 'd' && !strcmp(name, "database")) {
|
|
||||||
database = strdupz(value);
|
|
||||||
}
|
|
||||||
else if(name[0] == 'c' && !strcmp(name, "collection")) {
|
|
||||||
collection = strdupz(value);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fclose(fp);
|
|
||||||
|
|
||||||
if(unlikely(!collection || !*collection)) {
|
|
||||||
error("BACKEND: collection name is a mandatory MongoDB parameter, but it is not configured");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
*uri_p = uri;
|
|
||||||
*database_p = database;
|
|
||||||
*collection_p = collection;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
@ -1,12 +0,0 @@
# MongoDB backend configuration
#
# All options in this file are mandatory

# URI
uri =

# database name
database =

# collection name
collection =
@ -1,16 +0,0 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_BACKEND_MONGODB_H
#define NETDATA_BACKEND_MONGODB_H

#include "backends/backends.h"

extern int backends_mongodb_init(const char *uri_string, const char *database_string, const char *collection_string, const int32_t socket_timeout);

extern int backends_mongodb_insert(char *data, size_t n_metrics);

extern void backends_mongodb_cleanup();

extern int read_mongodb_conf(const char *path, char **uri_p, char **database_p, char **collection_p);

#endif //NETDATA_BACKEND_MONGODB_H
@ -1,158 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
# This is a simple backend database proxy, written in BASH, using the nc command.
|
|
||||||
# Run the script without any parameters for help.
|
|
||||||
|
|
||||||
MODE="${1}"
|
|
||||||
MY_PORT="${2}"
|
|
||||||
BACKEND_HOST="${3}"
|
|
||||||
BACKEND_PORT="${4}"
|
|
||||||
FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}"
|
|
||||||
|
|
||||||
log() {
|
|
||||||
logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}"
|
|
||||||
}
|
|
||||||
|
|
||||||
mync() {
|
|
||||||
local ret
|
|
||||||
|
|
||||||
log "Running: nc ${*}"
|
|
||||||
nc "${@}"
|
|
||||||
ret=$?
|
|
||||||
|
|
||||||
log "nc stopped with return code ${ret}."
|
|
||||||
|
|
||||||
return ${ret}
|
|
||||||
}
|
|
||||||
|
|
||||||
listen_save_replay_forever() {
|
|
||||||
local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended
|
|
||||||
|
|
||||||
while true
|
|
||||||
do
|
|
||||||
log "Starting nc to listen on port ${port} and save metrics to ${file}"
|
|
||||||
|
|
||||||
started=$(date +%s)
|
|
||||||
mync -l -p "${port}" | tee -a -p --output-error=exit "${file}"
|
|
||||||
ended=$(date +%s)
|
|
||||||
|
|
||||||
if [ -s "${file}" ]
|
|
||||||
then
|
|
||||||
if [ ! -z "${real_backend_host}" ] && [ ! -z "${real_backend_port}" ]
|
|
||||||
then
|
|
||||||
log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}"
|
|
||||||
|
|
||||||
mync "${real_backend_host}" "${real_backend_port}" <"${file}"
|
|
||||||
ret=$?
|
|
||||||
|
|
||||||
if [ ${ret} -eq 0 ]
|
|
||||||
then
|
|
||||||
log "Successfully sent the metrics to ${real_backend_host}:${real_backend_port}"
|
|
||||||
mv "${file}" "${file}.old"
|
|
||||||
touch "${file}"
|
|
||||||
else
|
|
||||||
log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
log "No backend configured - appending more data to ${file}"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# prevent a CPU hungry infinite loop
|
|
||||||
# if nc cannot listen to port
|
|
||||||
if [ $((ended - started)) -lt 5 ]
|
|
||||||
then
|
|
||||||
log "nc has been stopped too fast."
|
|
||||||
delay=30
|
|
||||||
else
|
|
||||||
delay=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
log "Waiting ${delay} seconds before listening again for data."
|
|
||||||
sleep ${delay}
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
if [ "${MODE}" = "start" ]
|
|
||||||
then
|
|
||||||
|
|
||||||
# start the listener, in exclusive mode
|
|
||||||
# only one can use the same file/port at a time
|
|
||||||
{
|
|
||||||
flock -n 9
|
|
||||||
# shellcheck disable=SC2181
|
|
||||||
if [ $? -ne 0 ]
|
|
||||||
then
|
|
||||||
log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?"
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
# save our PID to the lock file
|
|
||||||
echo "$$" >"${FILE}.lock"
|
|
||||||
|
|
||||||
listen_save_replay_forever "${FILE}" "${MY_PORT}" "${BACKEND_HOST}" "${BACKEND_PORT}"
|
|
||||||
ret=$?
|
|
||||||
|
|
||||||
log "listener exited."
|
|
||||||
exit ${ret}
|
|
||||||
|
|
||||||
} 9>>"${FILE}.lock"
|
|
||||||
|
|
||||||
# we can only get here if ${FILE}.lock cannot be created
|
|
||||||
log "Cannot create file ${FILE}."
|
|
||||||
exit 3
|
|
||||||
|
|
||||||
elif [ "${MODE}" = "stop" ]
|
|
||||||
then
|
|
||||||
|
|
||||||
{
|
|
||||||
flock -n 9
|
|
||||||
# shellcheck disable=SC2181
|
|
||||||
if [ $? -ne 0 ]
|
|
||||||
then
|
|
||||||
pid=$(<"${FILE}".lock)
|
|
||||||
log "Killing process ${pid}..."
|
|
||||||
kill -TERM "-${pid}"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
log "File ${FILE}.lock has been locked by me but it shouldn't. Is a collector running?"
|
|
||||||
exit 4
|
|
||||||
|
|
||||||
} 9<"${FILE}.lock"
|
|
||||||
|
|
||||||
log "File ${FILE}.lock does not exist. Is a collector running?"
|
|
||||||
exit 5
|
|
||||||
|
|
||||||
else
|
|
||||||
|
|
||||||
cat <<EOF
|
|
||||||
Usage:
|
|
||||||
|
|
||||||
"${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT]
|
|
||||||
|
|
||||||
PORT The port this script will listen
|
|
||||||
(configure netdata to use this as a second backend)
|
|
||||||
|
|
||||||
BACKEND_HOST The real backend host
|
|
||||||
BACKEND_PORT The real backend port
|
|
||||||
|
|
||||||
This script can act as fallback backend for netdata.
|
|
||||||
It will receive metrics from netdata, save them to
|
|
||||||
${FILE}
|
|
||||||
and once netdata reconnects to the real-backend, this script
|
|
||||||
will push all metrics collected to the real-backend too and
|
|
||||||
wait for a failure to happen again.
|
|
||||||
|
|
||||||
Only one netdata can connect to this script at a time.
|
|
||||||
If you need fallback for multiple netdata, run this script
|
|
||||||
multiple times with different ports.
|
|
||||||
|
|
||||||
You can run me in the background with this:
|
|
||||||
|
|
||||||
screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT]
|
|
||||||
EOF
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
@ -1,4 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
@ -1,37 +0,0 @@
<!--
title: "OpenTSDB with HTTP"
custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/opentsdb/README.md
-->

# OpenTSDB with HTTP

Netdata can easily communicate with OpenTSDB using its HTTP API. To enable this channel, set the following options in
your `netdata.conf`:

```conf
[backend]
    type = opentsdb:http
    destination = localhost:4242
```

In this example, OpenTSDB is running on its default port, which is `4242`. If you run OpenTSDB on a different port,
change the `destination = localhost:4242` line accordingly.

## HTTPS

As of [v1.16.0](https://github.com/netdata/netdata/releases/tag/v1.16.0), Netdata can send metrics to OpenTSDB using
TLS/SSL. Unfortunately, OpenTSDB does not support encrypted connections, so you will have to configure a reverse proxy
to enable HTTPS communication between Netdata and OpenTSDB. You can set up a reverse proxy with
[Nginx](/docs/Running-behind-nginx.md).
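
A minimal Nginx sketch for such a proxy is shown below; the listen port, certificate paths, and OpenTSDB address are
placeholders you will need to adapt to your own setup:

```conf
server {
    # terminate TLS on port 8082 and forward plain HTTP to OpenTSDB (assumed on localhost:4242)
    listen 8082 ssl;
    server_name your.proxy.hostname;

    ssl_certificate     /etc/ssl/certs/your_cert.pem;
    ssl_certificate_key /etc/ssl/private/your_key.pem;

    location / {
        proxy_pass http://127.0.0.1:4242;
    }
}
```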

After your proxy is configured, make the following changes to `netdata.conf`:

```conf
[backend]
    type = opentsdb:https
    destination = localhost:8082
```

In this example, we used port `8082` for our reverse proxy. If your reverse proxy listens on a different port, change
the `destination = localhost:8082` line accordingly.
@ -1,205 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#define BACKENDS_INTERNALS
|
|
||||||
#include "opentsdb.h"
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// opentsdb backend
|
|
||||||
|
|
||||||
int backends_format_dimension_collected_opentsdb_telnet(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
) {
|
|
||||||
(void)host;
|
|
||||||
(void)after;
|
|
||||||
(void)before;
|
|
||||||
|
|
||||||
char chart_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
char dimension_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
|
|
||||||
backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
|
|
||||||
|
|
||||||
buffer_sprintf(
|
|
||||||
b
|
|
||||||
, "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s%s\n"
|
|
||||||
, prefix
|
|
||||||
, chart_name
|
|
||||||
, dimension_name
|
|
||||||
, (unsigned long long)rd->last_collected_time.tv_sec
|
|
||||||
, rd->last_collected_value
|
|
||||||
, hostname
|
|
||||||
, (host->tags)?" ":""
|
|
||||||
, (host->tags)?host->tags:""
|
|
||||||
);
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int backends_format_dimension_stored_opentsdb_telnet(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
) {
|
|
||||||
(void)host;
|
|
||||||
|
|
||||||
time_t first_t = after, last_t = before;
|
|
||||||
calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
|
|
||||||
|
|
||||||
char chart_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
char dimension_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
|
|
||||||
backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
|
|
||||||
|
|
||||||
if(!isnan(value)) {
|
|
||||||
|
|
||||||
buffer_sprintf(
|
|
||||||
b
|
|
||||||
, "put %s.%s.%s %llu " CALCULATED_NUMBER_FORMAT " host=%s%s%s\n"
|
|
||||||
, prefix
|
|
||||||
, chart_name
|
|
||||||
, dimension_name
|
|
||||||
, (unsigned long long) last_t
|
|
||||||
, value
|
|
||||||
, hostname
|
|
||||||
, (host->tags)?" ":""
|
|
||||||
, (host->tags)?host->tags:""
|
|
||||||
);
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int process_opentsdb_response(BUFFER *b) {
|
|
||||||
return discard_response(b, "opentsdb");
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void opentsdb_build_message(BUFFER *b, char *message, const char *hostname, int length) {
|
|
||||||
buffer_sprintf(
|
|
||||||
b
|
|
||||||
, "POST /api/put HTTP/1.1\r\n"
|
|
||||||
"Host: %s\r\n"
|
|
||||||
"Content-Type: application/json\r\n"
|
|
||||||
"Content-Length: %d\r\n"
|
|
||||||
"\r\n"
|
|
||||||
"%s"
|
|
||||||
, hostname
|
|
||||||
, length
|
|
||||||
, message
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
int backends_format_dimension_collected_opentsdb_http(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
) {
|
|
||||||
(void)host;
|
|
||||||
(void)after;
|
|
||||||
(void)before;
|
|
||||||
|
|
||||||
char message[1024];
|
|
||||||
char chart_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
char dimension_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
|
|
||||||
backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
|
|
||||||
|
|
||||||
int length = snprintfz(message
|
|
||||||
, sizeof(message)
|
|
||||||
, "{"
|
|
||||||
" \"metric\": \"%s.%s.%s\","
|
|
||||||
" \"timestamp\": %llu,"
|
|
||||||
" \"value\": "COLLECTED_NUMBER_FORMAT ","
|
|
||||||
" \"tags\": {"
|
|
||||||
" \"host\": \"%s%s%s\""
|
|
||||||
" }"
|
|
||||||
"}"
|
|
||||||
, prefix
|
|
||||||
, chart_name
|
|
||||||
, dimension_name
|
|
||||||
, (unsigned long long)rd->last_collected_time.tv_sec
|
|
||||||
, rd->last_collected_value
|
|
||||||
, hostname
|
|
||||||
, (host->tags)?" ":""
|
|
||||||
, (host->tags)?host->tags:""
|
|
||||||
);
|
|
||||||
|
|
||||||
if(length > 0) {
|
|
||||||
opentsdb_build_message(b, message, hostname, length);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int backends_format_dimension_stored_opentsdb_http(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
) {
|
|
||||||
(void)host;
|
|
||||||
|
|
||||||
time_t first_t = after, last_t = before;
|
|
||||||
calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
|
|
||||||
|
|
||||||
if(!isnan(value)) {
|
|
||||||
char chart_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
char dimension_name[RRD_ID_LENGTH_MAX + 1];
|
|
||||||
backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
|
|
||||||
backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
|
|
||||||
|
|
||||||
char message[1024];
|
|
||||||
int length = snprintfz(message
|
|
||||||
, sizeof(message)
|
|
||||||
, "{"
|
|
||||||
" \"metric\": \"%s.%s.%s\","
|
|
||||||
" \"timestamp\": %llu,"
|
|
||||||
" \"value\": "CALCULATED_NUMBER_FORMAT ","
|
|
||||||
" \"tags\": {"
|
|
||||||
" \"host\": \"%s%s%s\""
|
|
||||||
" }"
|
|
||||||
"}"
|
|
||||||
, prefix
|
|
||||||
, chart_name
|
|
||||||
, dimension_name
|
|
||||||
, (unsigned long long)last_t
|
|
||||||
, value
|
|
||||||
, hostname
|
|
||||||
, (host->tags)?" ":""
|
|
||||||
, (host->tags)?host->tags:""
|
|
||||||
);
|
|
||||||
|
|
||||||
if(length > 0) {
|
|
||||||
opentsdb_build_message(b, message, hostname, length);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
@ -1,58 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#ifndef NETDATA_BACKEND_OPENTSDB_H
|
|
||||||
#define NETDATA_BACKEND_OPENTSDB_H
|
|
||||||
|
|
||||||
#include "backends/backends.h"
|
|
||||||
|
|
||||||
extern int backends_format_dimension_collected_opentsdb_telnet(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
);
|
|
||||||
|
|
||||||
extern int backends_format_dimension_stored_opentsdb_telnet(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
);
|
|
||||||
|
|
||||||
extern int process_opentsdb_response(BUFFER *b);
|
|
||||||
|
|
||||||
int backends_format_dimension_collected_opentsdb_http(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
);
|
|
||||||
|
|
||||||
int backends_format_dimension_stored_opentsdb_http(
|
|
||||||
BUFFER *b // the buffer to write data to
|
|
||||||
, const char *prefix // the prefix to use
|
|
||||||
, RRDHOST *host // the host this chart comes from
|
|
||||||
, const char *hostname // the hostname (to override host->hostname)
|
|
||||||
, RRDSET *st // the chart
|
|
||||||
, RRDDIM *rd // the dimension
|
|
||||||
, time_t after // the start timestamp
|
|
||||||
, time_t before // the end timestamp
|
|
||||||
, BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
|
|
||||||
);
|
|
||||||
|
|
||||||
#endif //NETDATA_BACKEND_OPENTSDB_H
|
|
||||||
@ -1,12 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

SUBDIRS = \
    remote_write \
    $(NULL)

dist_noinst_DATA = \
    README.md \
    $(NULL)
@ -1,457 +0,0 @@
<!--
title: "Using Netdata with Prometheus"
custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/prometheus/README.md
-->

# Using Netdata with Prometheus

> IMPORTANT: the format in which Netdata sends metrics to Prometheus has changed since Netdata v1.7. The new Prometheus
> backend for Netdata supports many more features and is aligned with the development of the rest of the Netdata backends.

Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model. Recently
Netdata added support for Prometheus. I'm going to quickly show you how to install both Netdata and Prometheus on the
same server. We can then use Grafana pointed at Prometheus to obtain the long-term metrics Netdata offers. I'm assuming
we are starting from a fresh Ubuntu shell (whether you'd like to follow along in a VM or a cloud instance is up to you).

## Installing Netdata and Prometheus

### Installing Netdata

There are a number of ways to install Netdata, as described in [Installation](/packaging/installer/README.md). The
suggested way is to install the latest Netdata and keep it upgraded automatically, using the one-line installation:

```sh
bash <(curl -Ss https://my-netdata.io/kickstart.sh)
```

At this point we should have Netdata listening on port 19999. Point your browser here:

```sh
http://your.netdata.ip:19999
```

_(replace `your.netdata.ip` with the IP or hostname of the server running Netdata)_

### Installing Prometheus

In order to install Prometheus we are going to introduce our own systemd startup script along with an example
`prometheus.yml` configuration. Prometheus needs to be pointed at your server's target URL so it can scrape Netdata's
API. Prometheus follows a pull model, meaning Netdata is the passive client within this architecture; Prometheus always
initiates the connection with Netdata.
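
To see what Prometheus will scrape, you can query the same endpoint by hand once Netdata is running. A minimal sketch,
using the `allmetrics` API that the scrape job below points at (replace `your.netdata.ip` as before):

```sh
curl "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus"
```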
|
|
||||||
|
|
||||||
#### Download Prometheus
|
|
||||||
|
|
||||||
```sh
|
|
||||||
cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
|
|
||||||
| grep "browser_download_url.*linux-amd64.tar.gz" \
|
|
||||||
| cut -d '"' -f 4 \
|
|
||||||
| wget -qi -
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Create prometheus system user
|
|
||||||
|
|
||||||
```sh
|
|
||||||
sudo useradd -r prometheus
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Create prometheus directory
|
|
||||||
|
|
||||||
```sh
|
|
||||||
sudo mkdir /opt/prometheus
|
|
||||||
sudo chown prometheus:prometheus /opt/prometheus
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Untar prometheus directory
|
|
||||||
|
|
||||||
```sh
|
|
||||||
sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Install prometheus.yml
|
|
||||||
|
|
||||||
We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`.
|
|
||||||
|
|
||||||
Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
|
|
||||||
|
|
||||||
```yaml
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first.rules"
  # - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['0.0.0.0:9090']

  - job_name: 'netdata-scrape'

    metrics_path: '/api/v1/allmetrics'
    params:
      # format: prometheus | prometheus_all_hosts
      # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
      format: [prometheus]
      #
      # source: as-collected | raw | average | sum | volume
      # default is: average
      #source: [as-collected]
      #
      # server name for this prometheus - the default is the client IP
      # for Netdata to uniquely identify it
      #server: ['prometheus1']
    honor_labels: true

    static_configs:
      - targets: ['{your.netdata.ip}:19999']
```

#### Install nodes.yml

The following is completely optional; it will enable Prometheus to generate alerts from some Netdata sources. Tweak the
values to your own needs. We will use the `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a
_- "nodes.yml"_ entry under the _rule_files:_ section in the example prometheus.yml file above.

```yaml
groups:
- name: nodes

  rules:
  - alert: node_high_cpu_usage_70
    expr: sum(sum_over_time(netdata_system_cpu_percentage_average{dimension=~"(user|system|softirq|irq|guest)"}[10m])) by (job) / sum(count_over_time(netdata_system_cpu_percentage_average{dimension="idle"}[10m])) by (job) > 70
    for: 1m
    annotations:
      description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
      summary: CPU alert for container node '{{ $labels.job }}'

  - alert: node_high_memory_usage_70
    expr: 100 / sum(netdata_system_ram_MB_average) by (job)
      * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
    for: 1m
    annotations:
      description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
      summary: Memory alert for container node '{{ $labels.job }}'

  - alert: node_low_root_filesystem_space_20
    expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
      * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
    for: 1m
    annotations:
      description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
      summary: Root filesystem alert for container node '{{ $labels.job }}'

  - alert: node_root_filesystem_fill_rate_6h
    expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
    for: 1h
    labels:
      severity: critical
    annotations:
      description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
      summary: Disk fill alert for Swarm node '{{ $labels.job }}'
```

#### Install prometheus.service

Save this service file as `/etc/systemd/system/prometheus.service`:

```sh
[Unit]
Description=Prometheus Server
AssertPathExists=/opt/prometheus

[Service]
Type=simple
WorkingDirectory=/opt/prometheus
User=prometheus
Group=prometheus
ExecStart=/opt/prometheus/prometheus --config.file=/opt/prometheus/prometheus.yml --log.level=info
ExecReload=/bin/kill -SIGHUP $MAINPID
ExecStop=/bin/kill -SIGINT $MAINPID

[Install]
WantedBy=multi-user.target
```

##### Start Prometheus

```sh
sudo systemctl start prometheus
sudo systemctl enable prometheus
```

Prometheus should now start and listen on port 9090. Point your browser there.

If everything is working correctly, fetching `http://your.prometheus.ip:9090` will show you a 'Status' tab. Click it
and then click 'Targets'. We should see the Netdata host as a scraped target.

---

## Netdata support for prometheus

> IMPORTANT: the format in which Netdata sends metrics to prometheus has changed since Netdata v1.6. The new format
> allows easier queries for metrics and supports both `as collected` and normalized metrics.

Before explaining the changes, we have to understand the key differences between Netdata and prometheus.

### understanding Netdata metrics

#### charts

Each chart in Netdata has several properties (common to all its metrics):

- `chart_id` - uniquely identifies a chart.

- `chart_name` - a more human friendly name for `chart_id`, also unique.

- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts
  have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.

- `family` groups a set of charts together. It is used as the submenu of the dashboard.

- `units` is the units for all the metrics attached to the chart.

#### dimensions

Then each Netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of
measurement, and are contextually in the same category (i.e. the metrics for disk bandwidth are `read` and `write` and
they are both in the same chart).

### Netdata data source

Netdata can send metrics to prometheus from 3 data sources:

- `as collected` or `raw` - this data source sends the metrics to prometheus as they are collected. No conversion is
  done by Netdata. The latest value for each metric is just given to prometheus. This is the method prometheus prefers,
  but it is also the hardest to work with. To work with this data source, you will need to understand how to get
  meaningful values out of the metrics.

  The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.

  If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended to the context.

  Unlike prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants
  (`multiplier` and `divisor`). In the case that the dimensions of a chart are heterogeneous, Netdata will use this
  format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`

- `average` - this data source uses the Netdata database to send the metrics to prometheus as they are presented on
  the Netdata dashboard. So, all the metrics are sent as gauges, in the units they are presented in on the Netdata
  dashboard charts. This is the easiest to work with.

  The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.

  When this source is used, Netdata keeps track of the last access time for each prometheus server fetching the
  metrics. This last access time is used on the subsequent queries of the same prometheus server to identify the
  time frame over which the `average` will be calculated.

  So, no matter how frequently prometheus scrapes Netdata, it will get all the database data.
  To identify each prometheus server, Netdata uses by default the IP of the client fetching the metrics.

  If there are multiple prometheus servers fetching data from the same Netdata, using the same IP, each prometheus
  server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the prometheus server.

- `sum` or `volume` is like `average`, but instead of averaging the values, it sums them.

  The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. All the
  other operations are the same as with `average`.

  To change the data source to `sum` or `as-collected` you need to provide the `source` parameter in the request URL,
  e.g.: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`

Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.

### Querying Metrics

Fetch this URL with your web browser:

`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`

_(replace `your.netdata.ip` with the IP or hostname of your Netdata server)_

Netdata will respond with all the metrics it sends to prometheus.

If you search that page for `"system.cpu"` you will find all the metrics Netdata is exporting to prometheus for this
chart. `system.cpu` is the chart name on the Netdata dashboard (on the Netdata dashboard all charts have a text heading
such as: `Total CPU utilization (system.cpu)`. What we are interested in here is the chart name: `system.cpu`).

Searching for `"system.cpu"` reveals:

```sh
# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000
# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
```

_(Netdata response for `system.cpu` with source=`average`)_

In `average` or `sum` data sources, all values are normalized and are reported to prometheus as gauges. Now, use the
'expression' text form in prometheus. Begin typing the metric we are looking for: `netdata_system_cpu`. You should see
that the text form begins to auto-fill, as prometheus knows about this metric.

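For example, a query like the following (an illustrative sketch; the metric name comes from the `average` sample above
and the labels assume the default configuration) adds up all the non-idle CPU dimensions:

```sh
sum(netdata_system_cpu_percentage_average{dimension!="idle"})
```
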
If the data source was `as collected`, the response would be:

```sh
# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438
# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter)
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
```

_(Netdata response for `system.cpu` with source=`as-collected`)_

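Because the `as collected` values are counters (note the `_total` suffix), you would normally wrap them in `rate()` or
`irate()` in prometheus to get a per-second value — a minimal sketch, assuming the default `netdata_` prefix:

```sh
rate(netdata_system_cpu_total{dimension="user"}[1m])
```
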
For more information, check the prometheus documentation.

### Streaming data from upstream hosts

The `format=prometheus` parameter only exports the host's own Netdata metrics. If you are using the parent-child
functionality of Netdata, this ignores any upstream hosts, so you should consider using the below in your
**prometheus.yml**:

```yaml
metrics_path: '/api/v1/allmetrics'
params:
  format: [prometheus_all_hosts]
honor_labels: true
```

This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names
provided.

### Timestamps

To pass the metrics through the prometheus pushgateway, Netdata supports the option `&timestamps=no` to send the
metrics without timestamps.

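For example:

```sh
http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&timestamps=no
```
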
## Netdata host variables

Netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of
files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to prometheus by default.

To expose them, append `variables=yes` to the Netdata URL.

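For example:

```sh
http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&variables=yes
```
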
### TYPE and HELP

To save bandwidth, and because prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If
wanted, they can be re-enabled via `types=yes` and `help=yes`, e.g.
`/api/v1/allmetrics?format=prometheus&types=yes&help=yes`

Note that if enabled, the `# TYPE` and `# HELP` lines are repeated for every occurrence of a metric, which goes against the Prometheus documentation's [specification for these lines](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#comments-help-text-and-type-information).

### Names and IDs

Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and
names are human friendly labels (also unique).

Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper,
interrupts, QoS classes, statsd synthetic charts, etc.

The default is controlled in `netdata.conf`:

```conf
[backend]
    send names instead of ids = yes | no
```

You can overwrite it from prometheus, by appending to the URL:

- `&names=no` to get IDs (the old behaviour)
- `&names=yes` to get names

### Filtering metrics sent to prometheus

Netdata can filter the metrics it sends to prometheus with this setting:

```conf
[backend]
    send charts matching = *
```

This setting accepts a space-separated list of patterns to match the **charts** to be sent to prometheus. Each pattern
can use `*` as wildcard, any number of times (e.g. `*a*b*c*` is valid). Patterns starting with `!` give a negative match
(e.g. `!*.bad users.* groups.*` will send all the users and groups except the `bad` user and `bad` group). The order is
important: the first match (positive or negative), left to right, is used.

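For example, the following (an illustrative pattern list, not a recommendation) would send all `system.*` and `apps.*`
charts except `apps.cpu`, and nothing else:

```conf
[backend]
    send charts matching = !apps.cpu apps.* system.*
```
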
### Changing the prefix of Netdata metrics

Netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:

```conf
[backend]
    prefix = netdata
```

It can also be changed from the URL, by appending `&prefix=netdata`.

### Metric Units

The default source `average` adds the unit of measurement to the name of each metric (e.g. `_KiB_persec`). To hide the
units and get the same metric names as with the other sources, append `&hideunits=yes` to the URL.

The units were standardized in v1.12, with the effect of changing the metric names. To get the metric names as they
were before v1.12, append `&oldunits=yes` to the URL.

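For example, to hide the units:

```sh
http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&hideunits=yes
```
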
### Accuracy of `average` and `sum` data sources

When the data source is set to `average` or `sum`, Netdata remembers the last access of each client accessing its
prometheus metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the
database since that time. This means that prometheus servers do not lose data when they access Netdata with data
source = `average` or `sum`.

To uniquely identify each prometheus server, Netdata uses the IP of the client accessing the metrics. If however the IP
is not good enough for identifying a single prometheus server (e.g. when prometheus servers are accessing Netdata
through a web proxy, or when multiple prometheus servers are NATed to a single IP), each prometheus may append
`&server=NAME` to the URL. This `NAME` is used by Netdata to uniquely identify each prometheus server and keep track of
its last access time.

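For example, a prometheus server can identify itself like this (the name `prometheus1` is just an illustration):

```sh
http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&server=prometheus1
```
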
@ -1,797 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#define BACKENDS_INTERNALS
|
|
||||||
#include "backend_prometheus.h"
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// PROMETHEUS
|
|
||||||
// /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts
|
|
||||||
|
|
||||||
static struct prometheus_server {
|
|
||||||
const char *server;
|
|
||||||
uint32_t hash;
|
|
||||||
RRDHOST *host;
|
|
||||||
time_t last_access;
|
|
||||||
struct prometheus_server *next;
|
|
||||||
} *prometheus_server_root = NULL;
|
|
||||||
|
|
||||||
static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now) {
|
|
||||||
static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER;
|
|
||||||
|
|
||||||
uint32_t hash = simple_hash(server);
|
|
||||||
|
|
||||||
netdata_mutex_lock(&prometheus_server_root_mutex);
|
|
||||||
|
|
||||||
struct prometheus_server *ps;
|
|
||||||
for(ps = prometheus_server_root; ps ;ps = ps->next) {
|
|
||||||
if (host == ps->host && hash == ps->hash && !strcmp(server, ps->server)) {
|
|
||||||
time_t last = ps->last_access;
|
|
||||||
ps->last_access = now;
|
|
||||||
netdata_mutex_unlock(&prometheus_server_root_mutex);
|
|
||||||
return last;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ps = callocz(1, sizeof(struct prometheus_server));
|
|
||||||
ps->server = strdupz(server);
|
|
||||||
ps->hash = hash;
|
|
||||||
ps->host = host;
|
|
||||||
ps->last_access = now;
|
|
||||||
ps->next = prometheus_server_root;
|
|
||||||
prometheus_server_root = ps;
|
|
||||||
|
|
||||||
netdata_mutex_unlock(&prometheus_server_root_mutex);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline size_t backends_prometheus_name_copy(char *d, const char *s, size_t usable) {
|
|
||||||
size_t n;
|
|
||||||
|
|
||||||
for(n = 0; *s && n < usable ; d++, s++, n++) {
|
|
||||||
register char c = *s;
|
|
||||||
|
|
||||||
if(!isalnum(c)) *d = '_';
|
|
||||||
else *d = c;
|
|
||||||
}
|
|
||||||
*d = '\0';
|
|
||||||
|
|
||||||
return n;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline size_t backends_prometheus_label_copy(char *d, const char *s, size_t usable) {
|
|
||||||
size_t n;
|
|
||||||
|
|
||||||
// make sure we can escape one character without overflowing the buffer
|
|
||||||
usable--;
|
|
||||||
|
|
||||||
for(n = 0; *s && n < usable ; d++, s++, n++) {
|
|
||||||
register char c = *s;
|
|
||||||
|
|
||||||
if(unlikely(c == '"' || c == '\\' || c == '\n')) {
|
|
||||||
*d++ = '\\';
|
|
||||||
n++;
|
|
||||||
}
|
|
||||||
*d = c;
|
|
||||||
}
|
|
||||||
*d = '\0';
|
|
||||||
|
|
||||||
return n;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline char *backends_prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits) {
|
|
||||||
const char *sorig = s;
|
|
||||||
char *ret = d;
|
|
||||||
size_t n;
|
|
||||||
|
|
||||||
// Fix for issue 5227
|
|
||||||
if (unlikely(showoldunits)) {
|
|
||||||
static struct {
|
|
||||||
const char *newunit;
|
|
||||||
uint32_t hash;
|
|
||||||
const char *oldunit;
|
|
||||||
} units[] = {
|
|
||||||
{"KiB/s", 0, "kilobytes/s"}
|
|
||||||
, {"MiB/s", 0, "MB/s"}
|
|
||||||
, {"GiB/s", 0, "GB/s"}
|
|
||||||
, {"KiB" , 0, "KB"}
|
|
||||||
, {"MiB" , 0, "MB"}
|
|
||||||
, {"GiB" , 0, "GB"}
|
|
||||||
, {"inodes" , 0, "Inodes"}
|
|
||||||
, {"percentage" , 0, "percent"}
|
|
||||||
, {"faults/s" , 0, "page faults/s"}
|
|
||||||
, {"KiB/operation", 0, "kilobytes per operation"}
|
|
||||||
, {"milliseconds/operation", 0, "ms per operation"}
|
|
||||||
, {NULL, 0, NULL}
|
|
||||||
};
|
|
||||||
static int initialized = 0;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
if(unlikely(!initialized)) {
|
|
||||||
for (i = 0; units[i].newunit; i++)
|
|
||||||
units[i].hash = simple_hash(units[i].newunit);
|
|
||||||
initialized = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
uint32_t hash = simple_hash(s);
|
|
||||||
for(i = 0; units[i].newunit ; i++) {
|
|
||||||
if(unlikely(hash == units[i].hash && !strcmp(s, units[i].newunit))) {
|
|
||||||
// info("matched extension for filename '%s': '%s'", filename, last_dot);
|
|
||||||
s=units[i].oldunit;
|
|
||||||
sorig = s;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*d++ = '_';
|
|
||||||
for(n = 1; *s && n < usable ; d++, s++, n++) {
|
|
||||||
register char c = *s;
|
|
||||||
|
|
||||||
if(!isalnum(c)) *d = '_';
|
|
||||||
else *d = c;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(n == 2 && sorig[0] == '%') {
|
|
||||||
n = 0;
|
|
||||||
d = ret;
|
|
||||||
s = "_percent";
|
|
||||||
for( ; *s && n < usable ; n++) *d++ = *s++;
|
|
||||||
}
|
|
||||||
else if(n > 3 && sorig[n-3] == '/' && sorig[n-2] == 's') {
|
|
||||||
n = n - 2;
|
|
||||||
d -= 2;
|
|
||||||
s = "_persec";
|
|
||||||
for( ; *s && n < usable ; n++) *d++ = *s++;
|
|
||||||
}
|
|
||||||
|
|
||||||
*d = '\0';
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
#define PROMETHEUS_ELEMENT_MAX 256
|
|
||||||
#define PROMETHEUS_LABELS_MAX 1024
|
|
||||||
#define PROMETHEUS_VARIABLE_MAX 256
|
|
||||||
|
|
||||||
#define PROMETHEUS_LABELS_MAX_NUMBER 128
|
|
||||||
|
|
||||||
struct host_variables_callback_options {
|
|
||||||
RRDHOST *host;
|
|
||||||
BUFFER *wb;
|
|
||||||
BACKEND_OPTIONS backend_options;
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options;
|
|
||||||
const char *prefix;
|
|
||||||
const char *labels;
|
|
||||||
time_t now;
|
|
||||||
int host_header_printed;
|
|
||||||
char name[PROMETHEUS_VARIABLE_MAX+1];
|
|
||||||
};
|
|
||||||
|
|
||||||
static int print_host_variables(RRDVAR *rv, void *data) {
|
|
||||||
struct host_variables_callback_options *opts = data;
|
|
||||||
|
|
||||||
if(rv->options & (RRDVAR_OPTION_CUSTOM_HOST_VAR|RRDVAR_OPTION_CUSTOM_CHART_VAR)) {
|
|
||||||
if(!opts->host_header_printed) {
|
|
||||||
opts->host_header_printed = 1;
|
|
||||||
|
|
||||||
if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP) {
|
|
||||||
buffer_sprintf(opts->wb, "\n# COMMENT global host and chart variables\n");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
calculated_number value = rrdvar2number(rv);
|
|
||||||
if(isnan(value) || isinf(value)) {
|
|
||||||
if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP)
|
|
||||||
buffer_sprintf(opts->wb, "# COMMENT variable \"%s\" is %s. Skipped.\n", rv->name, (isnan(value))?"NAN":"INF");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
char *label_pre = "";
|
|
||||||
char *label_post = "";
|
|
||||||
if(opts->labels && *opts->labels) {
|
|
||||||
label_pre = "{";
|
|
||||||
label_post = "}";
|
|
||||||
}
|
|
||||||
|
|
||||||
backends_prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
|
|
||||||
|
|
||||||
if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
|
|
||||||
buffer_sprintf(opts->wb
|
|
||||||
, "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n"
|
|
||||||
, opts->prefix
|
|
||||||
, opts->name
|
|
||||||
, label_pre
|
|
||||||
, opts->labels
|
|
||||||
, label_post
|
|
||||||
, value
|
|
||||||
, opts->now * 1000ULL
|
|
||||||
);
|
|
||||||
else
|
|
||||||
buffer_sprintf(opts->wb, "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT "\n"
|
|
||||||
, opts->prefix
|
|
||||||
, opts->name
|
|
||||||
, label_pre
|
|
||||||
, opts->labels
|
|
||||||
, label_post
|
|
||||||
, value
|
|
||||||
);
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER *wb, const char *prefix, BACKEND_OPTIONS backend_options, time_t after, time_t before, int allhosts, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options) {
|
|
||||||
rrdhost_rdlock(host);
|
|
||||||
|
|
||||||
char hostname[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
backends_prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
|
|
||||||
char labels[PROMETHEUS_LABELS_MAX + 1] = "";
|
|
||||||
if(allhosts) {
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
|
|
||||||
buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
|
|
||||||
else
|
|
||||||
buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n", hostname, host->program_name, host->program_version);
|
|
||||||
|
|
||||||
if(host->tags && *(host->tags)) {
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS) {
|
|
||||||
buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS);
|
|
||||||
|
|
||||||
// deprecated, exists only for compatibility with older queries
|
|
||||||
buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, host->tags);
|
|
||||||
|
|
||||||
// deprecated, exists only for compatibility with older queries
|
|
||||||
buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, host->tags);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
|
|
||||||
buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
|
|
||||||
else
|
|
||||||
buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n", hostname, host->program_name, host->program_version);
|
|
||||||
|
|
||||||
if(host->tags && *(host->tags)) {
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS) {
|
|
||||||
buffer_sprintf(wb, "netdata_host_tags_info{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
|
|
||||||
|
|
||||||
// deprecated, exists only for compatibility with older queries
|
|
||||||
buffer_sprintf(wb, "netdata_host_tags{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", host->tags);
|
|
||||||
|
|
||||||
// deprecated, exists only for compatibility with older queries
|
|
||||||
buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", host->tags);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// send custom variables set for the host
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_VARIABLES){
|
|
||||||
struct host_variables_callback_options opts = {
|
|
||||||
.host = host,
|
|
||||||
.wb = wb,
|
|
||||||
.labels = (labels[0] == ',')?&labels[1]:labels,
|
|
||||||
.backend_options = backend_options,
|
|
||||||
.output_options = output_options,
|
|
||||||
.prefix = prefix,
|
|
||||||
.now = now_realtime_sec(),
|
|
||||||
.host_header_printed = 0
|
|
||||||
};
|
|
||||||
foreach_host_variable_callback(host, print_host_variables, &opts);
|
|
||||||
}
|
|
||||||
|
|
||||||
// for each chart
|
|
||||||
RRDSET *st;
|
|
||||||
rrdset_foreach_read(st, host) {
|
|
||||||
char chart[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
char context[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
char family[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
|
|
||||||
|
|
||||||
backends_prometheus_label_copy(chart, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
|
|
||||||
if(likely(backends_can_send_rrdset(backend_options, st))) {
|
|
||||||
rrdset_rdlock(st);
|
|
||||||
|
|
||||||
int as_collected = (BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED);
|
|
||||||
int homogeneous = 1;
|
|
||||||
if(as_collected) {
|
|
||||||
if(rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
|
|
||||||
rrdset_update_heterogeneous_flag(st);
|
|
||||||
|
|
||||||
if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
|
|
||||||
homogeneous = 0;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE && !(output_options & BACKENDS_PROMETHEUS_OUTPUT_HIDEUNITS))
|
|
||||||
backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS);
|
|
||||||
}
|
|
||||||
|
|
||||||
if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
|
|
||||||
buffer_sprintf(wb, "\n# COMMENT %s chart \"%s\", context \"%s\", family \"%s\", units \"%s\"\n"
|
|
||||||
, (homogeneous)?"homogeneous":"heterogeneous"
|
|
||||||
, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id
|
|
||||||
, st->context
|
|
||||||
, st->family
|
|
||||||
, st->units
|
|
||||||
);
|
|
||||||
|
|
||||||
// for each dimension
|
|
||||||
RRDDIM *rd;
|
|
||||||
rrddim_foreach_read(rd, st) {
|
|
||||||
if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
|
|
||||||
char dimension[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
char *suffix = "";
|
|
||||||
|
|
||||||
if (as_collected) {
|
|
||||||
// we need as-collected / raw data
|
|
||||||
|
|
||||||
if(unlikely(rd->last_collected_time.tv_sec < after))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
const char *t = "gauge", *h = "gives";
|
|
||||||
if(rd->algorithm == RRD_ALGORITHM_INCREMENTAL ||
|
|
||||||
rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) {
|
|
||||||
t = "counter";
|
|
||||||
h = "delta gives";
|
|
||||||
suffix = "_total";
|
|
||||||
}
|
|
||||||
|
|
||||||
if(homogeneous) {
|
|
||||||
// all the dimensions of the chart, has the same algorithm, multiplier and divisor
|
|
||||||
// we add all dimensions as labels
|
|
||||||
|
|
||||||
backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
|
|
||||||
if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
|
|
||||||
buffer_sprintf(wb
|
|
||||||
, "# COMMENT %s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, suffix
|
|
||||||
, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id
|
|
||||||
, st->context
|
|
||||||
, st->family
|
|
||||||
, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id
|
|
||||||
, rd->multiplier
|
|
||||||
, rd->divisor
|
|
||||||
, h
|
|
||||||
, st->units
|
|
||||||
, t
|
|
||||||
);
|
|
||||||
|
|
||||||
if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_TYPES))
|
|
||||||
buffer_sprintf(wb, "# TYPE %s_%s%s %s\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, suffix
|
|
||||||
, t
|
|
||||||
);
|
|
||||||
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
|
|
||||||
buffer_sprintf(wb
|
|
||||||
, "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, suffix
|
|
||||||
, chart
|
|
||||||
, family
|
|
||||||
, dimension
|
|
||||||
, labels
|
|
||||||
, rd->last_collected_value
|
|
||||||
, timeval_msec(&rd->last_collected_time)
|
|
||||||
);
|
|
||||||
else
|
|
||||||
buffer_sprintf(wb
|
|
||||||
, "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT "\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, suffix
|
|
||||||
, chart
|
|
||||||
, family
|
|
||||||
, dimension
|
|
||||||
, labels
|
|
||||||
, rd->last_collected_value
|
|
||||||
);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
// the dimensions of the chart, do not have the same algorithm, multiplier or divisor
|
|
||||||
// we create a metric per dimension
|
|
||||||
|
|
||||||
backends_prometheus_name_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
|
|
||||||
if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
|
|
||||||
buffer_sprintf(wb
|
|
||||||
, "# COMMENT %s_%s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, dimension
|
|
||||||
, suffix
|
|
||||||
, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id
|
|
||||||
, st->context
|
|
||||||
, st->family
|
|
||||||
, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id
|
|
||||||
, rd->multiplier
|
|
||||||
, rd->divisor
|
|
||||||
, h
|
|
||||||
, st->units
|
|
||||||
, t
|
|
||||||
);
|
|
||||||
|
|
||||||
if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_TYPES))
|
|
||||||
buffer_sprintf(wb, "# TYPE %s_%s_%s%s %s\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, dimension
|
|
||||||
, suffix
|
|
||||||
, t
|
|
||||||
);
|
|
||||||
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
|
|
||||||
buffer_sprintf(wb
|
|
||||||
, "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, dimension
|
|
||||||
, suffix
|
|
||||||
, chart
|
|
||||||
, family
|
|
||||||
, labels
|
|
||||||
, rd->last_collected_value
|
|
||||||
, timeval_msec(&rd->last_collected_time)
|
|
||||||
);
|
|
||||||
else
|
|
||||||
buffer_sprintf(wb
|
|
||||||
, "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT "\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, dimension
|
|
||||||
, suffix
|
|
||||||
, chart
|
|
||||||
, family
|
|
||||||
, labels
|
|
||||||
, rd->last_collected_value
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
// we need average or sum of the data
|
|
||||||
|
|
||||||
time_t first_t = after, last_t = before;
|
|
||||||
calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
|
|
||||||
|
|
||||||
if(!isnan(value) && !isinf(value)) {
|
|
||||||
|
|
||||||
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
|
|
||||||
suffix = "_average";
|
|
||||||
else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
|
|
||||||
suffix = "_sum";
|
|
||||||
|
|
||||||
backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
|
|
||||||
if (unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
|
|
||||||
buffer_sprintf(wb, "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, units
|
|
||||||
, suffix
|
|
||||||
, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id
|
|
||||||
, st->units
|
|
||||||
, (unsigned long long)first_t
|
|
||||||
, (unsigned long long)last_t
|
|
||||||
);
|
|
||||||
|
|
||||||
if (unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_TYPES))
|
|
||||||
buffer_sprintf(wb, "# TYPE %s_%s%s%s gauge\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, units
|
|
||||||
, suffix
|
|
||||||
);
|
|
||||||
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
|
|
||||||
buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT " %llu\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, units
|
|
||||||
, suffix
|
|
||||||
, chart
|
|
||||||
, family
|
|
||||||
, dimension
|
|
||||||
, labels
|
|
||||||
, value
|
|
||||||
, last_t * MSEC_PER_SEC
|
|
||||||
);
|
|
||||||
else
|
|
||||||
buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT "\n"
|
|
||||||
, prefix
|
|
||||||
, context
|
|
||||||
, units
|
|
||||||
, suffix
|
|
||||||
, chart
|
|
||||||
, family
|
|
||||||
, dimension
|
|
||||||
, labels
|
|
||||||
, value
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rrdset_unlock(st);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rrdhost_unlock(host);
|
|
||||||
}
|
|
||||||
|
|
||||||
#if ENABLE_PROMETHEUS_REMOTE_WRITE
|
|
||||||
inline static void remote_write_split_words(char *str, char **words, int max_words) {
|
|
||||||
char *s = str;
|
|
||||||
int i = 0;
|
|
||||||
|
|
||||||
while(*s && i < max_words - 1) {
|
|
||||||
while(*s && isspace(*s)) s++; // skip spaces to the beginning of a tag name
|
|
||||||
|
|
||||||
if(*s)
|
|
||||||
words[i] = s;
|
|
||||||
|
|
||||||
while(*s && !isspace(*s) && *s != '=') s++; // find the end of the tag name
|
|
||||||
|
|
||||||
if(*s != '=') {
|
|
||||||
words[i] = NULL;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
*s = '\0';
|
|
||||||
s++;
|
|
||||||
i++;
|
|
||||||
|
|
||||||
while(*s && isspace(*s)) s++; // skip spaces to the beginning of a tag value
|
|
||||||
|
|
||||||
if(*s && *s == '"') s++; // strip an opening quote
|
|
||||||
if(*s)
|
|
||||||
words[i] = s;
|
|
||||||
|
|
||||||
while(*s && !isspace(*s) && *s != ',') s++; // find the end of the tag value
|
|
||||||
|
|
||||||
if(*s && *s != ',') {
|
|
||||||
words[i] = NULL;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if(s != words[i] && *(s - 1) == '"') *(s - 1) = '\0'; // strip a closing quote
|
|
||||||
if(*s != '\0') {
|
|
||||||
*s = '\0';
|
|
||||||
s++;
|
|
||||||
i++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void backends_rrd_stats_remote_write_allmetrics_prometheus(
|
|
||||||
RRDHOST *host
|
|
||||||
, const char *__hostname
|
|
||||||
, const char *prefix
|
|
||||||
, BACKEND_OPTIONS backend_options
|
|
||||||
, time_t after
|
|
||||||
, time_t before
|
|
||||||
, size_t *count_charts
|
|
||||||
, size_t *count_dims
|
|
||||||
, size_t *count_dims_skipped
|
|
||||||
) {
|
|
||||||
char hostname[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
backends_prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
|
|
||||||
backends_add_host_info("netdata_info", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
|
|
||||||
|
|
||||||
if(host->tags && *(host->tags)) {
|
|
||||||
char tags[PROMETHEUS_LABELS_MAX + 1];
|
|
||||||
strncpy(tags, host->tags, PROMETHEUS_LABELS_MAX);
|
|
||||||
char *words[PROMETHEUS_LABELS_MAX_NUMBER] = {NULL};
|
|
||||||
int i;
|
|
||||||
|
|
||||||
remote_write_split_words(tags, words, PROMETHEUS_LABELS_MAX_NUMBER);
|
|
||||||
|
|
||||||
backends_add_host_info("netdata_host_tags_info", hostname, NULL, NULL, now_realtime_usec() / USEC_PER_MS);
|
|
||||||
|
|
||||||
for(i = 0; words[i] != NULL && words[i + 1] != NULL && (i + 1) < PROMETHEUS_LABELS_MAX_NUMBER; i += 2) {
|
|
||||||
backends_add_tag(words[i], words[i + 1]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// for each chart
|
|
||||||
RRDSET *st;
|
|
||||||
rrdset_foreach_read(st, host) {
|
|
||||||
char chart[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
char context[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
char family[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
|
|
||||||
|
|
||||||
backends_prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
|
|
||||||
if(likely(backends_can_send_rrdset(backend_options, st))) {
|
|
||||||
rrdset_rdlock(st);
|
|
||||||
|
|
||||||
(*count_charts)++;
|
|
||||||
|
|
||||||
int as_collected = (BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED);
|
|
||||||
int homogeneous = 1;
|
|
||||||
if(as_collected) {
|
|
||||||
if(rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
|
|
||||||
rrdset_update_heterogeneous_flag(st);
|
|
||||||
|
|
||||||
if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
|
|
||||||
homogeneous = 0;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
|
|
||||||
backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
// for each dimension
|
|
||||||
RRDDIM *rd;
|
|
||||||
rrddim_foreach_read(rd, st) {
|
|
||||||
if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
|
|
||||||
char name[PROMETHEUS_LABELS_MAX + 1];
|
|
||||||
char dimension[PROMETHEUS_ELEMENT_MAX + 1];
|
|
||||||
char *suffix = "";
|
|
||||||
|
|
||||||
if (as_collected) {
|
|
||||||
// we need as-collected / raw data
|
|
||||||
|
|
||||||
if(unlikely(rd->last_collected_time.tv_sec < after)) {
|
|
||||||
debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
|
|
||||||
(*count_dims_skipped)++;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(homogeneous) {
|
|
||||||
// all the dimensions of the chart, has the same algorithm, multiplier and divisor
|
|
||||||
// we add all dimensions as labels
|
|
||||||
|
|
||||||
backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", prefix, context, suffix);
|
|
||||||
|
|
||||||
backends_add_metric(name, chart, family, dimension, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
|
|
||||||
(*count_dims)++;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
// the dimensions of the chart, do not have the same algorithm, multiplier or divisor
|
|
||||||
// we create a metric per dimension
|
|
||||||
|
|
||||||
backends_prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", prefix, context, dimension, suffix);
|
|
||||||
|
|
||||||
backends_add_metric(name, chart, family, NULL, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
|
|
||||||
(*count_dims)++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
// we need average or sum of the data
|
|
||||||
|
|
||||||
time_t first_t = after, last_t = before;
|
|
||||||
calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
|
|
||||||
|
|
||||||
if(!isnan(value) && !isinf(value)) {
|
|
||||||
|
|
||||||
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
|
|
||||||
suffix = "_average";
|
|
||||||
else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
|
|
||||||
suffix = "_sum";
|
|
||||||
|
|
||||||
backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
|
|
||||||
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", prefix, context, units, suffix);
|
|
||||||
|
|
||||||
backends_add_metric(name, chart, family, dimension, hostname, value, last_t * MSEC_PER_SEC);
|
|
||||||
(*count_dims)++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rrdset_unlock(st);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
|
|
||||||
|
|
||||||
static inline time_t prometheus_preparation(RRDHOST *host, BUFFER *wb, BACKEND_OPTIONS backend_options, const char *server, time_t now, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options) {
|
|
||||||
if(!server || !*server) server = "default";
|
|
||||||
|
|
||||||
time_t after = prometheus_server_last_access(server, host, now);
|
|
||||||
|
|
||||||
int first_seen = 0;
|
|
||||||
if(!after) {
|
|
||||||
after = now - global_backend_update_every;
|
|
||||||
first_seen = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(after > now) {
|
|
||||||
// oops! this should never happen
|
|
||||||
after = now - global_backend_update_every;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP) {
|
|
||||||
char *mode;
|
|
||||||
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
|
|
||||||
mode = "as collected";
|
|
||||||
else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
|
|
||||||
mode = "average";
|
|
||||||
else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
|
|
||||||
mode = "sum";
|
|
||||||
else
|
|
||||||
mode = "unknown";
|
|
||||||
|
|
||||||
buffer_sprintf(wb, "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s, time range %lu to %lu\n\n"
|
|
||||||
, host->hostname
|
|
||||||
, (first_seen)?"FIRST SEEN ":""
|
|
||||||
, server
|
|
||||||
, mode
|
|
||||||
, (unsigned long)((first_seen)?0:(now - after))
|
|
||||||
, (first_seen)?"never":"seconds ago"
|
|
||||||
, (unsigned long)after, (unsigned long)now
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return after;
|
|
||||||
}
|
|
||||||
|
|
||||||
void backends_rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options) {
|
|
||||||
time_t before = now_realtime_sec();
|
|
||||||
|
|
||||||
// we start at the point we had stopped before
|
|
||||||
time_t after = prometheus_preparation(host, wb, backend_options, server, before, output_options);
|
|
||||||
|
|
||||||
rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, backend_options, after, before, 0, output_options);
|
|
||||||
}
|
|
||||||
|
|
||||||
void backends_rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options) {
|
|
||||||
time_t before = now_realtime_sec();
|
|
||||||
|
|
||||||
// we start at the point we had stopped before
|
|
||||||
time_t after = prometheus_preparation(host, wb, backend_options, server, before, output_options);
|
|
||||||
|
|
||||||
rrd_rdlock();
|
|
||||||
rrdhost_foreach_read(host) {
|
|
||||||
rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, backend_options, after, before, 1, output_options);
|
|
||||||
}
|
|
||||||
rrd_unlock();
|
|
||||||
}
|
|
||||||
|
|
||||||
#if ENABLE_PROMETHEUS_REMOTE_WRITE
|
|
||||||
int backends_process_prometheus_remote_write_response(BUFFER *b) {
|
|
||||||
if(unlikely(!b)) return 1;
|
|
||||||
|
|
||||||
const char *s = buffer_tostring(b);
|
|
||||||
int len = buffer_strlen(b);
|
|
||||||
|
|
||||||
// do nothing with HTTP responses 200 or 204
|
|
||||||
|
|
||||||
while(!isspace(*s) && len) {
|
|
||||||
s++;
|
|
||||||
len--;
|
|
||||||
}
|
|
||||||
s++;
|
|
||||||
len--;
|
|
||||||
|
|
||||||
if(likely(len > 4 && (!strncmp(s, "200 ", 4) || !strncmp(s, "204 ", 4))))
|
|
||||||
return 0;
|
|
||||||
else
|
|
||||||
return discard_response(b, "prometheus remote write");
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
@ -1,37 +0,0 @@
|
|||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
#ifndef NETDATA_BACKEND_PROMETHEUS_H
|
|
||||||
#define NETDATA_BACKEND_PROMETHEUS_H 1
|
|
||||||
|
|
||||||
#include "backends/backends.h"
|
|
||||||
|
|
||||||
typedef enum backends_prometheus_output_flags {
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_NONE = 0,
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_HELP = (1 << 0),
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_TYPES = (1 << 1),
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_NAMES = (1 << 2),
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS = (1 << 3),
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_VARIABLES = (1 << 4),
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS = (1 << 5),
|
|
||||||
BACKENDS_PROMETHEUS_OUTPUT_HIDEUNITS = (1 << 6)
|
|
||||||
} BACKENDS_PROMETHEUS_OUTPUT_OPTIONS;
|
|
||||||
|
|
||||||
extern void backends_rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options);
|
|
||||||
extern void backends_rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options);
|
|
||||||
|
|
||||||
#if ENABLE_PROMETHEUS_REMOTE_WRITE
|
|
||||||
extern void backends_rrd_stats_remote_write_allmetrics_prometheus(
|
|
||||||
RRDHOST *host
|
|
||||||
, const char *__hostname
|
|
||||||
, const char *prefix
|
|
||||||
, BACKEND_OPTIONS backend_options
|
|
||||||
, time_t after
|
|
||||||
, time_t before
|
|
||||||
, size_t *count_charts
|
|
||||||
, size_t *count_dims
|
|
||||||
, size_t *count_dims_skipped
|
|
||||||
);
|
|
||||||
extern int backends_process_prometheus_remote_write_response(BUFFER *b);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif //NETDATA_BACKEND_PROMETHEUS_H
|
|
||||||
@ -1,14 +0,0 @@
|
|||||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
AUTOMAKE_OPTIONS = subdir-objects
|
|
||||||
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
|
|
||||||
|
|
||||||
CLEANFILES = \
|
|
||||||
remote_write.pb.cc \
|
|
||||||
remote_write.pb.h \
|
|
||||||
$(NULL)
|
|
||||||
|
|
||||||
dist_noinst_DATA = \
|
|
||||||
remote_write.proto \
|
|
||||||
README.md \
|
|
||||||
$(NULL)
|
|
||||||
@@ -1,41 +0,0 @@
-<!--
-title: "Prometheus remote write backend"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/prometheus/remote_write/README.md
--->
-
-# Prometheus remote write backend
-
-## Prerequisites
-
-To use the prometheus remote write API with [storage
-providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage)
-[protobuf](https://developers.google.com/protocol-buffers/) and [snappy](https://github.com/google/snappy) libraries
-should be installed first. Next, Netdata should be re-installed from the source. The installer will detect that the
-required libraries and utilities are now available.
-
-## Configuration
-
-An additional option in the backend configuration section is available for the remote write backend:
-
-```conf
-[backend]
-    remote write URL path = /receive
-```
-
-The default value is `/receive`. `remote write URL path` is used to set an endpoint path for the remote write protocol.
-For example, if your endpoint is `http://example.domain:example_port/storage/read` you should set
-
-```conf
-[backend]
-    destination = example.domain:example_port
-    remote write URL path = /storage/read
-```
-
-`buffered` and `lost` dimensions in the Netdata Backend Data Size operation monitoring chart estimate uncompressed
-buffer size on failures.
-
-## Notes
-
-The remote write backend does not support `buffer on failures`
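The removed README above splits one endpoint URL across two settings: the host and port go into `destination`, everything after them into `remote write URL path`. A tiny standalone C illustration of that split, written only for this note (the parsing helper is invented for the example, not part of Netdata):

```c
#include <stdio.h>
#include <string.h>

/* Split "host:port/path" into a destination and a URL path. */
static void split_endpoint(const char *endpoint, char *dest, size_t dest_len, const char **path) {
    const char *slash = strchr(endpoint, '/');
    size_t n = slash ? (size_t)(slash - endpoint) : strlen(endpoint);

    if (n >= dest_len) n = dest_len - 1;
    memcpy(dest, endpoint, n);
    dest[n] = '\0';
    *path = slash ? slash : "/receive";   /* the default path documented above */
}

int main(void) {
    char dest[128];
    const char *path;
    split_endpoint("example.domain:example_port/storage/read", dest, sizeof(dest), &path);
    printf("destination = %s\nremote write URL path = %s\n", dest, path);
    return 0;
}
```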
@@ -1,120 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <snappy.h>
-#include "exporting/prometheus/remote_write/remote_write.pb.h"
-#include "remote_write.h"
-
-using namespace prometheus;
-
-static google::protobuf::Arena arena;
-static WriteRequest *write_request;
-
-void backends_init_write_request() {
-    GOOGLE_PROTOBUF_VERIFY_VERSION;
-    write_request = google::protobuf::Arena::CreateMessage<WriteRequest>(&arena);
-}
-
-void backends_clear_write_request() {
-    write_request->clear_timeseries();
-}
-
-void backends_add_host_info(const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp) {
-    TimeSeries *timeseries;
-    Sample *sample;
-    Label *label;
-
-    timeseries = write_request->add_timeseries();
-
-    label = timeseries->add_labels();
-    label->set_name("__name__");
-    label->set_value(name);
-
-    label = timeseries->add_labels();
-    label->set_name("instance");
-    label->set_value(instance);
-
-    if(application) {
-        label = timeseries->add_labels();
-        label->set_name("application");
-        label->set_value(application);
-    }
-
-    if(version) {
-        label = timeseries->add_labels();
-        label->set_name("version");
-        label->set_value(version);
-    }
-
-    sample = timeseries->add_samples();
-    sample->set_value(1);
-    sample->set_timestamp(timestamp);
-}
-
-// adds tag to the last created timeseries
-void backends_add_tag(char *tag, char *value) {
-    TimeSeries *timeseries;
-    Label *label;
-
-    timeseries = write_request->mutable_timeseries(write_request->timeseries_size() - 1);
-
-    label = timeseries->add_labels();
-    label->set_name(tag);
-    label->set_value(value);
-}
-
-void backends_add_metric(const char *name, const char *chart, const char *family, const char *dimension, const char *instance, const double value, const int64_t timestamp) {
-    TimeSeries *timeseries;
-    Sample *sample;
-    Label *label;
-
-    timeseries = write_request->add_timeseries();
-
-    label = timeseries->add_labels();
-    label->set_name("__name__");
-    label->set_value(name);
-
-    label = timeseries->add_labels();
-    label->set_name("chart");
-    label->set_value(chart);
-
-    label = timeseries->add_labels();
-    label->set_name("family");
-    label->set_value(family);
-
-    if(dimension) {
-        label = timeseries->add_labels();
-        label->set_name("dimension");
-        label->set_value(dimension);
-    }
-
-    label = timeseries->add_labels();
-    label->set_name("instance");
-    label->set_value(instance);
-
-    sample = timeseries->add_samples();
-    sample->set_value(value);
-    sample->set_timestamp(timestamp);
-}
-
-size_t backends_get_write_request_size(){
-#if GOOGLE_PROTOBUF_VERSION < 3001000
-    size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSize());
-#else
-    size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSizeLong());
-#endif
-
-    return (size < INT_MAX)?size:0;
-}
-
-int backends_pack_write_request(char *buffer, size_t *size) {
-    std::string uncompressed_write_request;
-    if(write_request->SerializeToString(&uncompressed_write_request) == false) return 1;
-
-    snappy::RawCompress(uncompressed_write_request.data(), uncompressed_write_request.size(), buffer, size);
-
-    return 0;
-}
-
-void backends_protocol_buffers_shutdown() {
-    google::protobuf::ShutdownProtobufLibrary();
-}
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H
-#define NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void backends_init_write_request();
-
-void backends_clear_write_request();
-
-void backends_add_host_info(const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
-
-void backends_add_tag(char *tag, char *value);
-
-void backends_add_metric(const char *name, const char *chart, const char *family, const char *dimension, const char *instance, const double value, const int64_t timestamp);
-
-size_t backends_get_write_request_size();
-
-int backends_pack_write_request(char *buffer, size_t *size);
-
-void backends_protocol_buffers_shutdown();
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif //NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H
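The deleted header above was the whole C-visible surface of the remote-write buffer: initialize the request once, append one time series per host or metric, ask for an upper bound on the compressed size, then pack the snappy-compressed protobuf into a caller-owned buffer. A rough sketch of that call order, using only the functions declared above; the surrounding buffer handling and the metric values are assumptions for illustration, not the removed backend's actual send loop:

```c
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include "remote_write.h"   /* the deleted header shown above */

/* Hypothetical helper: build one remote-write payload for a single metric. */
static int build_example_request(char **out, size_t *out_len) {
    int64_t now_ms = (int64_t)time(NULL) * 1000;

    backends_init_write_request();
    backends_add_host_info("netdata_info", "myhost", "netdata", "v1.32.0", now_ms);
    backends_add_tag("environment", "staging");          /* extra label on the last time series */
    backends_add_metric("netdata_system_cpu_user_average", "system.cpu", "cpu",
                        "user", "myhost", 12.5, now_ms);

    size_t size = backends_get_write_request_size();     /* snappy worst-case length */
    if (!size) return 1;

    char *buffer = malloc(size);
    if (!buffer) return 1;

    if (backends_pack_write_request(buffer, &size)) {    /* size becomes the real length */
        free(buffer);
        return 1;
    }

    *out = buffer;                                       /* ready to POST with snappy encoding */
    *out_len = size;
    backends_clear_write_request();                      /* reuse the arena on the next cycle */
    return 0;
}
```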
@@ -1,29 +0,0 @@
-syntax = "proto3";
-package prometheus;
-
-option cc_enable_arenas = true;
-
-import "google/protobuf/descriptor.proto";
-
-message WriteRequest {
-    repeated TimeSeries timeseries = 1 [(nullable) = false];
-}
-
-message TimeSeries {
-    repeated Label labels = 1 [(nullable) = false];
-    repeated Sample samples = 2 [(nullable) = false];
-}
-
-message Label {
-    string name = 1;
-    string value = 2;
-}
-
-message Sample {
-    double value = 1;
-    int64 timestamp = 2;
-}
-
-extend google.protobuf.FieldOptions {
-    bool nullable = 65001;
-}
@@ -138,21 +138,6 @@
 [cloud]
     # cloud base url = https://netdata.cloud
 
-[backend]
-    # host tags =
-    # enabled = no
-    # data source = average
-    # type = graphite
-    # destination = localhost
-    # prefix = netdata
-    # hostname = b073e16793c4
-    # update every = 10
-    # buffer on failures = 10
-    # timeout ms = 20000
-    # send names instead of ids = yes
-    # send charts matching = *
-    # send hosts matching = localhost *
-
 [statsd]
     # enabled = yes
     # update every (flushInterval) = 1
@@ -40,10 +40,8 @@ our [collectors' configuration reference](/collectors/REFERENCE.md).
 **[Dashboards](/web/README.md)**: Visualize your newly-collect metrics in real-time using Netdata's [built-in
 dashboard](/web/gui/README.md).
 
-**[Backends](/backends/README.md)**: Extend our built-in [database engine](/database/engine/README.md), which supports
-long-term metrics storage, by archiving metrics to like Graphite, Prometheus, MongoDB, TimescaleDB, and more.
-
-**[Exporting](/exporting/README.md)**: An experimental refactoring of our backends system with a modular system and
-support for exporting metrics to multiple systems simultaneously.
+**[Exporting](/exporting/README.md)**: Extend our built-in [database engine](/database/engine/README.md), which supports
+long-term metrics storage, by archiving metrics to external databases like Graphite, Prometheus, MongoDB, TimescaleDB, and more.
+It can export metrics to multiple databases simultaneously.
 
 
@@ -231,7 +231,7 @@ the template is:
 
 - `options`
 
-  a space separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (Netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or somehow visualize properly a less important chart), `store_first` to make Netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first data collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but do not offer it on dashboards (the chart will be send to backends). `CHART` options have been added in Netdata v1.7 and the `hidden` option was added in 1.10.
+  a space separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (Netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or somehow visualize properly a less important chart), `store_first` to make Netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first data collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but do not offer it on dashboards (the chart will be send to external databases). `CHART` options have been added in Netdata v1.7 and the `hidden` option was added in 1.10.
 
 - `plugin` and `module`
 
@@ -252,7 +252,7 @@ the template is:
   the `id` of this dimension (it is a text value, not numeric),
   this will be needed later to add values to the dimension
 
-  We suggest to avoid using `.` in dimension ids. Backends expect metrics to be `.` separated and people will get confused if a dimension id contains a dot.
+  We suggest to avoid using `.` in dimension ids. External databases expect metrics to be `.` separated and people will get confused if a dimension id contains a dot.
 
 - `name`
 
@@ -43,7 +43,7 @@
 # When a plugin sends the obsolete flag, the charts are not deleted
 # from netdata immediately.
 # They will be hidden immediately (not offered to dashboard viewer,
-# streamed upstream and archived to backends) and deleted one hour
+# streamed upstream and archived to external databases) and deleted one hour
 # later (configurable from netdata.conf).
 # chart_cleanup: 10
 
@@ -35,7 +35,7 @@
 # A chart is marked as obsolete if it has not been updated
 # 'chart_cleanup' iterations in a row.
 # They will be hidden immediately (not offered to dashboard viewer,
-# streamed upstream and archived to backends) and deleted one hour
+# streamed upstream and archived to external databases) and deleted one hour
 # later (configurable from netdata.conf).
 # -- For this plugin, cleanup MUST be disabled, otherwise we lose response
 # time charts
@@ -35,7 +35,7 @@
 # A chart is marked as obsolete if it has not been updated
 # 'chart_cleanup' iterations in a row.
 # They will be hidden immediately (not offered to dashboard viewer,
-# streamed upstream and archived to backends) and deleted one hour
+# streamed upstream and archived to external databases) and deleted one hour
 # later (configurable from netdata.conf).
 # -- For this plugin, cleanup MUST be disabled, otherwise we lose latency chart
 chart_cleanup: 0
94
configure.ac
@@ -71,10 +71,10 @@ AC_ARG_ENABLE(
     [enable_plugin_xenstat="detect"]
 )
 AC_ARG_ENABLE(
-    [backend-kinesis],
+    [exporting-kinesis],
-    [AS_HELP_STRING([--enable-backend-kinesis], [enable kinesis backend @<:@default autodetect@:>@])],
+    [AS_HELP_STRING([--enable-exporting-kinesis], [enable kinesis exporting connector @<:@default autodetect@:>@])],
     ,
-    [enable_backend_kinesis="detect"]
+    [enable_exporting_kinesis="detect"]
 )
 AC_ARG_ENABLE(
     [exporting-pubsub],
@@ -83,16 +83,16 @@ AC_ARG_ENABLE(
     [enable_exporting_pubsub="detect"]
 )
 AC_ARG_ENABLE(
-    [backend-prometheus-remote-write],
+    [exporting-prometheus-remote-write],
-    [AS_HELP_STRING([--enable-backend-prometheus-remote-write], [enable prometheus remote write backend @<:@default autodetect@:>@])],
+    [AS_HELP_STRING([--enable-exporting-prometheus-remote-write], [enable prometheus remote write exporting connector @<:@default autodetect@:>@])],
     ,
-    [enable_backend_prometheus_remote_write="detect"]
+    [enable_exporting_prometheus_remote_write="detect"]
 )
 AC_ARG_ENABLE(
-    [backend-mongodb],
+    [exporting-mongodb],
-    [AS_HELP_STRING([--enable-backend-mongodb], [enable mongodb backend @<:@default autodetect@:>@])],
+    [AS_HELP_STRING([--enable-exporting-mongodb], [enable mongodb exporting @<:@default autodetect@:>@])],
     ,
-    [enable_backend_mongodb="detect"]
+    [enable_exporting_mongodb="detect"]
 )
 AC_ARG_ENABLE(
     [pedantic],
@@ -1246,7 +1246,7 @@ AM_CONDITIONAL([ENABLE_PLUGIN_SLABINFO], [test "${enable_plugin_slabinfo}" = "ye
 
 
 # -----------------------------------------------------------------------------
-# AWS Kinesis backend - libaws-cpp-sdk-kinesis, libaws-cpp-sdk-core, libssl, libcrypto, libcurl
+# AWS Kinesis exporting connector - libaws-cpp-sdk-kinesis, libaws-cpp-sdk-core, libssl, libcrypto, libcurl
 
 PKG_CHECK_MODULES(
     [LIBCRYPTO],
@@ -1298,39 +1298,39 @@ PKG_CHECK_MODULES(
     [have_libaws_cpp_sdk_kinesis=no]
 )
 
-test "${enable_backend_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_kinesis}" != "yes" && \
+test "${enable_exporting_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_kinesis}" != "yes" && \
     AC_MSG_ERROR([libaws-cpp-sdk-kinesis required but not found. try installing AWS C++ SDK])
 
-test "${enable_backend_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_core}" != "yes" && \
+test "${enable_exporting_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_core}" != "yes" && \
     AC_MSG_ERROR([libaws-cpp-sdk-core required but not found. try installing AWS C++ SDK])
 
-test "${enable_backend_kinesis}" = "yes" -a "${have_libcurl}" != "yes" && \
+test "${enable_exporting_kinesis}" = "yes" -a "${have_libcurl}" != "yes" && \
     AC_MSG_ERROR([libcurl required but not found])
 
-test "${enable_backend_kinesis}" = "yes" -a "${have_libssl}" != "yes" && \
+test "${enable_exporting_kinesis}" = "yes" -a "${have_libssl}" != "yes" && \
    AC_MSG_ERROR([libssl required but not found])
 
-test "${enable_backend_kinesis}" = "yes" -a "${have_libcrypto}" != "yes" && \
+test "${enable_exporting_kinesis}" = "yes" -a "${have_libcrypto}" != "yes" && \
    AC_MSG_ERROR([libcrypto required but not found])
 
-AC_MSG_CHECKING([if kinesis backend should be enabled])
+AC_MSG_CHECKING([if kinesis exporting connector should be enabled])
-if test "${enable_backend_kinesis}" != "no" -a "${have_libaws_cpp_sdk_kinesis}" = "yes" \
+if test "${enable_exporting_kinesis}" != "no" -a "${have_libaws_cpp_sdk_kinesis}" = "yes" \
     -a "${have_libaws_cpp_sdk_core}" = "yes" \
     -a "${have_libcurl}" = "yes" \
    -a "${have_libssl}" = "yes" \
    -a "${have_libcrypto}" = "yes"; then
-    enable_backend_kinesis="yes"
+    enable_exporting_kinesis="yes"
     AC_DEFINE([HAVE_KINESIS], [1], [libaws-cpp-sdk-kinesis usability])
     OPTIONAL_KINESIS_CFLAGS="${LIBCRYPTO_CFLAGS} ${LIBSSL_CFLAGS} ${LIBCURL_CFLAGS}"
     CXX11FLAG="${AWS_CPP_SDK_KINESIS_CFLAGS} ${AWS_CPP_SDK_CORE_CFLAGS}"
     OPTIONAL_KINESIS_LIBS="${AWS_CPP_SDK_KINESIS_LIBS} ${AWS_CPP_SDK_CORE_LIBS} \
        ${LIBCRYPTO_LIBS} ${LIBSSL_LIBS} ${LIBCURL_LIBS}"
 else
-    enable_backend_kinesis="no"
+    enable_exporting_kinesis="no"
 fi
 
-AC_MSG_RESULT([${enable_backend_kinesis}])
+AC_MSG_RESULT([${enable_exporting_kinesis}])
-AM_CONDITIONAL([ENABLE_BACKEND_KINESIS], [test "${enable_backend_kinesis}" = "yes"])
+AM_CONDITIONAL([ENABLE_EXPORTING_KINESIS], [test "${enable_exporting_kinesis}" = "yes"])
 
 
 # -----------------------------------------------------------------------------
@@ -1365,7 +1365,7 @@ test "${enable_pubsub}" = "yes" -a "${have_grpc}" != "yes" && \
 test "${enable_pubsub}" = "yes" -a "${have_pubsub_protos}" != "yes" && \
     AC_MSG_ERROR([libgoogleapis_cpp_pubsub_protos required but not found. try installing googleapis])
 
-test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_CXX_compiler}" != "yes" && \
+test "${enable_exporting_prometheus_remote_write}" = "yes" -a "${have_CXX_compiler}" != "yes" && \
     AC_MSG_ERROR([C++ compiler required but not found. try installing g++])
 
 AC_MSG_CHECKING([if pubsub exporting connector should be enabled])
@@ -1384,7 +1384,7 @@ AM_CONDITIONAL([ENABLE_EXPORTING_PUBSUB], [test "${enable_exporting_pubsub}" = "
 
 
 # -----------------------------------------------------------------------------
-# Prometheus remote write backend - libprotobuf, libsnappy, protoc
+# Prometheus remote write exporting connector - libprotobuf, libsnappy, protoc
 
 AC_MSG_CHECKING([for snappy::RawCompress in -lsnappy])
 
@@ -1421,22 +1421,22 @@ AC_MSG_CHECKING([for snappy::RawCompress in -lsnappy])
 
 AC_MSG_RESULT([${have_libsnappy}])
 
-test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libprotobuf}" != "yes" && \
+test "${enable_exporting_prometheus_remote_write}" = "yes" -a "${have_libprotobuf}" != "yes" && \
     AC_MSG_ERROR([libprotobuf required but not found. try installing protobuf])
 
-test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libsnappy}" != "yes" && \
+test "${enable_exporting_prometheus_remote_write}" = "yes" -a "${have_libsnappy}" != "yes" && \
     AC_MSG_ERROR([libsnappy required but not found. try installing snappy])
 
-test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_protoc}" != "yes" && \
+test "${enable_exporting_prometheus_remote_write}" = "yes" -a "${have_protoc}" != "yes" && \
     AC_MSG_ERROR([protoc compiler required but not found. try installing protobuf])
 
-test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_CXX_compiler}" != "yes" && \
+test "${enable_exporting_prometheus_remote_write}" = "yes" -a "${have_CXX_compiler}" != "yes" && \
     AC_MSG_ERROR([C++ compiler required but not found. try installing g++])
 
-AC_MSG_CHECKING([if prometheus remote write backend should be enabled])
+AC_MSG_CHECKING([if prometheus remote write exporting connector should be enabled])
-if test "${enable_backend_prometheus_remote_write}" != "no" -a "${have_libprotobuf}" = "yes" -a "${have_libsnappy}" = "yes" \
+if test "${enable_exporting_prometheus_remote_write}" != "no" -a "${have_libprotobuf}" = "yes" -a "${have_libsnappy}" = "yes" \
     -a "${have_protoc}" = "yes" -a "${have_CXX_compiler}" = "yes"; then
-    enable_backend_prometheus_remote_write="yes"
+    enable_exporting_prometheus_remote_write="yes"
     AC_DEFINE([ENABLE_PROMETHEUS_REMOTE_WRITE], [1], [Prometheus remote write API usability])
     OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS="${SNAPPY_CFLAGS} -I \$(abs_top_srcdir)/exporting/prometheus/remote_write"
     CXX11FLAG="-std=c++11"
|
|||||||
OPTIONAL_PROTOBUF_CFLAGS="${PROTOBUF_CFLAGS}"
|
OPTIONAL_PROTOBUF_CFLAGS="${PROTOBUF_CFLAGS}"
|
||||||
OPTIONAL_PROTOBUF_LIBS="${PROTOBUF_LIBS}"
|
OPTIONAL_PROTOBUF_LIBS="${PROTOBUF_LIBS}"
|
||||||
else
|
else
|
||||||
enable_backend_prometheus_remote_write="no"
|
enable_exporting_prometheus_remote_write="no"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
AC_MSG_RESULT([${enable_backend_prometheus_remote_write}])
|
AC_MSG_RESULT([${enable_exporting_prometheus_remote_write}])
|
||||||
AM_CONDITIONAL([ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE], [test "${enable_backend_prometheus_remote_write}" = "yes"])
|
AM_CONDITIONAL([ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE], [test "${enable_exporting_prometheus_remote_write}" = "yes"])
|
||||||
|
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
# MongoDB backend - libmongoc
|
# MongoDB exporting connector - libmongoc
|
||||||
|
|
||||||
PKG_CHECK_MODULES(
|
PKG_CHECK_MODULES(
|
||||||
[LIBMONGOC],
|
[LIBMONGOC],
|
||||||
@@ -1461,21 +1461,21 @@ PKG_CHECK_MODULES(
     [have_libmongoc=no]
 )
 
-test "${enable_backend_mongodb}" = "yes" -a "${have_libmongoc}" != "yes" && \
+test "${enable_exporting_mongodb}" = "yes" -a "${have_libmongoc}" != "yes" && \
     AC_MSG_ERROR([libmongoc required but not found. Try installing `mongoc`.])
 
-AC_MSG_CHECKING([if mongodb backend should be enabled])
+AC_MSG_CHECKING([if mongodb exporting connector should be enabled])
-if test "${enable_backend_mongodb}" != "no" -a "${have_libmongoc}" = "yes"; then
+if test "${enable_exporting_mongodb}" != "no" -a "${have_libmongoc}" = "yes"; then
-    enable_backend_mongodb="yes"
+    enable_exporting_mongodb="yes"
     AC_DEFINE([HAVE_MONGOC], [1], [libmongoc usability])
     OPTIONAL_MONGOC_CFLAGS="${LIBMONGOC_CFLAGS}"
     OPTIONAL_MONGOC_LIBS="${LIBMONGOC_LIBS}"
 else
-    enable_backend_mongodb="no"
+    enable_exporting_mongodb="no"
 fi
 
-AC_MSG_RESULT([${enable_backend_mongodb}])
+AC_MSG_RESULT([${enable_exporting_mongodb}])
-AM_CONDITIONAL([ENABLE_BACKEND_MONGODB], [test "${enable_backend_mongodb}" = "yes"])
+AM_CONDITIONAL([ENABLE_EXPORTING_MONGODB], [test "${enable_exporting_mongodb}" = "yes"])
 
 
 # -----------------------------------------------------------------------------
@@ -1523,9 +1523,9 @@ AC_MSG_RESULT([${enable_lto}])
 
 # -----------------------------------------------------------------------------
 
-if test "${enable_backend_kinesis}" = "yes" -o \
+if test "${enable_exporting_kinesis}" = "yes" -o \
     "${enable_exporting_pubsub}" = "yes" -o \
-    "${enable_backend_prometheus_remote_write}" = "yes" -o \
+    "${enable_exporting_prometheus_remote_write}" = "yes" -o \
     "${new_cloud_protocol}" = "yes" -o \
     "${build_ml}" = "yes"; then
     enable_cxx_linker="yes"
@@ -1688,14 +1688,6 @@ AC_DEFINE_UNQUOTED(
 AC_CONFIG_FILES([
     Makefile
     netdata.spec
-    backends/graphite/Makefile
-    backends/json/Makefile
-    backends/Makefile
-    backends/opentsdb/Makefile
-    backends/prometheus/Makefile
-    backends/prometheus/remote_write/Makefile
-    backends/aws_kinesis/Makefile
-    backends/mongodb/Makefile
     collectors/Makefile
     collectors/apps.plugin/Makefile
     collectors/cgroups.plugin/Makefile
@@ -175,11 +175,11 @@ OTHER_OPTIONS+=" --enable-jsonc"
 OTHER_OPTIONS+=" --enable-plugin-nfacct"
 OTHER_OPTIONS+=" --enable-plugin-freeipmi"
 OTHER_OPTIONS+=" --enable-plugin-cups"
-OTHER_OPTIONS+=" --enable-backend-prometheus-remote-write"
+OTHER_OPTIONS+=" --enable-exporting-prometheus-remote-write"
 # TODO: enable these plugins too
 #OTHER_OPTIONS+=" --enable-plugin-xenstat"
-#OTHER_OPTIONS+=" --enable-backend-kinesis"
+#OTHER_OPTIONS+=" --enable-exporting-kinesis"
-#OTHER_OPTIONS+=" --enable-backend-mongodb"
+#OTHER_OPTIONS+=" --enable-exporting-mongodb"
 
 FOUND_OPTS="NO"
 while [ -n "${1}" ]; do
@@ -51,9 +51,7 @@
 // the registry is actually an API feature
 #include "registry/registry.h"
 
-// backends for archiving the metrics
+// exporting engine for archiving the metrics
-#include "backends/backends.h"
-// the new exporting engine for archiving the metrics
 #include "exporting/exporting_engine.h"
 
 // the netdata API
@@ -19,10 +19,9 @@ This config file **is not needed by default**. Netdata works fine out of the box
    settings.
 4. `[health]` to [configure](#health-section-options) general settings for [health monitoring](/health/README.md)
 5. `[registry]` for the [Netdata registry](/registry/README.md).
-6. `[backend]` to set up [streaming and replication](/streaming/README.md) options.
-7. `[statsd]` for the general settings of the [stats.d.plugin](/collectors/statsd.plugin/README.md).
-8. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
-9. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
+6. `[statsd]` for the general settings of the [stats.d.plugin](/collectors/statsd.plugin/README.md).
+7. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
+8. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
 
 The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When you check the running configuration by accessing the URL `/netdata.conf` on your Netdata server, Netdata will add a comment on settings it does not currently use.
 
@@ -129,10 +128,6 @@ monitoring](/health/README.md).
 
 To understand what this section is and how it should be configured, please refer to the [registry documentation](/registry/README.md).
 
-### [backend]
-
-Refer to the [streaming and replication](/streaming/README.md) documentation.
-
 ## Per-plugin configuration
 
 The configuration options for plugins appear in sections following the pattern `[plugin:NAME]`.
@@ -451,9 +451,6 @@ static void backwards_compatible_config() {
 
     config_move(CONFIG_SECTION_GLOBAL, "web files group",
                 CONFIG_SECTION_WEB, "web files group");
-
-    config_move(CONFIG_SECTION_BACKEND, "opentsdb host tags",
-                CONFIG_SECTION_BACKEND, "host tags");
 }
 
 static void get_netdata_configured_variables() {
@@ -85,15 +85,6 @@ const struct netdata_static_thread static_threads_common[] = {
         .init_routine = NULL,
         .start_routine = statsd_main
     },
-    {
-        .name = "BACKENDS",
-        .config_section = NULL,
-        .config_name = NULL,
-        .enabled = 1,
-        .thread = NULL,
-        .init_routine = NULL,
-        .start_routine = backends_main
-    },
     {
         .name = "EXPORTING",
         .config_section = NULL,
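The removed entry above shows the shape of Netdata's static thread table: each subsystem registers a name, an enable flag, and a start routine, and the daemon spawns one thread per enabled entry. A self-contained sketch of the same table-of-function-pointers pattern; the struct layout and the `fake_*_main` routines here are illustrative stand-ins, not netdata's actual definitions:

```c
#include <stdio.h>

struct static_thread {
    const char *name;
    int enabled;
    void *(*start_routine)(void *);
};

static void *fake_statsd_main(void *arg)    { (void)arg; puts("statsd thread");    return NULL; }
static void *fake_exporting_main(void *arg) { (void)arg; puts("exporting thread"); return NULL; }

static const struct static_thread threads[] = {
    { .name = "STATSD",    .enabled = 1, .start_routine = fake_statsd_main    },
    { .name = "EXPORTING", .enabled = 1, .start_routine = fake_exporting_main },
    { .name = NULL }   /* terminator, like the real table */
};

int main(void) {
    /* the daemon walks the table and starts every enabled entry */
    for (const struct static_thread *t = threads; t->name; t++)
        if (t->enabled)
            t->start_routine(NULL);   /* in netdata this becomes a pthread_create() */
    return 0;
}
```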
@@ -1521,7 +1521,7 @@ static RRDHOST *dbengine_rrdhost_find_or_create(char *name)
         , netdata_configured_timezone
         , netdata_configured_abbrev_timezone
         , netdata_configured_utc_offset
-        , config_get(CONFIG_SECTION_BACKEND, "host tags", "")
+        , ""
         , program_name
         , program_version
         , default_rrd_update_every
@@ -461,23 +461,21 @@ typedef enum rrdset_flags {
                                        // (the master data set should be the one that has the same family and is not detail)
     RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart
     RRDSET_FLAG_OBSOLETE = 1 << 3, // this is marked by the collector/module as obsolete
-    RRDSET_FLAG_EXPORTING_SEND = 1 << 4, // if set, this chart should be sent to Prometheus web API
+    RRDSET_FLAG_EXPORTING_SEND = 1 << 4, // if set, this chart should be sent to Prometheus web API and external databases
-    RRDSET_FLAG_EXPORTING_IGNORE = 1 << 5, // if set, this chart should not be sent to Prometheus web API
+    RRDSET_FLAG_EXPORTING_IGNORE = 1 << 5, // if set, this chart should not be sent to Prometheus web API and external databases
     RRDSET_FLAG_UPSTREAM_SEND = 1 << 6, // if set, this chart should be sent upstream (streaming)
     RRDSET_FLAG_UPSTREAM_IGNORE = 1 << 7, // if set, this chart should not be sent upstream (streaming)
     RRDSET_FLAG_UPSTREAM_EXPOSED = 1 << 8, // if set, we have sent this chart definition to netdata parent (streaming)
     RRDSET_FLAG_STORE_FIRST = 1 << 9, // if set, do not eliminate the first collection during interpolation
     RRDSET_FLAG_HETEROGENEOUS = 1 << 10, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers)
     RRDSET_FLAG_HOMOGENEOUS_CHECK = 1 << 11, // if set, the chart should be checked to determine if the dimensions are homogeneous
-    RRDSET_FLAG_HIDDEN = 1 << 12, // if set, do not show this chart on the dashboard, but use it for backends
+    RRDSET_FLAG_HIDDEN = 1 << 12, // if set, do not show this chart on the dashboard, but use it for exporting
     RRDSET_FLAG_SYNC_CLOCK = 1 << 13, // if set, microseconds on next data collection will be ignored (the chart will be synced to now)
     RRDSET_FLAG_OBSOLETE_DIMENSIONS = 1 << 14, // this is marked by the collector/module when a chart has obsolete dimensions
                                        // No new values have been collected for this chart since agent start or it was marked RRDSET_FLAG_OBSOLETE at
                                        // least rrdset_free_obsolete_time seconds ago.
     RRDSET_FLAG_ARCHIVED = 1 << 15,
     RRDSET_FLAG_ACLK = 1 << 16,
-    RRDSET_FLAG_BACKEND_SEND = 1 << 17, // if set, this chart should be sent to backends
-    RRDSET_FLAG_BACKEND_IGNORE = 1 << 18 // if set, this chart should not be sent to backends
 } RRDSET_FLAGS;
 
 #ifdef HAVE_C___ATOMIC
@@ -632,8 +630,8 @@ typedef enum rrdhost_flags {
     RRDHOST_FLAG_ORPHAN = 1 << 0, // this host is orphan (not receiving data)
     RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS = 1 << 1, // delete files of obsolete charts
     RRDHOST_FLAG_DELETE_ORPHAN_HOST = 1 << 2, // delete the entire host when orphan
-    RRDHOST_FLAG_BACKEND_SEND = 1 << 3, // send it to backends
+    RRDHOST_FLAG_EXPORTING_SEND = 1 << 3, // send it to external databases
-    RRDHOST_FLAG_BACKEND_DONT_SEND = 1 << 4, // don't send it to backends
+    RRDHOST_FLAG_EXPORTING_DONT_SEND = 1 << 4, // don't send it to external databases
     RRDHOST_FLAG_ARCHIVED = 1 << 5, // The host is archived, no collected charts yet
     RRDHOST_FLAG_MULTIHOST = 1 << 6, // Host belongs to localhost/megadb
 } RRDHOST_FLAGS;
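These enums are plain bit flags: each state costs one bit, and a send/don't-send pair lets the engine distinguish "decision not made yet" (neither bit set) from an explicit yes or no. A tiny self-contained illustration of that three-state pattern, with simplified names rather than the netdata macros:

```c
#include <stdio.h>

typedef enum {
    HOST_FLAG_EXPORTING_SEND      = 1 << 3,
    HOST_FLAG_EXPORTING_DONT_SEND = 1 << 4,
} host_flags_t;

static const char *exporting_decision(unsigned flags) {
    if (!(flags & (HOST_FLAG_EXPORTING_SEND | HOST_FLAG_EXPORTING_DONT_SEND)))
        return "undecided";                         /* neither bit set yet */
    return (flags & HOST_FLAG_EXPORTING_SEND) ? "send" : "skip";
}

int main(void) {
    unsigned flags = 0;
    printf("%s\n", exporting_decision(flags));      /* undecided */

    flags |= HOST_FLAG_EXPORTING_SEND;              /* host matched the pattern: mark it */
    printf("%s\n", exporting_decision(flags));      /* send */

    flags = HOST_FLAG_EXPORTING_DONT_SEND;          /* explicit opt-out */
    printf("%s\n", exporting_decision(flags));      /* skip */
    return 0;
}
```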
@@ -718,7 +718,7 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info) {
         , netdata_configured_timezone
         , netdata_configured_abbrev_timezone
         , netdata_configured_utc_offset
-        , config_get(CONFIG_SECTION_BACKEND, "host tags", "")
+        , ""
         , program_name
         , program_version
         , default_rrd_update_every
@@ -1233,50 +1233,6 @@ struct label *parse_json_tags(struct label *label_list, const char *tags)
     return label_list;
 }
 
-static struct label *rrdhost_load_labels_from_tags(void)
-{
-    if (!localhost->tags)
-        return NULL;
-
-    struct label *label_list = NULL;
-    BACKEND_TYPE type = BACKEND_TYPE_UNKNOWN;
-
-    if (config_exists(CONFIG_SECTION_BACKEND, "enabled")) {
-        if (config_get_boolean(CONFIG_SECTION_BACKEND, "enabled", CONFIG_BOOLEAN_NO) != CONFIG_BOOLEAN_NO) {
-            const char *type_name = config_get(CONFIG_SECTION_BACKEND, "type", "graphite");
-            type = backend_select_type(type_name);
-        }
-    }
-
-    switch (type) {
-        case BACKEND_TYPE_GRAPHITE:
-            label_list = parse_simple_tags(
-                label_list, localhost->tags, '=', ';', DO_NOT_STRIP_QUOTES, DO_NOT_STRIP_QUOTES,
-                DO_NOT_SKIP_ESCAPED_CHARACTERS);
-            break;
-        case BACKEND_TYPE_OPENTSDB_USING_TELNET:
-            label_list = parse_simple_tags(
-                label_list, localhost->tags, '=', ' ', DO_NOT_STRIP_QUOTES, DO_NOT_STRIP_QUOTES,
-                DO_NOT_SKIP_ESCAPED_CHARACTERS);
-            break;
-        case BACKEND_TYPE_OPENTSDB_USING_HTTP:
-            label_list = parse_simple_tags(
-                label_list, localhost->tags, ':', ',', STRIP_QUOTES, STRIP_QUOTES,
-                DO_NOT_SKIP_ESCAPED_CHARACTERS);
-            break;
-        case BACKEND_TYPE_JSON:
-            label_list = parse_json_tags(label_list, localhost->tags);
-            break;
-        default:
-            label_list = parse_simple_tags(
-                label_list, localhost->tags, '=', ',', DO_NOT_STRIP_QUOTES, STRIP_QUOTES,
-                DO_NOT_SKIP_ESCAPED_CHARACTERS);
-            break;
-    }
-
-    return label_list;
-}
-
 static struct label *rrdhost_load_kubernetes_labels(void)
 {
     struct label *l=NULL;
@@ -1340,10 +1296,8 @@ void reload_host_labels(void)
     struct label *from_auto = rrdhost_load_auto_labels();
     struct label *from_k8s = rrdhost_load_kubernetes_labels();
     struct label *from_config = rrdhost_load_config_labels();
-    struct label *from_tags = rrdhost_load_labels_from_tags();
 
     struct label *new_labels = merge_label_lists(from_auto, from_k8s);
-    new_labels = merge_label_lists(new_labels, from_tags);
     new_labels = merge_label_lists(new_labels, from_config);
 
     rrdhost_rdlock(localhost);
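The removed function turned the old free-form `host tags` string into host labels, picking pair and list separators per backend type (for the graphite format, `key=value` pairs separated by `;`). A small standalone sketch of that kind of tag parsing, written from scratch for illustration only (it is not netdata's `parse_simple_tags()` and skips quoting and escaping):

```c
#include <stdio.h>
#include <string.h>

/* Parse "key1=value1;key2=value2" and print one label per pair. */
static void parse_graphite_style_tags(const char *tags) {
    char buf[256];
    strncpy(buf, tags, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    char *saveptr = NULL;
    for (char *pair = strtok_r(buf, ";", &saveptr); pair; pair = strtok_r(NULL, ";", &saveptr)) {
        char *eq = strchr(pair, '=');
        if (!eq) continue;              /* skip malformed entries */
        *eq = '\0';
        printf("label: %s = %s\n", pair, eq + 1);
    }
}

int main(void) {
    parse_graphite_style_tags("datacenter=fra;rack=r12;role=db");
    return 0;
}
```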
@@ -190,8 +190,6 @@ int rrdset_set_name(RRDSET *st, const char *name) {
 
     rrdset_flag_clear(st, RRDSET_FLAG_EXPORTING_SEND);
     rrdset_flag_clear(st, RRDSET_FLAG_EXPORTING_IGNORE);
-    rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_SEND);
-    rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_IGNORE);
     rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND);
     rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE);
     rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
@@ -872,8 +870,6 @@ RRDSET *rrdset_create_custom(
     rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
     rrdset_flag_clear(st, RRDSET_FLAG_EXPORTING_SEND);
     rrdset_flag_clear(st, RRDSET_FLAG_EXPORTING_IGNORE);
-    rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_SEND);
-    rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_IGNORE);
     rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND);
     rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE);
     rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
@@ -85,6 +85,5 @@ Graphite_](/docs/guides/export/export-netdata-metrics-graphite.md).
 ### Related reference documentation
 
 - [Exporting engine reference](/exporting/README.md)
-- [Backends reference (deprecated)](/backends/README.md)
 
 
@@ -86,7 +86,7 @@ They capture the following:
 - What virtualization layer the system runs on top of, if any
 - Whether the system is a streaming parent or child
 
-If you want to organize your systems without manually creating host tags, try the automatic labels in some of the
+If you want to organize your systems without manually creating host labels, try the automatic labels in some of the
 features below.
 
 ## Host labels in streaming
@@ -20,12 +20,6 @@ the same time. You can have different update intervals and filters configured fo
 When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
 restart its process_, not the entire [database of long-term metrics](/docs/store/change-metrics-storage.md).
 
-The exporting engine has its own configuration file `exporting.conf`. The configuration is almost similar to the
-deprecated [backends](/backends/README.md#configuration) system. The most important difference is that type of a
-connector should be specified in a section name before a colon and an instance name after the colon. Also, you can't use
-`host tags` anymore. Set your labels using the [`[host labels]`](/docs/guides/using-host-labels.md) section in
-`netdata.conf`.
-
 Since Netdata collects thousands of metrics per server per second, which would easily congest any database server when
 several Netdata servers are sending data to it, Netdata allows sending metrics at a lower frequency, by resampling them.
 
@@ -271,12 +265,6 @@ Configure individual connectors and override any global settings with the follow
 - `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`
   should be sent to the external database
 
-> Starting from Netdata v1.20 the host tags (defined in the `[backend]` section of `netdata.conf`) are parsed in
-> accordance with a configured backend type and stored as host labels so that they can be reused in API responses and
-> exporting connectors. The parsing is supported for graphite, json, opentsdb, and prometheus (default) backend types.
-> You can check how the host tags were parsed using the /api/v1/info API call. But, keep in mind that backends subsystem
-> is deprecated and will be deleted soon. Please move your existing tags to the `[host labels]` section.
-
 ## HTTPS
 
 Netdata can send metrics to external databases using the TLS/SSL protocol. Unfortunately, some of
@@ -20,7 +20,7 @@ What's TimescaleDB? Here's how their team defines the project on their [GitHub p
 
 To get started archiving metrics to TimescaleDB right away, check out Mahlon's [`netdata-timescale-relay`
 repository](https://github.com/mahlonsmith/netdata-timescale-relay) on GitHub. Please be aware that backends subsystem
-is deprecated and Netdata configuration should be moved to the new `exporting conf` configuration file. Use
+was removed and Netdata configuration should be moved to the new `exporting.conf` configuration file. Use
 
 ```conf
 [json:my_instance]
 ```
@@ -151,7 +151,7 @@ void aws_kinesis_connector_worker(void *instance_p)
     char error_message[ERROR_LINE_MAX + 1] = "";
 
     debug(
-        D_BACKEND,
+        D_EXPORTING,
        "EXPORTING: kinesis_put_record(): dest = %s, id = %s, key = %s, stream = %s, partition_key = %s, \
        buffer = %zu, record = %zu",
        instance->config.destination,
@@ -175,7 +175,7 @@ void aws_kinesis_connector_worker(void *instance_p)
        // oops! we couldn't send (all or some of the) data
        error("EXPORTING: %s", error_message);
        error(
-           "EXPORTING: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+           "EXPORTING: failed to write data to external database '%s'. Willing to write %zu bytes, wrote %zu bytes.",
            instance->config.destination, sent_bytes, sent_bytes - lost_bytes);
 
        stats->transmission_failures++;
@@ -16,19 +16,19 @@ int rrdhost_is_exportable(struct instance *instance, RRDHOST *host)
 
     RRDHOST_FLAGS *flags = &host->exporting_flags[instance->index];
 
-    if (unlikely((*flags & (RRDHOST_FLAG_BACKEND_SEND | RRDHOST_FLAG_BACKEND_DONT_SEND)) == 0)) {
+    if (unlikely((*flags & (RRDHOST_FLAG_EXPORTING_SEND | RRDHOST_FLAG_EXPORTING_DONT_SEND)) == 0)) {
         char *host_name = (host == localhost) ? "localhost" : host->hostname;
 
         if (!instance->config.hosts_pattern || simple_pattern_matches(instance->config.hosts_pattern, host_name)) {
-            *flags |= RRDHOST_FLAG_BACKEND_SEND;
+            *flags |= RRDHOST_FLAG_EXPORTING_SEND;
             info("enabled exporting of host '%s' for instance '%s'", host_name, instance->config.name);
         } else {
-            *flags |= RRDHOST_FLAG_BACKEND_DONT_SEND;
+            *flags |= RRDHOST_FLAG_EXPORTING_DONT_SEND;
             info("disabled exporting of host '%s' for instance '%s'", host_name, instance->config.name);
         }
     }
 
-    if (likely(*flags & RRDHOST_FLAG_BACKEND_SEND))
+    if (likely(*flags & RRDHOST_FLAG_EXPORTING_SEND))
         return 1;
     else
         return 0;
@@ -65,18 +65,18 @@ int rrdset_is_exportable(struct instance *instance, RRDSET *st)
             *flags |= RRDSET_FLAG_EXPORTING_SEND;
         else {
             *flags |= RRDSET_FLAG_EXPORTING_IGNORE;
-            debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, host->hostname);
+            debug(D_EXPORTING, "EXPORTING: not sending chart '%s' of host '%s', because it is disabled for exporting.", st->id, host->hostname);
            return 0;
        }
    }
 
    if(unlikely(!rrdset_is_available_for_exporting_and_alarms(st))) {
-        debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, host->hostname);
+        debug(D_EXPORTING, "EXPORTING: not sending chart '%s' of host '%s', because it is not available for exporting.", st->id, host->hostname);
        return 0;
    }
 
    if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
-        debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, host->hostname, rrd_memory_mode_name(host->rrd_memory_mode));
+        debug(D_EXPORTING, "EXPORTING: not sending chart '%s' of host '%s' because its memory mode is '%s' and the exporting engine requires database access.", st->id, host->hostname, rrd_memory_mode_name(host->rrd_memory_mode));
        return 0;
    }
 
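Both filters above are pure guards, so a connector can call them back to back before it formats anything. A rough sketch of how a send loop might use them; the `RRDSET` iteration field and the `format_chart()` helper are assumptions made for this illustration, not the exporting engine's actual code:

```c
/* Hypothetical walk over one host's charts for one exporting instance. */
static void export_host(struct instance *instance, RRDHOST *host) {
    if (!rrdhost_is_exportable(instance, host))
        return;                                    /* host filtered out by "send hosts matching" */

    for (RRDSET *st = host->rrdset_root; st; st = st->next) {   /* field name is an assumption */
        if (!rrdset_is_exportable(instance, st))
            continue;                              /* chart filtered out or not usable */
        format_chart(instance, st);                /* hypothetical per-connector formatter */
    }
}
```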
@@ -34,6 +34,9 @@ typedef enum exporting_options {
     (EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_SOURCE_DATA_SUM)
 #define EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) (exporting_options & EXPORTING_OPTIONS_SOURCE_BITS)
 
+extern EXPORTING_OPTIONS global_exporting_options;
+extern const char *global_exporting_prefix;
+
 #define sending_labels_configured(instance) \
     (instance->config.options & (EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS))
 
@@ -51,11 +54,11 @@ typedef enum exporting_connector_types {
     EXPORTING_CONNECTOR_TYPE_JSON_HTTP, // Send data in JSON format using HTTP API
     EXPORTING_CONNECTOR_TYPE_OPENTSDB, // Send data to OpenTSDB using telnet API
     EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP, // Send data to OpenTSDB using HTTP API
-    EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE, // User selected to use Prometheus backend
+    EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE, // Send data using Prometheus remote write protocol
     EXPORTING_CONNECTOR_TYPE_KINESIS, // Send message to AWS Kinesis
     EXPORTING_CONNECTOR_TYPE_PUBSUB, // Send message to Google Cloud Pub/Sub
     EXPORTING_CONNECTOR_TYPE_MONGODB, // Send data to MongoDB collection
-    EXPORTING_CONNECTOR_TYPE_NUM // Number of backend types
+    EXPORTING_CONNECTOR_TYPE_NUM // Number of exporting connector types
 } EXPORTING_CONNECTOR_TYPE;
 
 struct engine;
 
@@ -265,6 +268,8 @@ size_t exporting_name_copy(char *dst, const char *src, size_t max_len);
 int rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
 int rrdset_is_exportable(struct instance *instance, RRDSET *st);
 
+extern EXPORTING_OPTIONS exporting_parse_data_source(const char *source, EXPORTING_OPTIONS exporting_options);
+
 calculated_number exporting_calculate_value_from_stored_data(
     struct instance *instance,
     RRDDIM *rd,
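
The options bitmask above packs the data-source selection into a few bits, which is why `EXPORTING_OPTIONS_DATA_SOURCE()` masks before comparing. A hedged sketch of how a connector might branch on it follows; the enum values and helper names are illustrative stand-ins, not the exact Netdata definitions.

```c
#include <stdio.h>

typedef enum {
    SOURCE_DATA_AS_COLLECTED = 1 << 0,
    SOURCE_DATA_AVERAGE      = 1 << 1,
    SOURCE_DATA_SUM          = 1 << 2,
    OPTION_SEND_NAMES        = 1 << 16,
} OPTIONS;

#define OPTIONS_SOURCE_BITS (SOURCE_DATA_AS_COLLECTED | SOURCE_DATA_AVERAGE | SOURCE_DATA_SUM)
#define OPTIONS_DATA_SOURCE(o) ((o) & OPTIONS_SOURCE_BITS)

/* Decide how to produce the value for one dimension based on the source bits. */
static void format_value(OPTIONS opts) {
    if (OPTIONS_DATA_SOURCE(opts) == SOURCE_DATA_AS_COLLECTED)
        puts("send the raw collected value");
    else
        puts("send a value computed from the stored database (average or sum over the window)");
}

int main(void) {
    format_value(SOURCE_DATA_AVERAGE | OPTION_SEND_NAMES);
    return 0;
}
```
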
@@ -327,7 +327,7 @@ void mongodb_connector_worker(void *instance_p)
         }
 
         debug(
-            D_BACKEND,
+            D_EXPORTING,
             "EXPORTING: mongodb_insert(): destination = %s, database = %s, collection = %s, data size = %zu",
             instance->config.destination,
             connector_specific_config->database,
@@ -2,17 +2,17 @@
 
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-# This is a simple backend database proxy, written in BASH, using the nc command.
+# This is a simple exporting proxy, written in BASH, using the nc command.
 # Run the script without any parameters for help.
 
 MODE="${1}"
 MY_PORT="${2}"
-BACKEND_HOST="${3}"
-BACKEND_PORT="${4}"
-FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}"
+EXPORTING_HOST="${3}"
+EXPORTING_PORT="${4}"
+FILE="${NETDATA_NC_EXPORTING_DIR-/tmp}/netdata-nc-exporting-${MY_PORT}"
 
 log() {
-    logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}"
+    logger --stderr --id=$$ --tag "netdata-nc-exporting" "${*}"
 }
 
 mync() {
@@ -28,7 +28,7 @@ mync() {
 }
 
 listen_save_replay_forever() {
-    local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended
+    local file="${1}" port="${2}" real_exporting_host="${3}" real_exporting_port="${4}" ret delay=1 started ended
 
     while true
     do
@@ -40,23 +40,23 @@ listen_save_replay_forever() {
 
         if [ -s "${file}" ]
         then
-            if [ -n "${real_backend_host}" ] && [ -n "${real_backend_port}" ]
+            if [ -n "${real_exporting_host}" ] && [ -n "${real_exporting_port}" ]
             then
-                log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}"
+                log "Attempting to send the metrics to the real external database at ${real_exporting_host}:${real_exporting_port}"
 
-                mync "${real_backend_host}" "${real_backend_port}" <"${file}"
+                mync "${real_exporting_host}" "${real_exporting_port}" <"${file}"
                 ret=$?
 
                 if [ ${ret} -eq 0 ]
                 then
-                    log "Successfully sent the metrics to ${real_backend_host}:${real_backend_port}"
+                    log "Successfully sent the metrics to ${real_exporting_host}:${real_exporting_port}"
                     mv "${file}" "${file}.old"
                     touch "${file}"
                 else
-                    log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}"
+                    log "Failed to send the metrics to ${real_exporting_host}:${real_exporting_port} (nc returned ${ret}) - appending more data to ${file}"
                 fi
             else
-                log "No backend configured - appending more data to ${file}"
+                log "No external database configured - appending more data to ${file}"
             fi
         fi
 
@@ -92,7 +92,7 @@ if [ "${MODE}" = "start" ]
     # save our PID to the lock file
     echo "$$" >"${FILE}.lock"
 
-    listen_save_replay_forever "${FILE}" "${MY_PORT}" "${BACKEND_HOST}" "${BACKEND_PORT}"
+    listen_save_replay_forever "${FILE}" "${MY_PORT}" "${EXPORTING_HOST}" "${EXPORTING_PORT}"
     ret=$?
 
     log "listener exited."
@@ -131,20 +131,20 @@ else
    cat <<EOF
 Usage:
 
-   "${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT]
+   "${0}" start|stop PORT [EXPORTING_HOST EXPORTING_PORT]
 
    PORT            The port this script will listen
-                   (configure netdata to use this as a second backend)
+                   (configure netdata to use this as an external database)
 
-   BACKEND_HOST    The real backend host
-   BACKEND_PORT    The real backend port
+   EXPORTING_HOST  The real host for the external database
+   EXPORTING_PORT  The real port for the external database
 
-   This script can act as fallback backend for netdata.
+   This script can act as fallback database for netdata.
    It will receive metrics from netdata, save them to
    ${FILE}
-   and once netdata reconnects to the real-backend, this script
-   will push all metrics collected to the real-backend too and
-   wait for a failure to happen again.
+   and once netdata reconnects to the real external database,
+   this script will push all metrics collected to the real
+   external database too and wait for a failure to happen again.
 
    Only one netdata can connect to this script at a time.
    If you need fallback for multiple netdata, run this script
@@ -152,7 +152,7 @@ Usage:
 
    You can run me in the background with this:
 
-   screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT]
+   screen -d -m "${0}" start PORT [EXPORTING_HOST EXPORTING_PORT]
 EOF
    exit 1
 fi
@@ -109,7 +109,7 @@ calculated_number exporting_calculate_value_from_stored_data(
     if (unlikely(before < first_t || after > last_t)) {
         // the chart has not been updated in the wanted timeframe
         debug(
-            D_BACKEND,
+            D_EXPORTING,
             "EXPORTING: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu",
             host->hostname,
             st->id,
@@ -143,7 +143,7 @@ calculated_number exporting_calculate_value_from_stored_data(
     rd->state->query_ops.finalize(&handle);
     if (unlikely(!counter)) {
         debug(
-            D_BACKEND,
+            D_EXPORTING,
             "EXPORTING: %s.%s.%s: no values stored in database for range %lu to %lu",
             host->hostname,
             st->id,
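
Both hunks above only touch the debug facility name; the surrounding function still reads the stored values for the aligned timeframe and reduces them to a single number, logging "no values stored" when the window is empty. A rough sketch of that reduction, assuming an average over already-fetched points (the real function walks the dimension's `query_ops` iterator instead of an array):

```c
#include <stddef.h>

/* Average a window of stored points, skipping slots marked with a sentinel value.
 * 'points' and 'sentinel' are illustrative; the real code pulls values via query_ops. */
static double average_window(const double *points, size_t n, double sentinel, size_t *counter_out) {
    double sum = 0.0;
    size_t counter = 0;
    for (size_t i = 0; i < n; i++) {
        if (points[i] == sentinel)   /* empty slot inside the timeframe */
            continue;
        sum += points[i];
        counter++;
    }
    *counter_out = counter;          /* caller logs "no values stored" when this is 0 */
    return counter ? sum / (double)counter : 0.0;
}
```
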
@@ -35,7 +35,7 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
         else {
             rrdset_flag_set(st, RRDSET_FLAG_EXPORTING_IGNORE);
             debug(
-                D_BACKEND,
+                D_EXPORTING,
                 "EXPORTING: not sending chart '%s' of host '%s', because it is disabled for exporting.",
                 st->id,
                 host->hostname);
@@ -45,7 +45,7 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
 
     if (unlikely(!rrdset_is_available_for_exporting_and_alarms(st))) {
         debug(
-            D_BACKEND,
+            D_EXPORTING,
             "EXPORTING: not sending chart '%s' of host '%s', because it is not available for exporting.",
             st->id,
             host->hostname);
@@ -56,7 +56,7 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
             st->rrd_memory_mode == RRD_MEMORY_MODE_NONE &&
             !(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
         debug(
-            D_BACKEND,
+            D_EXPORTING,
             "EXPORTING: not sending chart '%s' of host '%s' because its memory mode is '%s' and the exporting connector requires database access.",
             st->id,
             host->hostname,
@@ -236,7 +236,7 @@ int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *
 
     if (unlikely(rd->last_collected_time.tv_sec < instance->after)) {
         debug(
-            D_BACKEND,
+            D_EXPORTING,
             "EXPORTING: not sending dimension '%s' of chart '%s' from host '%s', "
             "its last data collection (%lu) is not within our timeframe (%lu to %lu)",
             rd->id, rd->rrdset->id,
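
The check above skips dimensions whose last collection happened before the window being exported. A small sketch of the same guard with hypothetical field names, not the real RRDDIM/instance layout:

```c
#include <time.h>

struct dim  { time_t last_collected_sec; };
struct inst { time_t after, before; };   /* export window [after, before] */

/* Returns 1 when the dimension has data fresh enough for the export window. */
static int dimension_in_timeframe(const struct dim *rd, const struct inst *in) {
    if (rd->last_collected_sec < in->after)
        return 0;   /* last collection predates the window: skip and log */
    return 1;
}
```
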
@@ -141,7 +141,7 @@ void pubsub_connector_worker(void *instance_p)
         }
 
         debug(
-            D_BACKEND, "EXPORTING: pubsub_publish(): project = %s, topic = %s, buffer = %zu",
+            D_EXPORTING, "EXPORTING: pubsub_publish(): project = %s, topic = %s, buffer = %zu",
             connector_specific_config->project_id, connector_specific_config->topic_id, buffer_len);
 
         if (pubsub_publish((void *)connector_specific_data, error_message, stats->buffered_metrics, buffer_len)) {
@@ -2,6 +2,9 @@
 
 #include "exporting_engine.h"
 
+EXPORTING_OPTIONS global_exporting_options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+const char *global_exporting_prefix = "netdata";
+
 struct config exporting_config = { .first_section = NULL,
                                    .last_section = NULL,
                                    .mutex = NETDATA_MUTEX_INITIALIZER,
@@ -160,7 +163,7 @@ EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type)
     return EXPORTING_CONNECTOR_TYPE_UNKNOWN;
 }
 
-EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EXPORTING_OPTIONS exporting_options)
+inline EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EXPORTING_OPTIONS exporting_options)
 {
     if (!strcmp(data_source, "raw") || !strcmp(data_source, "as collected") || !strcmp(data_source, "as-collected") ||
         !strcmp(data_source, "as_collected") || !strcmp(data_source, "ascollected")) {
@@ -194,7 +197,7 @@ struct engine *read_exporting_config()
     static struct engine *engine = NULL;
     struct connector_instance_list {
         struct connector_instance local_ci;
-        EXPORTING_CONNECTOR_TYPE backend_type;
+        EXPORTING_CONNECTOR_TYPE exporting_type;
 
         struct connector_instance_list *next;
     };
@@ -238,21 +241,14 @@ struct engine *read_exporting_config()
         prometheus_exporter_instance->config.update_every =
             prometheus_config_get_number(EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
 
-        // wait for backend subsystem to be initialized
-        for (int retries = 0; !global_backend_source && retries < 1000; retries++)
-            sleep_usec(10000);
+        prometheus_exporter_instance->config.options |= global_exporting_options & EXPORTING_OPTIONS_SOURCE_BITS;
 
-        if (!global_backend_source)
-            global_backend_source = "average";
-
-        prometheus_exporter_instance->config.options |= global_backend_options & EXPORTING_OPTIONS_SOURCE_BITS;
-
-        char *data_source = prometheus_config_get("data source", global_backend_source);
+        char *data_source = prometheus_config_get("data source", "average");
         prometheus_exporter_instance->config.options =
             exporting_parse_data_source(data_source, prometheus_exporter_instance->config.options);
 
         if (prometheus_config_get_boolean(
-                "send names instead of ids", global_backend_options & EXPORTING_OPTION_SEND_NAMES))
+                "send names instead of ids", global_exporting_options & EXPORTING_OPTION_SEND_NAMES))
             prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_NAMES;
         else
             prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
@@ -268,18 +264,17 @@ struct engine *read_exporting_config()
             prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
 
         prometheus_exporter_instance->config.charts_pattern = simple_pattern_create(
-            prometheus_config_get("send charts matching", global_backend_send_charts_matching),
+            prometheus_config_get("send charts matching", "*"),
             NULL,
             SIMPLE_PATTERN_EXACT);
         prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create(
             prometheus_config_get("send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
 
-        prometheus_exporter_instance->config.prefix = prometheus_config_get("prefix", global_backend_prefix);
+        prometheus_exporter_instance->config.prefix = prometheus_config_get("prefix", global_exporting_prefix);
 
         prometheus_exporter_instance->config.initialized = 1;
     }
 
-    // TODO: change BACKEND to EXPORTING
     while (get_connector_instance(&local_ci)) {
         info("Processing connector instance (%s)", local_ci.instance_name);
 
@@ -290,7 +285,7 @@ struct engine *read_exporting_config()
 
         tmp_ci_list = (struct connector_instance_list *)callocz(1, sizeof(struct connector_instance_list));
         memcpy(&tmp_ci_list->local_ci, &local_ci, sizeof(local_ci));
-        tmp_ci_list->backend_type = exporting_select_type(local_ci.connector_name);
+        tmp_ci_list->exporting_type = exporting_select_type(local_ci.connector_name);
         tmp_ci_list->next = tmp_ci_list_prev;
         tmp_ci_list_prev = tmp_ci_list;
         instances_to_activate++;
@@ -320,34 +315,34 @@ struct engine *read_exporting_config()
 
         info("Instance %s on %s", tmp_ci_list->local_ci.instance_name, tmp_ci_list->local_ci.connector_name);
 
-        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_UNKNOWN) {
+        if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_UNKNOWN) {
            error("Unknown exporting connector type");
            goto next_connector_instance;
        }
 
 #ifndef ENABLE_PROMETHEUS_REMOTE_WRITE
-        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
+        if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
            error("Prometheus Remote Write support isn't compiled");
            goto next_connector_instance;
        }
 #endif
 
 #ifndef HAVE_KINESIS
-        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
+        if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
            error("AWS Kinesis support isn't compiled");
            goto next_connector_instance;
        }
 #endif
 
 #ifndef ENABLE_EXPORTING_PUBSUB
-        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_PUBSUB) {
+        if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_PUBSUB) {
            error("Google Cloud Pub/Sub support isn't compiled");
            goto next_connector_instance;
        }
 #endif
 
 #ifndef HAVE_MONGOC
-        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
+        if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
            error("MongoDB support isn't compiled");
            goto next_connector_instance;
        }
@@ -358,7 +353,7 @@ struct engine *read_exporting_config()
         engine->instance_root = tmp_instance;
 
         tmp_instance->engine = engine;
-        tmp_instance->config.type = tmp_ci_list->backend_type;
+        tmp_instance->config.type = tmp_ci_list->exporting_type;
 
         instance_name = tmp_ci_list->local_ci.instance_name;
 
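
`exporting_parse_data_source()` maps the user-facing `data source` string onto the option bits described earlier; the hunk shows the `as collected` spellings it accepts. Here is a condensed sketch of that mapping. Only the "as collected" comparisons visible in the diff are certain; the sum and average branches, and their accepted spellings, are assumptions following the same shape.

```c
#include <string.h>

typedef unsigned EXP_OPTIONS;
#define SRC_AS_COLLECTED 0x1u
#define SRC_AVERAGE      0x2u
#define SRC_SUM          0x4u
#define SRC_BITS (SRC_AS_COLLECTED | SRC_AVERAGE | SRC_SUM)

static EXP_OPTIONS parse_data_source(const char *s, EXP_OPTIONS options) {
    options &= ~SRC_BITS;   /* clear the current source selection, keep the other flags */

    if (!strcmp(s, "raw") || !strcmp(s, "as collected") || !strcmp(s, "as-collected") ||
        !strcmp(s, "as_collected") || !strcmp(s, "ascollected"))
        options |= SRC_AS_COLLECTED;
    else if (!strcmp(s, "sum") || !strcmp(s, "volume"))   /* assumed spellings */
        options |= SRC_SUM;
    else                                                   /* fall back to averaged stored values */
        options |= SRC_AVERAGE;

    return options;
}
```
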
@@ -41,7 +41,7 @@ int exporting_discard_response(BUFFER *buffer, struct instance *instance) {
     *d = '\0';
 
     debug(
-        D_BACKEND,
+        D_EXPORTING,
         "EXPORTING: received %zu bytes from %s connector instance. Ignoring them. Sample: '%s'",
         buffer_strlen(buffer),
         instance->config.name,
@@ -14,11 +14,6 @@ char *netdata_configured_hostname = "test_global_host";
 
 char log_line[MAX_LOG_LINE + 1];
 
-BACKEND_OPTIONS global_backend_options = 0;
-const char *global_backend_source = "average";
-const char *global_backend_prefix = "netdata";
-const char *global_backend_send_charts_matching = "*";
-
 void init_connectors_in_tests(struct engine *engine)
 {
     expect_function_call(__wrap_now_realtime_sec);
@@ -235,7 +230,7 @@ static void test_rrdhost_is_exportable(void **state)
     assert_string_equal(log_line, "enabled exporting of host 'localhost' for instance 'instance_name'");
 
     assert_ptr_not_equal(localhost->exporting_flags, NULL);
-    assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_BACKEND_SEND);
+    assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_EXPORTING_SEND);
 }
 
 static void test_false_rrdhost_is_exportable(void **state)
@@ -255,7 +250,7 @@ static void test_false_rrdhost_is_exportable(void **state)
     assert_string_equal(log_line, "disabled exporting of host 'localhost' for instance 'instance_name'");
 
     assert_ptr_not_equal(localhost->exporting_flags, NULL);
-    assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_BACKEND_DONT_SEND);
+    assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_EXPORTING_DONT_SEND);
 }
 
 static void test_rrdset_is_exportable(void **state)
@@ -27,7 +27,6 @@ dist_healthconfig_DATA = \
     health.d/adaptec_raid.conf \
     health.d/anomalies.conf \
     health.d/apcupsd.conf \
-    health.d/backend.conf \
     health.d/bcache.conf \
     health.d/beanstalkd.conf \
     health.d/bind_rndc.conf \
@@ -1,42 +0,0 @@
-# Alert that backends subsystem will be disabled soon
-     alarm: backend_metrics_eol
-        on: netdata.backend_metrics
-     class: Errors
-      type: Netdata
- component: Exporting engine
-     units: boolean
-      calc: $now - $last_collected_t
-     every: 1m
-      warn: $this > 0
-     delay: down 5m multiplier 1.5 max 1h
-      info: the backends subsystem is deprecated and will be removed soon. Migrate your configuration to exporting.conf.
-        to: sysadmin
-
-# make sure we are sending data to backend
-
-     alarm: backend_last_buffering
-        on: netdata.backend_metrics
-     class: Latency
-      type: Netdata
- component: Exporting engine
-      calc: $now - $last_collected_t
-     units: seconds ago
-     every: 10s
-      warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-      crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-     delay: down 5m multiplier 1.5 max 1h
-      info: number of seconds since the last successful buffering of backend data
-        to: dba
-
-     alarm: backend_metrics_sent
-        on: netdata.backend_metrics
-     class: Workload
-      type: Netdata
- component: Exporting engine
-     units: %
-      calc: abs($sent) * 100 / abs($buffered)
-     every: 10s
-      warn: $this != 100
-     delay: down 5m multiplier 1.5 max 1h
-      info: percentage of metrics sent to the backend server
-        to: dba
@@ -13,5 +13,4 @@ Also, they are super fast in printing and appending data to the string and its `
 is just a lookup (it does not traverse the string).
 
 Netdata uses `BUFFER`s for preparing web responses and buffering data to be sent upstream or
-to backend databases.
-
+to external databases.
@@ -591,7 +591,7 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons
     int line = 0;
     struct section *co = NULL;
     int is_exporter_config = 0;
-    int _backends = 0; // number of backend sections we have
+    int _connectors = 0; // number of exporting connector sections we have
     char working_instance[CONFIG_MAX_NAME + 1];
     char working_connector[CONFIG_MAX_NAME + 1];
     struct section *working_connector_section = NULL;
@@ -641,8 +641,8 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons
             strncpy(working_connector, s, CONFIG_MAX_NAME);
             s = s + rc + 1;
             if (unlikely(!(*s))) {
-                _backends++;
-                sprintf(buffer, "instance_%d", _backends);
+                _connectors++;
+                sprintf(buffer, "instance_%d", _connectors);
                 s = buffer;
             }
             strncpy(working_instance, s, CONFIG_MAX_NAME);
@@ -793,7 +793,6 @@ void appconfig_generate(struct config *root, BUFFER *wb, int only_changed)
             || !strcmp(co->name, CONFIG_SECTION_CLOUD)
             || !strcmp(co->name, CONFIG_SECTION_REGISTRY)
             || !strcmp(co->name, CONFIG_SECTION_HEALTH)
-            || !strcmp(co->name, CONFIG_SECTION_BACKEND)
             || !strcmp(co->name, CONFIG_SECTION_STREAM)
             || !strcmp(co->name, CONFIG_SECTION_HOST_LABEL)
             || !strcmp(co->name, CONFIG_SECTION_ML)
@@ -89,7 +89,6 @@
 #define CONFIG_SECTION_CLOUD "cloud"
 #define CONFIG_SECTION_REGISTRY "registry"
 #define CONFIG_SECTION_HEALTH "health"
-#define CONFIG_SECTION_BACKEND "backend"
 #define CONFIG_SECTION_STREAM "stream"
 #define CONFIG_SECTION_ML "ml"
 #define CONFIG_SECTION_EXPORTING "exporting:global"
@@ -36,7 +36,7 @@ extern "C" {
 #define D_CONNECT_TO        0x0000000001000000
 #define D_RRDHOST           0x0000000002000000
 #define D_LOCKS             0x0000000004000000
-#define D_BACKEND           0x0000000008000000
+#define D_EXPORTING         0x0000000008000000
 #define D_STATSD            0x0000000010000000
 #define D_POLLFD            0x0000000020000000
 #define D_STREAM            0x0000000040000000
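
`D_EXPORTING` reuses the bit previously assigned to `D_BACKEND`, so the `debug(...)` call sites across the exporting code only change the flag name. A toy illustration of how a bitmask-gated debug macro of this kind typically works; the real `debug()` in libnetdata has more machinery, and this variadic macro form relies on the `##__VA_ARGS__` GNU extension.

```c
#include <stdio.h>
#include <stdint.h>

static uint64_t debug_flags = 0x0000000008000000ULL;  /* e.g. only the exporting bit enabled */

#define MY_D_EXPORTING 0x0000000008000000ULL
#define my_debug(type, fmt, ...) do {                       \
        if (debug_flags & (type))                           \
            fprintf(stderr, fmt "\n", ##__VA_ARGS__);       \
    } while (0)

int main(void) {
    my_debug(MY_D_EXPORTING, "EXPORTING: connector '%s' buffered %d metrics", "opentsdb", 42);
    return 0;
}
```
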
@@ -245,14 +245,14 @@ USAGE: ${PROGRAM} [options]
   --disable-plugin-nfacct    Explicitly disable the nfacct plugin.
   --enable-plugin-xenstat    Enable the xenstat plugin. Default: enable it when libxenstat and libyajl are available.
   --disable-plugin-xenstat   Explicitly disable the xenstat plugin.
-  --enable-backend-kinesis   Enable AWS Kinesis backend. Default: enable it when libaws_cpp_sdk_kinesis and its dependencies
-                             are available.
-  --disable-backend-kinesis  Explicitly disable AWS Kinesis backend.
-  --enable-backend-prometheus-remote-write Enable Prometheus remote write backend. Default: enable it when libprotobuf and
-                             libsnappy are available.
-  --disable-backend-prometheus-remote-write Explicitly disable Prometheus remote write backend.
-  --enable-backend-mongodb   Enable MongoDB backend. Default: enable it when libmongoc is available.
-  --disable-backend-mongodb  Explicitly disable MongoDB backend.
+  --enable-exporting-kinesis Enable AWS Kinesis exporting connector. Default: enable it when libaws_cpp_sdk_kinesis
+                             and its dependencies are available.
+  --disable-exporting-kinesis Explicitly disable AWS Kinesis exporting connector.
+  --enable-exporting-prometheus-remote-write Enable Prometheus remote write exporting connector. Default: enable it
+                             when libprotobuf and libsnappy are available.
+  --disable-exporting-prometheus-remote-write Explicitly disable Prometheus remote write exporting connector.
+  --enable-exporting-mongodb Enable MongoDB exporting connector. Default: enable it when libmongoc is available.
+  --disable-exporting-mongodb Explicitly disable MongoDB exporting connector.
   --enable-exporting-pubsub  Enable Google Cloud PubSub exporting connector. Default: enable it when
                              libgoogle_cloud_cpp_pubsub_protos and its dependencies are available.
   --disable-exporting-pubsub Explicitly disable Google Cloud PubSub exporting connector.
@@ -345,15 +345,20 @@ while [ -n "${1}" ]; do
     "--disable-plugin-nfacct") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-plugin-nfacct)}" | sed 's/$/ --disable-plugin-nfacct/g')" ;;
     "--enable-plugin-xenstat") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-plugin-xenstat)}" | sed 's/$/ --enable-plugin-xenstat/g')" ;;
     "--disable-plugin-xenstat") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-plugin-xenstat)}" | sed 's/$/ --disable-plugin-xenstat/g')" ;;
-    "--enable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-backend-kinesis)}" | sed 's/$/ --enable-backend-kinesis/g')" ;;
-    "--disable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-backend-kinesis)}" | sed 's/$/ --disable-backend-kinesis/g')" ;;
-    "--enable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-backend-prometheus-remote-write)}" | sed 's/$/ --enable-backend-prometheus-remote-write/g')" ;;
-    "--disable-backend-prometheus-remote-write")
-      NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-backend-prometheus-remote-write)}" | sed 's/$/ --disable-backend-prometheus-remote-write/g')"
+    "--enable-exporting-kinesis" | "--enable-backend-kinesis")
+      NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-exporting-kinesis)}" | sed 's/$/ --enable-exporting-kinesis/g')" ;;
+    "--disable-exporting-kinesis" | "--disable-backend-kinesis")
+      NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-exporting-kinesis)}" | sed 's/$/ --disable-exporting-kinesis/g')" ;;
+    "--enable-exporting-prometheus-remote-write" | "--enable-backend-prometheus-remote-write")
+      NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-exporting-prometheus-remote-write)}" | sed 's/$/ --enable-exporting-prometheus-remote-write/g')" ;;
+    "--disable-exporting-prometheus-remote-write" | "--disable-backend-prometheus-remote-write")
+      NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-exporting-prometheus-remote-write)}" | sed 's/$/ --disable-exporting-prometheus-remote-write/g')"
       NETDATA_DISABLE_PROMETHEUS=1
       ;;
-    "--enable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-backend-mongodb)}" | sed 's/$/ --enable-backend-mongodb/g')" ;;
-    "--disable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-backend-mongodb)}" | sed 's/$/ --disable-backend-mongodb/g')" ;;
+    "--enable-exporting-mongodb" | "--enable-backend-mongodb")
+      NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-exporting-mongodb)}" | sed 's/$/ --enable-exporting-mongodb/g')" ;;
+    "--disable-exporting-mongodb" | "--disable-backend-mongodb")
+      NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-exporting-mongodb)}" | sed 's/$/ --disable-exporting-mongodb/g')" ;;
     "--enable-exporting-pubsub") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-exporting-pubsub)}" | sed 's/$/ --enable-exporting-pubsub/g')" ;;
     "--disable-exporting-pubsub") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--disable-exporting-pubsub)}" | sed 's/$/ --disable-exporting-pubsub/g')" ;;
     "--enable-lto") NETDATA_CONFIGURE_OPTIONS="$(echo "${NETDATA_CONFIGURE_OPTIONS%--enable-lto)}" | sed 's/$/ --enable-lto/g')" ;;
@@ -93,12 +93,11 @@ The `netdata-updater.sh` script will update your Agent.
 | `--disable-plugin-nfacct` | Disable nfacct plugin. Default: enable it when libmnl and libnetfilter_acct are available|
 | `--enable-plugin-xenstat` | Enable the xenstat plugin. Default: enable it when libxenstat and libyajl are available|
 | `--disable-plugin-xenstat` | Disable the xenstat plugin|
-| `--enable-backend-kinesis` | Enable AWS Kinesis backend. Default: enable it when libaws_cpp_sdk_kinesis and libraries (it depends on are available)|
-| `--disable-backend-kinesis` | Disable AWS Kinesis backend. Default: enable it when libaws_cpp_sdk_kinesis and libraries (it depends on are available)|
-| `--enable-backend-prometheus-remote-write` | Enable Prometheus remote write backend. Default: enable it when libprotobuf and libsnappy are available|
-| `--disable-backend-prometheus-remote-write` | Disable Prometheus remote write backend. Default: enable it when libprotobuf and libsnappy are available|
-| `--enable-backend-mongodb` | Enable MongoDB backend. Default: enable it when libmongoc is available|
-| `--disable-backend-mongodb` | Disable MongoDB backend|
+| `--disable-exporting-kinesis` | Disable AWS Kinesis exporting connector. Default: enable it when libaws_cpp_sdk_kinesis and libraries (it depends on are available)|
+| `--enable-exporting-prometheus-remote-write` | Enable Prometheus remote write exporting connector. Default: enable it when libprotobuf and libsnappy are available|
+| `--disable-exporting-prometheus-remote-write` | Disable Prometheus remote write exporting connector. Default: enable it when libprotobuf and libsnappy are available|
+| `--enable-exporting-mongodb` | Enable MongoDB exporting connector. Default: enable it when libmongoc is available|
+| `--disable-exporting-mongodb` | Disable MongoDB exporting connector|
 | `--enable-lto` | Enable Link-Time-Optimization. Default: enabled|
 | `--disable-lto` | Disable Link-Time-Optimization. Default: enabled|
 | `--disable-x86-sse` | Disable SSE instructions. By default SSE optimizations are enabled|
@@ -67,18 +67,18 @@ Alerts for the child can be triggered by any of the involved hosts that maintain
 You can daisy-chain any number of Netdata, each with or without a database and
 with or without alerts for the child metrics.
 
-### Mix and match with backends
+### Mix and match with exporting engine
 
-All nodes that maintain a database can also send their data to a backend database.
+All nodes that maintain a database can also send their data to an external database.
 This allows quite complex setups.
 
 Example:
 
 1. Netdata nodes `A` and `B` do not maintain a database and stream metrics to Netdata node `C`(live streaming functionality).
-2. Netdata node `C` maintains a database for `A`, `B`, `C` and archives all metrics to `graphite` with 10 second detail (backends functionality).
+2. Netdata node `C` maintains a database for `A`, `B`, `C` and archives all metrics to `graphite` with 10 second detail (exporting functionality).
 3. Netdata node `C` also streams data for `A`, `B`, `C` to Netdata `D`, which also collects data from `E`, `F` and `G` from another DMZ (live streaming functionality).
 4. Netdata node `D` is just a proxy, without a database, that streams all data to a remote site at Netdata `H`.
-5. Netdata node `H` maintains a database for `A`, `B`, `C`, `D`, `E`, `F`, `G`, `H` and sends all data to `opentsdb` with 5 seconds detail (backends functionality)
+5. Netdata node `H` maintains a database for `A`, `B`, `C`, `D`, `E`, `F`, `G`, `H` and sends all data to `opentsdb` with 5 seconds detail (exporting functionality)
 6. Alerts are triggered by `H` for all hosts.
 7. Users can use all Netdata nodes that maintain a database to view metrics (i.e. at `H` all hosts can be viewed).
 
@@ -107,15 +107,7 @@ This also disables the registry (there cannot be a registry without an API).
 requests from its child nodes. 0 sets no limit, 1 means maximum once every second. If this is set, you may see error log
 entries "... too busy to accept new streaming request. Will be allowed in X secs".
 
-```
-[backend]
-    enabled = yes | no
-    type = graphite | opentsdb
-    destination = IP:PORT ...
-    update every = 10
-```
-
-`[backend]` configures data archiving to a backend (it archives all databases maintained on
+You can [use](docs/agent/exporting#configuration) the exporting engine to configure data archiving to an external database (it archives all databases maintained on
 this host).
 
 ### Streaming configuration
@@ -156,7 +148,7 @@ a proxy).
 ```
 This is an overview of how these options can be combined:
 
-| target|memory<br/>mode|web<br/>mode|stream<br/>enabled|backend|alarms|dashboard|
+| target|memory<br/>mode|web<br/>mode|stream<br/>enabled|exporting|alarms|dashboard|
 |------|:-------------:|:----------:|:----------------:|:-----:|:----:|:-------:|
 | headless collector|`none`|`none`|`yes`|only for `data source = as collected`|not possible|no|
 | headless proxy|`none`|not `none`|`yes`|only for `data source = as collected`|not possible|no|
@@ -6,14 +6,14 @@ validate_metrics() {
 
     curl -sS "http://localhost:19999/api/v1/allmetrics?format=prometheus&prefix=nd&timestamps=no${params}" |
     grep -E 'nd_system_|nd_cpu_|nd_system_|nd_net_|nd_disk_|nd_ip_|nd_ipv4_|nd_ipv6_|nd_mem_|nd_netdata_|nd_apps_|nd_services_' |
-    sed -ne 's/{.*//p' | sort | uniq > tests/backends/new-${fname}
-    diff tests/backends/${fname} tests/backends/new-${fname}
-    rm tests/backends/new-${fname}
+    sed -ne 's/{.*//p' | sort | uniq > tests/exportings/new-${fname}
+    diff tests/exportings/${fname} tests/exportings/new-${fname}
+    rm tests/exportings/new-${fname}
 }
 
 
 if [ ! -f .gitignore ]; then
-    echo "Need to run as ./tests/backends/$(basename "$0") from top level directory of git repository" >&2
+    echo "Need to run as ./tests/exportings/$(basename "$0") from top level directory of git repository" >&2
     exit 1
 fi
 
@@ -25,17 +25,17 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
     if (prometheus_exporter_instance)
         prometheus_exporting_options = prometheus_exporter_instance->config.options;
     else
-        prometheus_exporting_options = global_backend_options;
+        prometheus_exporting_options = global_exporting_options;
 
     PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options =
         PROMETHEUS_OUTPUT_TIMESTAMPS |
-        ((prometheus_exporting_options & BACKEND_OPTION_SEND_NAMES) ? PROMETHEUS_OUTPUT_NAMES : 0);
+        ((prometheus_exporting_options & EXPORTING_OPTION_SEND_NAMES) ? PROMETHEUS_OUTPUT_NAMES : 0);
 
     const char *prometheus_prefix;
     if (prometheus_exporter_instance)
         prometheus_prefix = prometheus_exporter_instance->config.prefix;
     else
-        prometheus_prefix = global_backend_prefix;
+        prometheus_prefix = global_exporting_prefix;
 
     while(url) {
         char *value = mystrsep(&url, "&");
@@ -64,7 +64,7 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
             prometheus_prefix = value;
         }
         else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) {
-            prometheus_exporting_options = backend_parse_data_source(value, prometheus_exporting_options);
+            prometheus_exporting_options = exporting_parse_data_source(value, prometheus_exporting_options);
         }
         else {
             int i;