diff --git a/ansible/roles/stack-sunbird/templates/lms-service_logback.xml b/ansible/roles/stack-sunbird/templates/lms-service_logback.xml
index 27869184f0aeb59201fb8afe47e73648ca61c4b3..9bc4eba705fe6aebb322ce5b805192383de57b32 100644
--- a/ansible/roles/stack-sunbird/templates/lms-service_logback.xml
+++ b/ansible/roles/stack-sunbird/templates/lms-service_logback.xml
@@ -15,21 +15,70 @@
         <appender-ref ref="STDOUT" />
     </appender>
-
-    <appender name="queryLoggerAppender" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder class="net.logstash.logback.encoder.LogstashEncoder">
-            <layout class="ch.qos.logback.contrib.json.classic.JsonLayout">
-                <timestampFormat>yyyy-MM-dd'T'HH:mm:ss.SSSX</timestampFormat>
-                <timestampFormatTimezoneId>Etc/UTC</timestampFormatTimezoneId>
-                <fieldNames>
-                    <timestamp>timestamp</timestamp>
-                    <message>msg</message>
-                    <logger>lname</logger>
-                    <thread>tname</thread>
-                    <levelValue>[ignore]</levelValue>
-                    <version>[ignore]</version>
-                </fieldNames>
-            </layout>
+
+    <logger name="play" level="INFO" />
+    <logger name="defaultLogger" level="INFO" />
+    <!-- Telemetry Loggers -->
+
+    <root level="INFO">
+        <appender-ref ref="ASYNCSTDOUT" />
+    </root>
+
+
+    <appender name="kafka-appender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
+        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
+            <pattern>%msg</pattern>
         </encoder>
+
+        <topic>${sunbird_env_name}.telemetry.raw</topic>
+        <!-- messages are sent without a key, so the producer distributes them across partitions -->
+        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
+        <!-- deliver messages asynchronously; never block the logging application thread -->
+        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
+
+        <!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
+        <!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
+        <!-- bootstrap.servers is the only mandatory producerConfig -->
+        <producerConfig>bootstrap.servers=${kafka_urls}</producerConfig>
+        <!-- don't wait for a broker to ack the reception of a batch. -->
+        <producerConfig>acks=0</producerConfig>
+        <!-- wait up to 15000ms and collect log messages before sending them as a batch -->
+        <producerConfig>linger.ms=15000</producerConfig>
+        <!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
+        <producerConfig>max.block.ms=0</producerConfig>
+        <!-- define a client-id that you use to identify yourself against the kafka broker -->
+        <producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>
+
+        <!-- there is no fallback <appender-ref>. If this appender cannot deliver, it will drop its messages.
+        -->
+
+    </appender>
+
+    <appender name="query-kafka-appender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
+        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
+            <pattern>%msg</pattern>
+        </encoder>
+
+        <topic>${sunbird_env_name}.db.query.events</topic>
+        <!-- messages are sent without a key, so the producer distributes them across partitions -->
+        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
+        <!-- deliver messages asynchronously; never block the logging application thread -->
+        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
+
+        <!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
+        <!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
+        <!-- bootstrap.servers is the only mandatory producerConfig -->
+        <producerConfig>bootstrap.servers=${kafka_urls}</producerConfig>
+        <!-- don't wait for a broker to ack the reception of a batch. -->
+        <producerConfig>acks=0</producerConfig>
+        <!-- wait up to 15000ms and collect log messages before sending them as a batch -->
+        <producerConfig>linger.ms=15000</producerConfig>
+        <!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
+        <producerConfig>max.block.ms=0</producerConfig>
+        <!-- define a client-id that you use to identify yourself against the kafka broker -->
+        <producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>
+
+        <!-- there is no fallback <appender-ref>. If this appender cannot deliver, it will drop its messages. -->
+
     </appender>
 
     <appender name="defaultLoggerAppender" class="ch.qos.logback.core.ConsoleAppender">
@@ -59,32 +108,17 @@
         <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder"/>
     </appender>
 
-    <logger name="org.sunbird" level="INFO">
-        <appender-ref ref="customLoggerAppender"/>
-    </logger>
-    <logger name="org.sunbird" level="DEBUG">
-        <appender-ref ref="customLoggerAppender"/>
+    <logger name="TelemetryEventLogger" level="INFO">
+        <appender-ref ref="kafka-appender" />
     </logger>
-    <logger name="org.sunbird" level="WARN">
-        <appender-ref ref="customLoggerAppender"/>
+    <logger name="queryLogger" level="DEBUG">
+        <appender-ref ref="query-kafka-appender" />
     </logger>
-    <logger name="org.sunbird" level="ERROR">
+    <logger name="org.sunbird" level="INFO">
         <appender-ref ref="customLoggerAppender"/>
     </logger>
-
-    <logger name="play" level="INFO" />
-    <logger name="defaultLogger" level="INFO" />
-    <!-- Telemetry Loggers-->
-    <logger name="TelemetryEventLogger" level="INFO" />
-    <logger name="queryLogger" level="DEBUG">
-        <appender-ref ref="queryLoggerAppender" />
-    </logger>
-
-    <root level="INFO">
-        <appender-ref ref="ASYNCSTDOUT" />
-    </root>
-
+
 </configuration>
\ No newline at end of file
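
For readers unfamiliar with logback's named-logger routing, the sketch below shows how application code ends up feeding the two Kafka appenders introduced by this change. It is a minimal illustration, not part of the diff: it assumes the LMS service logs through SLF4J (the API exposed by Play's logback integration), the logger names are the ones wired up in the second hunk, and the event payloads are invented for the example.

    // Hypothetical usage sketch; only the logger names come from the config above.
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class KafkaLoggingSketch {

        // Matches <logger name="TelemetryEventLogger">, which routes through
        // "kafka-appender" to the ${sunbird_env_name}.telemetry.raw topic.
        private static final Logger TELEMETRY =
                LoggerFactory.getLogger("TelemetryEventLogger");

        // Matches <logger name="queryLogger">, which routes through
        // "query-kafka-appender" to the ${sunbird_env_name}.db.query.events topic.
        private static final Logger QUERY =
                LoggerFactory.getLogger("queryLogger");

        public static void main(String[] args) {
            // Both encoders use the bare %msg pattern, so the logged string becomes
            // the Kafka record value verbatim; it should therefore already be a
            // fully serialized event (the JSON here is illustrative only).
            TELEMETRY.info("{\"eid\":\"LOG\",\"ets\":1634025600000}");

            // queryLogger is configured at DEBUG, so debug-level query events ship too.
            QUERY.debug("{\"query\":\"select\",\"timeTakenMs\":12}");
        }
    }

Worth noting is the delivery contract this configuration chooses: acks=0, max.block.ms=0, an asynchronous delivery strategy, and no fallback appender together make telemetry and query logging strictly best-effort. A slow or unreachable broker drops messages rather than blocking request threads, the usual trade-off for instrumentation traffic.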