# application-prod.yml

#118
server:
  port: 8070
spring:
  devtools.restart.enabled: true
  kafka:
    bootstrap-servers: app2833:9094,app2834:9094,app2835:9094,app2836:9094,app2837:9094
    #bootstrap-servers: localhost:9092
    producer:
      # Number of times the producer retries sending a message after an error.
      retries: 1
      # When multiple messages are headed to the same partition, the producer groups them into one batch. This sets the amount of memory, in bytes, a single batch may use.
      batch-size: 16384
      # Size of the producer's memory buffer.
      buffer-memory: 33554432
      # Key serializer
      key-serializer: org.springframework.kafka.support.serializer.JsonSerializer
      # Value serializer
      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
      # acks=0: the producer does not wait for any response from the server before treating the write as successful.
      # acks=1: the producer receives a success response as soon as the cluster's leader node gets the message.
      # acks=all: the producer receives a success response only after every replicating node has received the message.
      acks: all
    consumer:
      # Auto-commit interval. In Spring Boot 2.x this is a Duration and must follow the expected format, e.g. 1S, 1M, 2H, 5D.
      # (Only used when enable-auto-commit is true.)
      auto-commit-interval: 1S
      # What the consumer should do when it reads a partition with no committed offset, or the offset is invalid:
      # latest (default): start from the newest records (those produced after the consumer started)
      # earliest: start from the beginning of the partition
      auto-offset-reset: latest
      # Whether to auto-commit offsets. Defaults to true; set it to false and commit manually to avoid duplicates and data loss.
      enable-auto-commit: false
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Maximum number of records pulled in a single batch poll
      max-poll-records: 1000
      properties:
        security:
          protocol: SASL_PLAINTEXT
        sasl:
          mechanism: SCRAM-SHA-512
          jaas:
            config: 'org.apache.kafka.common.security.scram.ScramLoginModule required username="user01" password="8b9dcf43";'
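            # The credentials above are committed in plain text. A hedged alternative, assuming
            # KAFKA_USER/KAFKA_PASS environment variables are injected at deploy time (names illustrative):
            # config: 'org.apache.kafka.common.security.scram.ScramLoginModule required username="${KAFKA_USER}" password="${KAFKA_PASS}";'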
    listener:
      # Number of threads to run in the listener container.
      concurrency: 5
      # The listener is responsible for acks; each acknowledge() call commits immediately.
      ack-mode: manual_immediate
      missing-topics-fatal: false
      type: batch
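    # With type: batch and ack-mode: manual_immediate, listener methods receive the polled records
    # as a list and commit by calling acknowledge() themselves. A hedged Java sketch (not from this repo):
    #   @KafkaListener(topics = "some-topic")
    #   public void onMessages(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
    #       // process the batch, then commit its offsets immediately
    #       ack.acknowledge();
    #   }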
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://74.10.28.86:3389/ax_seat_1?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8
    username: ax_tj_seat
    password: Taiji@2022#seat
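    # A hedged alternative to the plain-text credentials above, assuming DB_USER/DB_PASS are
    # provided via environment variables at deploy time (names illustrative):
    # username: ${DB_USER}
    # password: ${DB_PASS}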
  # streams:
  #   auto-startup: false
taiji:
  openservice:
    enable: true
    url: http://74.10.28.87:9002
  elasticsearch.rest:
    uris: 74.10.28.65:9200,74.10.28.65:9201,74.10.28.65:9202,74.10.28.66:9200,74.10.28.66:9201,74.10.28.66:9202,74.10.28.67:9200,74.10.28.67:9201,74.10.28.67:9202,74.10.28.68:9200,74.10.28.68:9201,74.10.28.68:9202,74.10.28.69:9200,74.10.28.69:9201,74.10.28.69:9202
    username: ax_seat # account and password are required only if x-pack-based authentication is enabled
    password: ax_seat # otherwise leave unset
    connection-timeout: 100 # connection timeout
    max-connection: 100 # maximum number of connections
  hbase.zookeeper:
    quorum: hadoop7:2181,hadoop8:2181,hadoop9:2181,hadoop16:2181,hadoop17:2181
    znode_parent: /hbase-unsecure
  kafka:
    consumer:
      groupId: 202303261619_xhl_es
      hik-capture-face:
        # ${random.uuid} yields a fresh consumer group on every startup, so the consumer
        # rejoins with no committed offsets and starts according to auto-offset-reset.
        group: ${random.uuid}
        topic: 'taiji_ax_hik_capture_face'
        partitions0: 0
        partitions1: 1
        partitions2: 2
        partitions3: 3
        partitions4: 4
      hik-capture-car:
        group: ${random.uuid}
        topic: 'taiji_ax_hik_capture_car'
        partitions0: 0
        partitions1: 1
        partitions2: 2
        partitions3: 3
        partitions4: 4
      hik-capture-ship:
        group: ${random.uuid}
        topic: 'ship_camera_structured_info'
        partitions0: 0
        partitions1: 1
        partitions2: 2
        partitions3: 3
        partitions4: 4
        tile: false
      prefix:
        ztpt_dynamic_ais_key_prefix: ZTPT_DYNAMIC_AIS_118_
        tianao_radar_fusion_key_prefix: TIANAO_RADAR_FUSION_118_
        dynamic_ship_track_geo_name_key: DYNAMIC_SHIP_TRACK_GEO_NAME_KEY_118_
        dynamic_ship_track_geo_key: dynamic_ship_track_geo_key_118_
        trake_user_prefix: trake_user_prefix_118_
        trake_dept_prefix: trake_dept_prefix_118_
        ship_borne_terminal_redis_key_prefix: ship_borne_terminal_118_
        static_beidou_law_enforcement_ship_redis_key_prefix: STATIC_BEIDOU_LAW_ENFORCEMENT_SHIP_118_
        static_police_man_track_redis_key_prefix: STATIC_POLICE_MAN_TRACK_REDIS_118_
        static_law_enforcement_car_redis_key_prefix: STATIC_LAW_ENFORCEMENT_CAR_118_
        hlx_ais_redis_key_prefix: hlx_ais_redis_key_prefix_116_
        hlx_dynamic_radar_redis_key_prefix: hlx_dynamic_radar_redis_key_prefix_116_
        hlx_zyh_redis_key_prefix: hlx_zyh_redis_key_prefix_116_
    producer:
      police_ship_fusion_track:
        topic: taiji_ax_police_ship_fusion_track
        group: ${random.uuid}
        partitions0: 0
        partitions1: 1
        partitions2: 2
        partitions3: 3
        partitions4: 4
      police_car_fusion_track:
        topic: taiji_ax_police_car_fusion_track
        group: ${random.uuid}
        partitions0: 0
        partitions1: 1
        partitions2: 2
        partitions3: 3
        partitions4: 4
      police_man_fusion_track:
        topic: taiji_ax_police_man_fusion_track
        group: ${random.uuid}
        partitions0: 0
        partitions1: 1
        partitions2: 2
        partitions3: 3
        partitions4: 4
logging:
  basePath: /app/dynamicTrackEs59/logs
  config: classpath:log-config/logback-spring.xml
  # In local development, verbosity is controlled via level
  level:
    root: info
    cn.com.taiji: trace
    org.springframework.web: debug
# amazon-s3
file:
  store:
    endpoint: http://74.10.28.62:81
    access-key: ax_seat_acc
    secret-key: ax_seat_AxSeatPW
    bucket: ax_es_hik_images
    prefix: ax_es_hik_images
    domain: http://74.10.28.62:81/
    ip: 74.10.28.62
    port: 81
    httpOnly: 0