++ LOG_DIR=/var/log/contrail
++ export CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra
++ CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra
++ mkdir -p /var/log/contrail/config-database-cassandra
++ log_file=/var/log/contrail/config-database-cassandra/console.log
++ touch /var/log/contrail/config-database-cassandra/console.log
++ chmod 600 /var/log/contrail/config-database-cassandra/console.log
++ exec
+++ tee -a /var/log/contrail/config-database-cassandra/console.log
+++ date
++ echo 'INFO: =================== Thu Oct 23 05:27:00 UTC 2025 ==================='
INFO: =================== Thu Oct 23 05:27:00 UTC 2025 ===================
++ LOG_LOCAL=1
++ source /functions.sh
++ source /contrail-functions.sh
+++ get_default_ip
++++ get_default_nic
++++ get_gateway_nic_for_ip 1
++++ command -v ip
++++ local ip=1
+++++ ip route get 1
+++++ grep -o 'dev.*'
+++++ awk '{print $2}'
++++ local iface=ens3
++++ [[ ens3 == \l\o ]]
++++ echo ens3
+++ local nic=ens3
+++ get_ip_for_nic ens3
+++ local nic=ens3
+++ cut -d / -f 1
+++ get_cidr_for_nic ens3
+++ command -v ip
+++ local nic=ens3
+++ ip addr show dev ens3
+++ grep 'inet '
+++ awk '{print $2}'
+++ head -n 1
++ DEFAULT_LOCAL_IP=10.0.0.38
++ ENCAP_PRIORITY=MPLSoUDP,MPLSoGRE,VXLAN
++ VXLAN_VN_ID_MODE=automatic
++ DPDK_UIO_DRIVER=uio_pci_generic
++ CPU_CORE_MASK=0x01
++ SERVICE_CORE_MASK=
++ DPDK_CTRL_THREAD_MASK=
++ HUGE_PAGES=
++ HUGE_PAGES_DIR=/dev/hugepages
++ HUGE_PAGES_1GB=0
++ HUGE_PAGES_2MB=256
++ HUGE_PAGES_1GB_DIR=
++ HUGE_PAGES_2MB_DIR=
++ [[ 0 != 0 ]]
++ [[ 0 != 256 ]]
++ [[ -z '' ]]
+++ mount -t hugetlbfs
+++ tail -n 1
+++ awk '/pagesize=2M/{print($3)}'
++ HUGE_PAGES_2MB_DIR=
++ DPDK_MEM_PER_SOCKET=1024
++ DPDK_COMMAND_ADDITIONAL_ARGS=
++ NIC_OFFLOAD_ENABLE=False
++ DPDK_ENABLE_VLAN_FWRD=False
++ DIST_SNAT_PROTO_PORT_LIST=
++ CLOUD_ORCHESTRATOR=openstack
++ CLOUD_ADMIN_ROLE=admin
++ AAA_MODE=rbac
++ AUTH_MODE=keystone
++ AUTH_PARAMS=
++ SSL_ENABLE=false
++ SSL_INSECURE=True
++ SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ SERVER_CA_KEYFILE=/etc/contrail/ssl/private/ca-key.pem
++ SELFSIGNED_CERTS_WITH_IPS=True
++ CONTROLLER_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ ANALYTICS_ALARM_ENABLE=True
++ ANALYTICS_SNMP_ENABLE=True
++ ANALYTICSDB_ENABLE=True
++ ANALYTICS_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ ANALYTICSDB_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ ANALYTICS_SNMP_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ ANALYTICS_API_PORT=8081
++ ANALYTICS_API_INTROSPECT_PORT=8090
++ ANALYTICSDB_PORT=9160
++ ANALYTICSDB_CQL_PORT=9042
++ TOPOLOGY_INTROSPECT_PORT=5921
++ QUERYENGINE_INTROSPECT_PORT=8091
+++ get_server_list ANALYTICS ':8081 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:8081 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:8081 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:8081 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:8081 '
+++ '[' -n '10.0.0.38:8081 10.0.0.33:8081 10.0.0.22:8081 ' ']'
+++ echo '10.0.0.38:8081 10.0.0.33:8081 10.0.0.22:8081'
++ ANALYTICS_SERVERS='10.0.0.38:8081 10.0.0.33:8081 10.0.0.22:8081'
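The repeated "+++ get_server_list ..." blocks in this trace (here and for the other services below) are xtrace output of a helper sourced from /functions.sh. A minimal sketch reconstructed from the trace alone, not the verbatim source: it dereferences the <PREFIX>_NODES variable, appends the port-plus-delimiter to each node, and strips the trailing delimiter before echoing.

    get_server_list() {
      local server_typ=${1}_NODES        # e.g. ANALYTICS -> $ANALYTICS_NODES
      local port_with_delim=$2           # e.g. ':8081 '
      local server_list=''
      IFS=',' read -ra server_list <<< "${!server_typ}"
      local extended_server_list=''
      for server in "${server_list[@]}"; do
        local server_address=$(echo "$server")
        extended_server_list+="${server_address}${port_with_delim}"
      done
      # The final '[ -n ... ]'/echo pair in the trace drops the trailing delimiter.
      [ -n "$extended_server_list" ] && echo "${extended_server_list::-1}"
    }

    # Usage as seen above: ANALYTICS_SERVERS=$(get_server_list ANALYTICS ':8081 ')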
+++ get_server_list ANALYTICSDB ':9042 '
+++ local server_typ=ANALYTICSDB_NODES
+++ local 'port_with_delim=:9042 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:9042 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:9042 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:9042 '
+++ '[' -n '10.0.0.38:9042 10.0.0.33:9042 10.0.0.22:9042 ' ']'
+++ echo '10.0.0.38:9042 10.0.0.33:9042 10.0.0.22:9042'
++ ANALYTICSDB_CQL_SERVERS='10.0.0.38:9042 10.0.0.33:9042 10.0.0.22:9042'
++ ANALYTICS_API_VIP=
++ ANALYTICS_ALARM_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ ALARMGEN_INTROSPECT_PORT=5995
++ BGP_PORT=179
++ BGP_AUTO_MESH=true
++ BGP_ASN=64512
++ ENABLE_4BYTE_AS=false
++ APPLY_DEFAULTS=true
++ COLLECTOR_PORT=8086
++ COLLECTOR_INTROSPECT_PORT=8089
++ COLLECTOR_SYSLOG_PORT=514
++ COLLECTOR_SFLOW_PORT=6343
++ COLLECTOR_IPFIX_PORT=4739
++ COLLECTOR_PROTOBUF_PORT=3333
++ COLLECTOR_STRUCTURED_SYSLOG_PORT=3514
++ SNMPCOLLECTOR_INTROSPECT_PORT=5920
+++ get_server_list ANALYTICS ':8086 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:8086 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:8086 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:8086 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:8086 '
+++ '[' -n '10.0.0.38:8086 10.0.0.33:8086 10.0.0.22:8086 ' ']'
+++ echo '10.0.0.38:8086 10.0.0.33:8086 10.0.0.22:8086'
++ COLLECTOR_SERVERS='10.0.0.38:8086 10.0.0.33:8086 10.0.0.22:8086'
++ CASSANDRA_PORT=9161
++ CASSANDRA_CQL_PORT=9041
++ CASSANDRA_SSL_STORAGE_PORT=7013
++ CASSANDRA_STORAGE_PORT=7012
++ CASSANDRA_JMX_LOCAL_PORT=7201
++ CONFIGDB_CASSANDRA_DRIVER=cql
++ CONFIG_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ CONFIGDB_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ CONFIG_API_PORT=8082
++ CONFIG_API_INTROSPECT_PORT=8084
++ CONFIG_API_ADMIN_PORT=8095
++ CONFIGDB_PORT=9161
++ CONFIGDB_CQL_PORT=9041
+++ get_server_list CONFIG ':8082 '
+++ local server_typ=CONFIG_NODES
+++ local 'port_with_delim=:8082 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:8082 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:8082 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:8082 '
+++ '[' -n '10.0.0.38:8082 10.0.0.33:8082 10.0.0.22:8082 ' ']'
+++ echo '10.0.0.38:8082 10.0.0.33:8082 10.0.0.22:8082'
++ CONFIG_SERVERS='10.0.0.38:8082 10.0.0.33:8082 10.0.0.22:8082'
+++ get_server_list CONFIGDB ':9161 '
+++ local server_typ=CONFIGDB_NODES
+++ local 'port_with_delim=:9161 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:9161 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:9161 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:9161 '
+++ '[' -n '10.0.0.38:9161 10.0.0.33:9161 10.0.0.22:9161 ' ']'
+++ echo '10.0.0.38:9161 10.0.0.33:9161 10.0.0.22:9161'
++ CONFIGDB_SERVERS='10.0.0.38:9161 10.0.0.33:9161 10.0.0.22:9161'
+++ get_server_list CONFIGDB ':9041 '
+++ local server_typ=CONFIGDB_NODES
+++ local 'port_with_delim=:9041 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:9041 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:9041 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:9041 '
+++ '[' -n '10.0.0.38:9041 10.0.0.33:9041 10.0.0.22:9041 ' ']'
+++ echo '10.0.0.38:9041 10.0.0.33:9041 10.0.0.22:9041'
++ CONFIGDB_CQL_SERVERS='10.0.0.38:9041 10.0.0.33:9041 10.0.0.22:9041'
++ CONFIG_API_VIP=
++ CONFIG_API_SSL_ENABLE=false
++ CONFIG_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ CONFIG_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ CONFIG_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CONFIG_API_WORKER_COUNT=1
++ CONFIG_API_MAX_REQUESTS=1024
++ ANALYTICS_API_SSL_ENABLE=false
++ ANALYTICS_API_SSL_INSECURE=True
++ ANALYTICS_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ ANALYTICS_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ ANALYTICS_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CASSANDRA_SSL_ENABLE=false
++ CASSANDRA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ CASSANDRA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ CASSANDRA_SSL_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CASSANDRA_SSL_KEYSTORE_PASSWORD=astrophytum
++ CASSANDRA_SSL_TRUSTSTORE_PASSWORD=ornatum
++ CASSANDRA_SSL_PROTOCOL=TLS
++ CASSANDRA_SSL_ALGORITHM=SunX509
++ CASSANDRA_SSL_CIPHER_SUITES='[TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]'
++ CASSANDRA_CONFIG_MEMTABLE_FLUSH_WRITER=4
++ CASSANDRA_CONFIG_CONCURRECT_COMPACTORS=4
++ CASSANDRA_CONFIG_COMPACTION_THROUGHPUT_MB_PER_SEC=256
++ CASSANDRA_CONFIG_CONCURRECT_READS=64
++ CASSANDRA_CONFIG_CONCURRECT_WRITES=64
++ CASSANDRA_CONFIG_MEMTABLE_ALLOCATION_TYPE=offheap_objects
++ CASSANDRA_REAPER_ENABLED=true
++ CASSANDRA_REAPER_JMX_KEY=reaperJmxKey
++ CASSANDRA_REAPER_JMX_AUTH_USERNAME=reaperUser
++ CASSANDRA_REAPER_JMX_AUTH_PASSWORD=reaperPass
++ CASSANDRA_REAPER_APP_PORT=8071
++ CASSANDRA_REAPER_ADM_PORT=8072
++ CONTROL_NODES=10.20.0.17,10.20.0.254,10.20.0.14
++ CONTROL_INTROSPECT_PORT=8083
++ DNS_NODES=10.20.0.17,10.20.0.254,10.20.0.14
++ DNS_SERVER_PORT=53
++ DNS_INTROSPECT_PORT=8092
++ RNDC_KEY=xvysmOR8lnUQRBcunkC6vg==
++ USE_EXTERNAL_TFTP=False
++ ZOOKEEPER_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ ZOOKEEPER_PORT=2181
++ ZOOKEEPER_PORTS=2888:3888
+++ get_server_list ZOOKEEPER :2181,
+++ local server_typ=ZOOKEEPER_NODES
+++ local port_with_delim=:2181,
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+=10.0.0.38:2181,
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+=10.0.0.33:2181,
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+=10.0.0.22:2181,
+++ '[' -n 10.0.0.38:2181,10.0.0.33:2181,10.0.0.22:2181, ']'
+++ echo 10.0.0.38:2181,10.0.0.33:2181,10.0.0.22:2181
++ ZOOKEEPER_SERVERS=10.0.0.38:2181,10.0.0.33:2181,10.0.0.22:2181
+++ get_server_list ZOOKEEPER ':2181 '
+++ local server_typ=ZOOKEEPER_NODES
+++ local 'port_with_delim=:2181 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:2181 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:2181 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:2181 '
+++ '[' -n '10.0.0.38:2181 10.0.0.33:2181 10.0.0.22:2181 ' ']'
+++ echo '10.0.0.38:2181 10.0.0.33:2181 10.0.0.22:2181'
++ ZOOKEEPER_SERVERS_SPACE_DELIM='10.0.0.38:2181 10.0.0.33:2181 10.0.0.22:2181'
++ RABBITMQ_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ RABBITMQ_NODE_PORT=5673
+++ get_server_list RABBITMQ :5673,
+++ local server_typ=RABBITMQ_NODES
+++ local port_with_delim=:5673,
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+=10.0.0.38:5673,
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+=10.0.0.33:5673,
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+=10.0.0.22:5673,
+++ '[' -n 10.0.0.38:5673,10.0.0.33:5673,10.0.0.22:5673, ']'
+++ echo 10.0.0.38:5673,10.0.0.33:5673,10.0.0.22:5673
++ RABBITMQ_SERVERS=10.0.0.38:5673,10.0.0.33:5673,10.0.0.22:5673
++ RABBITMQ_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ RABBITMQ_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ RABBITMQ_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT=true
++ RABBITMQ_VHOST=/
++ RABBITMQ_USER=guest
++ RABBITMQ_PASSWORD=guest
++ RABBITMQ_USE_SSL=false
++ RABBITMQ_SSL_VER=tlsv1.2
++ RABBITMQ_CLIENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ RABBITMQ_CLIENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ RABBITMQ_CLIENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ RABBITMQ_HEARTBEAT_INTERVAL=60
++ RABBITMQ_CLUSTER_PARTITION_HANDLING=autoheal
++ RABBITMQ_MIRRORED_QUEUE_MODE=all
++ REDIS_SERVER_PORT=6379
++ REDIS_SERVER_PASSWORD=
+++ get_server_list ANALYTICS ':6379 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:6379 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:6379 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:6379 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:6379 '
+++ '[' -n '10.0.0.38:6379 10.0.0.33:6379 10.0.0.22:6379 ' ']'
+++ echo '10.0.0.38:6379 10.0.0.33:6379 10.0.0.22:6379'
++ REDIS_SERVERS='10.0.0.38:6379 10.0.0.33:6379 10.0.0.22:6379'
++ REDIS_LISTEN_ADDRESS=
++ REDIS_PROTECTED_MODE=
++ REDIS_SSL_ENABLE=false
++ REDIS_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ REDIS_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ REDIS_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ redis_ssl_config=
++ KAFKA_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ KAFKA_PORT=9092
+++ get_server_list KAFKA ':9092 '
+++ local server_typ=KAFKA_NODES
+++ local 'port_with_delim=:9092 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:9092 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.33
+++ local server_address=10.0.0.33
+++ extended_server_list+='10.0.0.33:9092 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.22
+++ local server_address=10.0.0.22
+++ extended_server_list+='10.0.0.22:9092 '
+++ '[' -n '10.0.0.38:9092 10.0.0.33:9092 10.0.0.22:9092 ' ']'
+++ echo '10.0.0.38:9092 10.0.0.33:9092 10.0.0.22:9092'
++ KAFKA_SERVERS='10.0.0.38:9092 10.0.0.33:9092 10.0.0.22:9092'
++ KAFKA_SSL_ENABLE=false
++ KAFKA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ KAFKA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ KAFKA_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ KEYSTONE_AUTH_ADMIN_TENANT=admin
++ KEYSTONE_AUTH_ADMIN_USER=admin
++ KEYSTONE_AUTH_ADMIN_PASSWORD=contrail123
++ KEYSTONE_AUTH_PROJECT_DOMAIN_NAME=Default
++ KEYSTONE_AUTH_USER_DOMAIN_NAME=Default
++ KEYSTONE_AUTH_REGION_NAME=RegionOne
++ KEYSTONE_AUTH_URL_VERSION=/v3
++ KEYSTONE_AUTH_HOST=10.0.0.38
++ KEYSTONE_AUTH_PROTO=http
++ KEYSTONE_AUTH_ADMIN_PORT=5000
++ KEYSTONE_AUTH_PUBLIC_PORT=5000
++ KEYSTONE_AUTH_URL_TOKENS=/v3/auth/tokens
++ KEYSTONE_AUTH_INSECURE=True
++ KEYSTONE_AUTH_CERTFILE=
++ KEYSTONE_AUTH_KEYFILE=
++ KEYSTONE_AUTH_CA_CERTFILE=
++ KEYSTONE_AUTH_ENDPOINT_TYPE=
++ KEYSTONE_AUTH_SYNC_ON_DEMAND=
++ KEYSTONE_AUTH_INTERFACE=public
++ KUBEMANAGER_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ KUBERNETES_CLUSTER_NAME=k8s
++ KUBERNETES_CNI_META_PLUGIN=multus
++ METADATA_PROXY_SECRET=contrail
++ BARBICAN_TENANT_NAME=service
++ BARBICAN_USER=barbican
++ BARBICAN_PASSWORD=contrail123
++ AGENT_MODE=kernel
++ EXTERNAL_ROUTERS=
++ SUBCLUSTER=
++ VROUTER_COMPUTE_NODE_ADDRESS=
++ VROUTER_CRYPT_INTERFACE=crypt0
++ VROUTER_DECRYPT_INTERFACE=decrypt0
++ VROUTER_DECRYPT_KEY=15
++ VROUTER_MODULE_OPTIONS=
++ FABRIC_SNAT_HASH_TABLE_SIZE=4096
++ TSN_EVPN_MODE=False
++ TSN_NODES='[]'
++ PRIORITY_ID=
++ PRIORITY_BANDWIDTH=
++ PRIORITY_SCHEDULING=
++ QOS_QUEUE_ID=
++ QOS_LOGICAL_QUEUES=
++ QOS_DEF_HW_QUEUE=False
++ PRIORITY_TAGGING=True
++ SLO_DESTINATION=collector
++ '[' -n '' ']'
++ SAMPLE_DESTINATION=collector
++ FLOW_EXPORT_RATE=0
++ WEBUI_NODES=10.0.0.38,10.0.0.33,10.0.0.22
++ WEBUI_JOB_SERVER_PORT=3000
++ KUE_UI_PORT=3002
++ WEBUI_HTTP_LISTEN_PORT=8180
++ WEBUI_HTTPS_LISTEN_PORT=8143
++ WEBUI_SSL_KEY_FILE=/etc/contrail/webui_ssl/cs-key.pem
++ WEBUI_SSL_CERT_FILE=/etc/contrail/webui_ssl/cs-cert.pem
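The is_enabled calls traced above implement a simple truthiness check. A sketch inferred from the pattern matches in the trace (the lowercasing step is an assumption, consistent with the argument "False" arriving as val=false further down in this log):

    is_enabled() {
      local val=${1,,}   # lowercase the argument ("False" -> "false")
      [[ $val == 'true' ]] || [[ $val == 'yes' ]] || [[ $val == 'enabled' ]]
    }

    # e.g.: is_enabled "$REDIS_SSL_ENABLE" || redis_ssl_config=''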
++ WEBUI_SSL_CIPHERS=ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES256-SHA
++ WEBUI_STATIC_AUTH_USER=admin
++ WEBUI_STATIC_AUTH_PASSWORD=contrail123
++ WEBUI_STATIC_AUTH_ROLE=cloudAdmin
++ XMPP_SERVER_PORT=5269
++ XMPP_SSL_ENABLE=false
++ XMPP_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ XMPP_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ XMPP_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ LINKLOCAL_SERVICE_PORT=80
++ LINKLOCAL_SERVICE_NAME=metadata
++ LINKLOCAL_SERVICE_IP=169.254.169.254
++ IPFABRIC_SERVICE_PORT=8775
++ INTROSPECT_SSL_ENABLE=false
++ INTROSPECT_SSL_INSECURE=True
++ INTROSPECT_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ INTROSPECT_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ INTROSPECT_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ INTROSPECT_LISTEN_ALL=True
++ SANDESH_SSL_ENABLE=false
++ SANDESH_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SANDESH_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SANDESH_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SANDESH_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SANDESH_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ METADATA_SSL_ENABLE=false
++ METADATA_SSL_CERTFILE=
++ METADATA_SSL_KEYFILE=
++ METADATA_SSL_CA_CERTFILE=
++ METADATA_SSL_CERT_TYPE=
++ CONFIGURE_IPTABLES=false
++ FWAAS_ENABLE=False
++ CONTAINERD_NAMESPACE=k8s.io
++ TOR_AGENT_OVS_KA=10000
++ TOR_TYPE=ovs
++ TOR_OVS_PROTOCOL=tcp
++ TORAGENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ TORAGENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ TORAGENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ [[ /v3 == \/\v\2\.\0 ]]
++ [[ openstack == \o\p\e\n\s\t\a\c\k ]]
++ AUTH_MODE=keystone
++ [[ keystone == \k\e\y\s\t\o\n\e ]]
++ AUTH_PARAMS='--admin_password contrail123'
++ AUTH_PARAMS+=' --admin_tenant_name admin'
++ AUTH_PARAMS+=' --admin_user admin'
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ read -r -d '' sandesh_client_config
++ true
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ xmpp_certs_config=
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ analytics_api_ssl_opts=
++ read -r -d '' rabbitmq_config
++ true
++ read -r -d '' rabbit_config
++ true
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ kafka_ssl_config=
++ [[ -n '' ]]
++ collector_stats_config=
++ [[ -z '' ]]
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ export TSN_AGENT_MODE=
++ TSN_AGENT_MODE=
++ [[ -n '' ]]
++ collector_stats_config=
++ [[ -z x ]]
++ RSYSLOGD_XFLOW_LISTEN_PORT=9898
+ CONFIG=/etc/cassandra/cassandra.yaml
+ JVM_OPTIONS_CONFIG=/etc/cassandra/jvm.options
+ cp /etc/cassandra/cassandra.origin /etc/cassandra/cassandra.yaml
+ cp /etc/cassandra/jvm.options.origin /etc/cassandra/jvm.options
+ for i in '{1..10}'
++ find_my_ip_and_order_for_node_list 10.0.0.38,10.0.0.33,10.0.0.22
++ cut -d ' ' -f 1
++ local servers=10.0.0.38,10.0.0.33,10.0.0.22
++ local server_list=
++ IFS=,
++ read -ra server_list
+++ get_local_ips
+++ tr '\n' ,
+++ grep -vi host
+++ uniq
+++ awk '/32 host/ { print f } {f=$2}'
+++ sort
+++ cat /proc/net/fib_trie
++ local local_ips=,10.0.0.38,10.20.0.17,127.0.0.1,172.17.0.1,,
++ local ord=1
++ for server in '"${server_list[@]}"'
++ local ret=0
+++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.38'\''))'
++ local server_ip=10.0.0.38
++ [[ 0 == 0 ]]
++ [[ -n 10.0.0.38 ]]
++ [[ ,10.0.0.38,10.20.0.17,127.0.0.1,172.17.0.1,, =~ ,10\.0\.0\.38, ]]
++ echo 10.0.0.38 1
++ return
+ my_ip=10.0.0.38
+ '[' -n 10.0.0.38 ']'
+ break
+ '[' -z 10.0.0.38 ']'
++ echo 10.0.0.38,10.0.0.33,10.0.0.22
++ tr , ' '
++ wc -w
+ export CASSANDRA_COUNT=3
+ CASSANDRA_COUNT=3
++ echo 10.0.0.38,10.0.0.33,10.0.0.22
++ sed 's/,/", "/g'
+ export 'CASSANDRA_CONNECT_POINTS=10.0.0.38", "10.0.0.33", "10.0.0.22'
+ CASSANDRA_CONNECT_POINTS='10.0.0.38", "10.0.0.33", "10.0.0.22'
++ cut -d , -f 1,2
++ echo 10.0.0.38,10.0.0.33,10.0.0.22
+ export CASSANDRA_SEEDS=10.0.0.38,10.0.0.33
+ CASSANDRA_SEEDS=10.0.0.38,10.0.0.33
+ export CASSANDRA_LISTEN_ADDRESS=10.0.0.38
+ CASSANDRA_LISTEN_ADDRESS=10.0.0.38
+ export CASSANDRA_RPC_ADDRESS=10.0.0.38
+ CASSANDRA_RPC_ADDRESS=10.0.0.38
+ echo 'INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g'
INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g
+ for yaml in Xmx Xms
++ echo -Xms1g -Xmx2g
++ sed -n 's/.*\(-Xmx[0-9]*[mMgG]\).*/\1/p'
+ opt=-Xmx2g
+ [[ -n -Xmx2g ]]
++ echo -Xms1g -Xmx2g
++ sed 's/-Xmx[0-9]*[mMgG]//g'
+ JVM_EXTRA_OPTS='-Xms1g '
+ sed -i 's/^[#]*-Xmx.*/-Xmx2g/g' /etc/cassandra/jvm.options
+ for yaml in Xmx Xms
++ echo -Xms1g
++ sed -n 's/.*\(-Xms[0-9]*[mMgG]\).*/\1/p'
+ opt=-Xms1g
+ [[ -n -Xms1g ]]
++ echo -Xms1g
++ sed 's/-Xms[0-9]*[mMgG]//g'
+ JVM_EXTRA_OPTS=
+ sed -i 's/^[#]*-Xms.*/-Xms1g/g' /etc/cassandra/jvm.options
+ export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201'
+ JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201'
+ is_enabled true
+ local val=true
+ [[ true == \t\r\u\e ]]
+ export LOCAL_JMX=no
+ LOCAL_JMX=no
+ export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201'
+ JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201'
+ is_enabled false
+ local val=false
+ [[ false == \t\r\u\e ]]
+ [[ false == \y\e\s ]]
+ [[ false == \e\n\a\b\l\e\d ]]
+ cat
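The find_my_ip_and_order_for_node_list trace above shows how the entrypoint works out which configured node this container is and its 1-based position in the list: it harvests the local /32 addresses from /proc/net/fib_trie, resolves each configured node with python3, and looks for a match. A sketch reconstructed from the trace (retry and error handling elided):

    get_local_ips() {
      cat /proc/net/fib_trie | awk '/32 host/ { print f } {f=$2}' | grep -vi host | sort | uniq | tr '\n' ','
    }

    find_my_ip_and_order_for_node_list() {
      local servers=$1
      local server_list=()
      IFS=',' read -ra server_list <<< "$servers"
      local local_ips=",$(get_local_ips),"
      local ord=1
      for server in "${server_list[@]}"; do
        local server_ip
        server_ip=$(python3 -c "import socket; print(socket.gethostbyname('$server'))")
        # Membership test with dots escaped, as the =~ match in the trace shows.
        if [[ -n "$server_ip" && $local_ips =~ ,${server_ip//./\\.}, ]]; then
          echo "$server_ip $ord"
          return
        fi
        ((ord++))
      done
    }

    # my_ip=$(find_my_ip_and_order_for_node_list "$CONFIGDB_NODES" | cut -d ' ' -f 1)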
+ change_variable memtable_flush_writers 4
+ local VARIABLE_NAME=memtable_flush_writers
+ local VARIABLE_VALUE=4
+ sed -i 's/.*\(memtable_flush_writers\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml
+ change_variable concurrent_compactors 4
+ local VARIABLE_NAME=concurrent_compactors
+ local VARIABLE_VALUE=4
+ sed -i 's/.*\(concurrent_compactors\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml
+ change_variable compaction_throughput_mb_per_sec 256
+ local VARIABLE_NAME=compaction_throughput_mb_per_sec
+ local VARIABLE_VALUE=256
+ sed -i 's/.*\(compaction_throughput_mb_per_sec\):.*\([0-9a-z]\)/\1: 256/g' /etc/cassandra/cassandra.yaml
+ change_variable concurrent_reads 64
+ local VARIABLE_NAME=concurrent_reads
+ local VARIABLE_VALUE=64
+ sed -i 's/.*\(concurrent_reads\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml
+ change_variable concurrent_writes 64
+ local VARIABLE_NAME=concurrent_writes
+ local VARIABLE_VALUE=64
+ sed -i 's/.*\(concurrent_writes\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml
+ change_variable memtable_allocation_type offheap_objects
+ local VARIABLE_NAME=memtable_allocation_type
+ local VARIABLE_VALUE=offheap_objects
+ sed -i 's/.*\(memtable_allocation_type\):.*\([0-9a-z]\)/\1: offheap_objects/g' /etc/cassandra/cassandra.yaml
+ log_levels_map=([SYS_DEBUG]='DEBUG' [SYS_INFO]='INFO' [SYS_NOTICE]='INFO' [SYS_ERROR]="ERROR")
+ declare -A log_levels_map
+ log_level=DEBUG
+ '[' -n DEBUG ']'
+ sed -i 's/\(
; cluster_name=contrail_database; column_index_cache_size_in_kb=2; column_index_size_in_kb=64; commit_failure_policy=stop; commitlog_compression=null; commitlog_directory=/var/lib/cassandra/commitlog; commitlog_max_compression_buffers_in_pool=3; commitlog_periodic_queue_size=-1; commitlog_segment_size_in_mb=32; commitlog_sync=periodic; commitlog_sync_batch_window_in_ms=NaN; commitlog_sync_period_in_ms=10000; commitlog_total_space_in_mb=null; compaction_large_partition_warning_threshold_mb=100; compaction_throughput_mb_per_sec=256; concurrent_compactors=4; concurrent_counter_writes=32; concurrent_materialized_view_writes=32; concurrent_reads=64; concurrent_replicates=null; concurrent_writes=64; counter_cache_keys_to_save=2147483647; counter_cache_save_period=7200; counter_cache_size_in_mb=null; counter_write_request_timeout_in_ms=5000; credentials_cache_max_entries=1000; credentials_update_interval_in_ms=-1; credentials_validity_in_ms=2000; cross_node_timeout=false; data_file_directories=[Ljava.lang.String;@6b19b79; disk_access_mode=auto; disk_failure_policy=stop; disk_optimization_estimate_percentile=0.95; disk_optimization_page_cross_chance=0.1; disk_optimization_strategy=ssd; dynamic_snitch=true; dynamic_snitch_badness_threshold=0.1; dynamic_snitch_reset_interval_in_ms=600000; dynamic_snitch_update_interval_in_ms=100; enable_materialized_views=true; enable_scripted_user_defined_functions=false; enable_user_defined_functions=false; enable_user_defined_functions_threads=true; encryption_options=null; endpoint_snitch=SimpleSnitch; file_cache_round_up=null; file_cache_size_in_mb=null; gc_log_threshold_in_ms=200; gc_warn_threshold_in_ms=1000; hinted_handoff_disabled_datacenters=[]; hinted_handoff_enabled=true; hinted_handoff_throttle_in_kb=1024; hints_compression=null; hints_directory=null; hints_flush_period_in_ms=10000; incremental_backups=false; index_interval=null; index_summary_capacity_in_mb=null; index_summary_resize_interval_in_minutes=60; initial_token=null; inter_dc_stream_throughput_outbound_megabits_per_sec=200; inter_dc_tcp_nodelay=false; internode_authenticator=null; internode_compression=dc; internode_recv_buff_size_in_bytes=0; internode_send_buff_size_in_bytes=0; key_cache_keys_to_save=2147483647; key_cache_save_period=14400; key_cache_size_in_mb=null; listen_address=10.0.0.38; listen_interface=null; listen_interface_prefer_ipv6=false; listen_on_broadcast_address=false;
max_hint_window_in_ms=10800000; max_hints_delivery_threads=2; max_hints_file_size_in_mb=128; max_mutation_size_in_kb=null; max_streaming_retries=3; max_value_size_in_mb=256; memtable_allocation_type=offheap_objects; memtable_cleanup_threshold=null; memtable_flush_writers=4; memtable_heap_space_in_mb=null; memtable_offheap_space_in_mb=null; min_free_space_per_drive_in_mb=50; native_transport_max_concurrent_connections=-1; native_transport_max_concurrent_connections_per_ip=-1; native_transport_max_frame_size_in_mb=256; native_transport_max_threads=128; native_transport_port=9042; native_transport_port_ssl=null; num_tokens=256; otc_backlog_expiration_interval_ms=200; otc_coalescing_enough_coalesced_messages=8; otc_coalescing_strategy=DISABLED; otc_coalescing_window_us=200; partitioner=org.apache.cassandra.dht.Murmur3Partitioner; permissions_cache_max_entries=1000; permissions_update_interval_in_ms=-1; permissions_validity_in_ms=2000; phi_convict_threshold=8.0; prepared_statements_cache_size_mb=null; range_request_timeout_in_ms=10000; read_request_timeout_in_ms=5000; request_scheduler=org.apache.cassandra.scheduler.NoScheduler; request_scheduler_id=null; request_scheduler_options=null; request_timeout_in_ms=10000; role_manager=CassandraRoleManager; roles_cache_max_entries=1000; roles_update_interval_in_ms=-1; roles_validity_in_ms=2000; row_cache_class_name=org.apache.cassandra.cache.OHCProvider; row_cache_keys_to_save=2147483647; row_cache_save_period=0; row_cache_size_in_mb=0; rpc_address=10.0.0.38; rpc_interface=null; rpc_interface_prefer_ipv6=false; rpc_keepalive=true; rpc_listen_backlog=50; rpc_max_threads=2147483647; rpc_min_threads=16; rpc_port=9160; rpc_recv_buff_size_in_bytes=null; rpc_send_buff_size_in_bytes=null; rpc_server_type=sync; saved_caches_directory=/var/lib/cassandra/saved_caches; seed_provider=org.apache.cassandra.locator.SimpleSeedProvider{seeds=10.0.0.38,10.0.0.33}; server_encryption_options=; slow_query_log_timeout_in_ms=500; snapshot_before_compaction=false; ssl_storage_port=7001; sstable_preemptive_open_interval_in_mb=50; start_native_transport=true; start_rpc=true; storage_port=7000; stream_throughput_outbound_megabits_per_sec=200; streaming_keep_alive_period_in_secs=300; streaming_socket_timeout_in_ms=86400000; thrift_framed_transport_size_in_mb=15; thrift_max_message_length_in_mb=16; thrift_prepared_statements_cache_size_mb=null; tombstone_failure_threshold=100000; tombstone_warn_threshold=1000; tracetype_query_ttl=86400; tracetype_repair_ttl=604800; transparent_data_encryption_options=org.apache.cassandra.config.TransparentDataEncryptionOptions@2a32de6c; trickle_fsync=false; trickle_fsync_interval_in_kb=10240; truncate_request_timeout_in_ms=60000; unlogged_batch_across_partitions_warn_threshold=10; user_defined_function_fail_timeout=1500; user_defined_function_warn_timeout=500; user_function_timeout_policy=die; windows_timer_interval=1; write_request_timeout_in_ms=2000]
INFO [main] 2025-10-23 05:27:04,324 DatabaseDescriptor.java:367 - DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap
INFO [main] 2025-10-23 05:27:04,324 DatabaseDescriptor.java:425 - Global memtable on-heap threshold is enabled at 502MB
INFO [main] 2025-10-23 05:27:04,324 DatabaseDescriptor.java:429 - Global memtable off-heap threshold is enabled at 502MB
INFO [main] 2025-10-23 05:27:04,372 RateBasedBackPressure.java:123 - Initialized back-pressure with high ratio: 0.9, factor: 5, flow: FAST, window size: 2000.
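change_variable, expanded in the trace above, is a blunt sed rewrite of "key: value" lines in cassandra.yaml; the -Xms/-Xmx lines in jvm.options were rewritten the same way a few steps earlier. A sketch of both rewrites exactly as the trace shows them (stock file layouts assumed):

    change_variable() {
      local name=$1 value=$2
      # Rewrites any existing (even commented-out) "name: old" to "name: value".
      sed -i "s/.*\($name\):.*\([0-9a-z]\)/\1: $value/g" /etc/cassandra/cassandra.yaml
    }
    change_variable concurrent_writes 64

    # Heap: extract "-Xmx2g" from JVM_EXTRA_OPTS, drop it there, force it into jvm.options.
    opt=$(echo "$JVM_EXTRA_OPTS" | sed -n 's/.*\(-Xmx[0-9]*[mMgG]\).*/\1/p')
    if [[ -n "$opt" ]]; then
      JVM_EXTRA_OPTS=$(echo "$JVM_EXTRA_OPTS" | sed 's/-Xmx[0-9]*[mMgG]//g')
      sed -i "s/^[#]*-Xmx.*/$opt/g" /etc/cassandra/jvm.options
    fi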
INFO [main] 2025-10-23 05:27:04,373 DatabaseDescriptor.java:729 - Back-pressure is disabled with strategy org.apache.cassandra.net.RateBasedBackPressure{high_ratio=0.9, factor=5, flow=FAST}.
INFO [main] 2025-10-23 05:27:04,653 JMXServerUtils.java:246 - Configured JMX server at: service:jmx:rmi://0.0.0.0/jndi/rmi://0.0.0.0:7201/jmxrmi
INFO [main] 2025-10-23 05:27:04,682 CassandraDaemon.java:473 - Hostname: cn-jenkins-deploy-platform-ansible-os-4452-1.
INFO [main] 2025-10-23 05:27:04,683 CassandraDaemon.java:480 - JVM vendor/version: OpenJDK 64-Bit Server VM/1.8.0_322
INFO [main] 2025-10-23 05:27:04,687 CassandraDaemon.java:481 - Heap size: 984.000MiB/1.961GiB
INFO [main] 2025-10-23 05:27:04,688 CassandraDaemon.java:486 - Code Cache Non-heap memory: init = 2555904(2496K) used = 4730176(4619K) committed = 4784128(4672K) max = 251658240(245760K)
INFO [main] 2025-10-23 05:27:04,689 CassandraDaemon.java:486 - Metaspace Non-heap memory: init = 0(0K) used = 19533768(19075K) committed = 20316160(19840K) max = -1(-1K)
INFO [main] 2025-10-23 05:27:04,689 CassandraDaemon.java:486 - Compressed Class Space Non-heap memory: init = 0(0K) used = 2264072(2211K) committed = 2490368(2432K) max = 1073741824(1048576K)
INFO [main] 2025-10-23 05:27:04,689 CassandraDaemon.java:486 - Par Eden Space Heap memory: init = 335544320(327680K) used = 93992520(91789K) committed = 335544320(327680K) max = 335544320(327680K)
INFO [main] 2025-10-23 05:27:04,689 CassandraDaemon.java:486 - Par Survivor Space Heap memory: init = 41943040(40960K) used = 0(0K) committed = 41943040(40960K) max = 41943040(40960K)
INFO [main] 2025-10-23 05:27:04,689 CassandraDaemon.java:486 - CMS Old Gen Heap memory: init = 654311424(638976K) used = 0(0K) committed = 654311424(638976K) max = 1728053248(1687552K)
INFO [main] 2025-10-23 05:27:04,690 CassandraDaemon.java:488 - Classpath:
/opt/cassandra/conf:/opt/cassandra/build/classes/main:/opt/cassandra/build/classes/thrift:/opt/cassandra/lib/airline-0.6.jar:/opt/cassandra/lib/antlr-runtime-3.5.2.jar:/opt/cassandra/lib/apache-cassandra-3.11.3.jar:/opt/cassandra/lib/apache-cassandra-thrift-3.11.3.jar:/opt/cassandra/lib/asm-5.0.4.jar:/opt/cassandra/lib/caffeine-2.2.6.jar:/opt/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar:/opt/cassandra/lib/commons-cli-1.1.jar:/opt/cassandra/lib/commons-codec-1.9.jar:/opt/cassandra/lib/commons-lang3-3.1.jar:/opt/cassandra/lib/commons-math3-3.2.jar:/opt/cassandra/lib/compress-lzf-0.8.4.jar:/opt/cassandra/lib/concurrentlinkedhashmap-lru-1.4.jar:/opt/cassandra/lib/concurrent-trees-2.4.0.jar:/opt/cassandra/lib/disruptor-3.0.1.jar:/opt/cassandra/lib/ecj-4.4.2.jar:/opt/cassandra/lib/guava-18.0.jar:/opt/cassandra/lib/HdrHistogram-2.1.9.jar:/opt/cassandra/lib/high-scale-lib-1.0.6.jar:/opt/cassandra/lib/hppc-0.5.4.jar:/opt/cassandra/lib/jackson-core-asl-1.9.13.jar:/opt/cassandra/lib/jackson-mapper-asl-1.9.13.jar:/opt/cassandra/lib/jamm-0.3.0.jar:/opt/cassandra/lib/javax.inject.jar:/opt/cassandra/lib/jbcrypt-0.3m.jar:/opt/cassandra/lib/jcl-over-slf4j-1.7.7.jar:/opt/cassandra/lib/jctools-core-1.2.1.jar:/opt/cassandra/lib/jflex-1.6.0.jar:/opt/cassandra/lib/jna-4.2.2.jar:/opt/cassandra/lib/joda-time-2.4.jar:/opt/cassandra/lib/json-simple-1.1.jar:/opt/cassandra/lib/jstackjunit-0.0.1.jar:/opt/cassandra/lib/libthrift-0.13.0.jar:/opt/cassandra/lib/log4j-over-slf4j-1.7.7.jar:/opt/cassandra/lib/logback-classic-1.2.9.jar:/opt/cassandra/lib/logback-core-1.2.9.jar:/opt/cassandra/lib/lz4-1.3.0.jar:/opt/cassandra/lib/metrics-core-3.1.5.jar:/opt/cassandra/lib/metrics-jvm-3.1.5.jar:/opt/cassandra/lib/metrics-logback-3.1.5.jar:/opt/cassandra/lib/netty-all-4.1.39.Final.jar:/opt/cassandra/lib/ohc-core-0.4.4.jar:/opt/cassandra/lib/ohc-core-j8-0.4.4.jar:/opt/cassandra/lib/reporter-config3-3.0.3.jar:/opt/cassandra/lib/reporter-config-base-3.0.3.jar:/opt/cassandra/lib/sigar-1.6.4.jar:/opt/cassandra/lib/slf4j-api-1.7.7.jar:/opt/cassandra/lib/snakeyaml-1.11.jar:/opt/cassandra/lib/snappy-java-1.1.1.7.jar:/opt/cassandra/lib/snowball-stemmer-1.3.0.581.1.jar:/opt/cassandra/lib/ST4-4.0.8.jar:/opt/cassandra/lib/stream-2.5.2.jar:/opt/cassandra/lib/thrift-server-0.3.7.jar:/opt/cassandra/lib/jsr223/*/*.jar:/opt/cassandra/lib/jamm-0.3.0.jar INFO [main] 2025-10-23 05:27:04,698 CassandraDaemon.java:490 - JVM Arguments: [-Xloggc:/opt/cassandra/logs/gc.log, -ea, -XX:+UseThreadPriorities, -XX:ThreadPriorityPolicy=42, -XX:+HeapDumpOnOutOfMemoryError, -Xss256k, -XX:StringTableSize=1000003, -XX:+AlwaysPreTouch, -XX:-UseBiasedLocking, -XX:+UseTLAB, -XX:+ResizeTLAB, -XX:+UseNUMA, -XX:+PerfDisableSharedMem, -Djava.net.preferIPv4Stack=true, -Xms1g, -Xmx2g, -XX:+UseParNewGC, -XX:+UseConcMarkSweepGC, -XX:+CMSParallelRemarkEnabled, -XX:SurvivorRatio=8, -XX:MaxTenuringThreshold=1, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:CMSWaitDuration=10000, -XX:+CMSParallelInitialMarkEnabled, -XX:+CMSEdenChunksRecordAlways, -XX:+CMSClassUnloadingEnabled, -XX:+PrintGCDetails, -XX:+PrintGCDateStamps, -XX:+PrintHeapAtGC, -XX:+PrintTenuringDistribution, -XX:+PrintGCApplicationStoppedTime, -XX:+PrintPromotionFailure, -XX:+UseGCLogFileRotation, -XX:NumberOfGCLogFiles=10, -XX:GCLogFileSize=10M, -Xmn400M, -XX:+UseCondCardMark, -XX:CompileCommandFile=/opt/cassandra/conf/hotspot_compiler, -javaagent:/opt/cassandra/lib/jamm-0.3.0.jar, -Dcassandra.jmx.remote.port=7199, -Dcom.sun.management.jmxremote.rmi.port=7199, 
-Dcom.sun.management.jmxremote.authenticate=true, -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password, -Djava.library.path=/opt/cassandra/lib/sigar-bin, -Dcassandra.rpc_port=9161, -Dcassandra.native_transport_port=9041, -Dcassandra.ssl_storage_port=7013, -Dcassandra.storage_port=7012, -Dcassandra.jmx.local.port=7201, -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access, -Dcassandra.jmx.remote.port=7201, -Dcom.sun.management.jmxremote.rmi.port=7201, -Dcassandra.libjemalloc=/usr/lib64/libjemalloc.so.1, -XX:OnOutOfMemoryError=kill -9 %p, -Dlogback.configurationFile=logback.xml, -Dcassandra.logdir=/opt/cassandra/logs, -Dcassandra.storagedir=/opt/cassandra/data, -Dcassandra-foreground=yes]
WARN [main] 2025-10-23 05:27:04,856 NativeLibrary.java:187 - Unable to lock JVM memory (ENOMEM). This can result in part of the JVM being swapped out, especially with mmapped I/O enabled. Increase RLIMIT_MEMLOCK or run Cassandra as root.
INFO [main] 2025-10-23 05:27:04,856 StartupChecks.java:140 - jemalloc seems to be preloaded from /usr/lib64/libjemalloc.so.1
INFO [main] 2025-10-23 05:27:04,856 StartupChecks.java:176 - JMX is enabled to receive remote connections on port: 7201
INFO [main] 2025-10-23 05:27:04,866 SigarLibrary.java:44 - Initializing SIGAR library
INFO [main] 2025-10-23 05:27:04,896 SigarLibrary.java:180 - Checked OS settings and found them configured for optimal performance.
WARN [main] 2025-10-23 05:27:04,897 StartupChecks.java:311 - Maximum number of memory map areas per process (vm.max_map_count) 128960 is too low, recommended value: 1048575, you can change it with sysctl.
WARN [main] 2025-10-23 05:27:04,912 StartupChecks.java:332 - Directory /var/lib/cassandra/commitlog doesn't exist
WARN [main] 2025-10-23 05:27:04,921 StartupChecks.java:332 - Directory /var/lib/cassandra/saved_caches doesn't exist
WARN [main] 2025-10-23 05:27:04,922 StartupChecks.java:332 - Directory /opt/cassandra/data/hints doesn't exist
INFO [main] 2025-10-23 05:27:04,994 QueryProcessor.java:116 - Initialized prepared statement caches with 10 MB (native) and 10 MB (Thrift)
INFO [main] 2025-10-23 05:27:05,778 ColumnFamilyStore.java:411 - Initializing system.IndexInfo
INFO [main] 2025-10-23 05:27:07,330 ColumnFamilyStore.java:411 - Initializing system.batches
INFO [main] 2025-10-23 05:27:07,376 ColumnFamilyStore.java:411 - Initializing system.paxos
INFO [main] 2025-10-23 05:27:07,399 ColumnFamilyStore.java:411 - Initializing system.local
INFO [main] 2025-10-23 05:27:07,409 ColumnFamilyStore.java:411 - Initializing system.peers
INFO [main] 2025-10-23 05:27:07,413 ColumnFamilyStore.java:411 - Initializing system.peer_events
INFO [main] 2025-10-23 05:27:07,417 ColumnFamilyStore.java:411 - Initializing system.range_xfers
INFO [main] 2025-10-23 05:27:07,429 ColumnFamilyStore.java:411 - Initializing system.compaction_history
INFO [main] 2025-10-23 05:27:07,441 ColumnFamilyStore.java:411 - Initializing system.sstable_activity
INFO [main] 2025-10-23 05:27:07,446 ColumnFamilyStore.java:411 - Initializing system.size_estimates
INFO [main] 2025-10-23 05:27:07,450 ColumnFamilyStore.java:411 - Initializing system.available_ranges
INFO [main] 2025-10-23 05:27:07,454 ColumnFamilyStore.java:411 - Initializing system.transferred_ranges
INFO [main] 2025-10-23 05:27:07,473 ColumnFamilyStore.java:411 - Initializing system.views_builds_in_progress
INFO [main] 2025-10-23 05:27:07,485 ColumnFamilyStore.java:411 - Initializing system.built_views
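The vm.max_map_count warning above is actionable exactly as the message says. A hypothetical host-side fix (the kernel parameter cannot be changed from inside an unprivileged container, and the file name under /etc/sysctl.d is an arbitrary choice):

    # On the host: apply immediately, then persist across reboots.
    sysctl -w vm.max_map_count=1048575
    echo 'vm.max_map_count=1048575' > /etc/sysctl.d/99-cassandra.conf
    sysctl --system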
INFO [main] 2025-10-23 05:27:07,497 ColumnFamilyStore.java:411 - Initializing system.hints
INFO [main] 2025-10-23 05:27:07,508 ColumnFamilyStore.java:411 - Initializing system.batchlog
INFO [main] 2025-10-23 05:27:07,518 ColumnFamilyStore.java:411 - Initializing system.prepared_statements
INFO [main] 2025-10-23 05:27:07,523 ColumnFamilyStore.java:411 - Initializing system.schema_keyspaces
INFO [main] 2025-10-23 05:27:07,528 ColumnFamilyStore.java:411 - Initializing system.schema_columnfamilies
INFO [main] 2025-10-23 05:27:07,531 ColumnFamilyStore.java:411 - Initializing system.schema_columns
INFO [main] 2025-10-23 05:27:07,534 ColumnFamilyStore.java:411 - Initializing system.schema_triggers
INFO [main] 2025-10-23 05:27:07,537 ColumnFamilyStore.java:411 - Initializing system.schema_usertypes
INFO [main] 2025-10-23 05:27:07,540 ColumnFamilyStore.java:411 - Initializing system.schema_functions
INFO [main] 2025-10-23 05:27:07,544 ColumnFamilyStore.java:411 - Initializing system.schema_aggregates
INFO [main] 2025-10-23 05:27:07,545 ViewManager.java:137 - Not submitting build tasks for views in keyspace system as storage service is not initialized
INFO [main] 2025-10-23 05:27:07,819 ApproximateTime.java:44 - Scheduling approximate time-check task with a precision of 10 milliseconds
INFO [main] 2025-10-23 05:27:07,910 ColumnFamilyStore.java:411 - Initializing system_schema.keyspaces
INFO [main] 2025-10-23 05:27:07,928 ColumnFamilyStore.java:411 - Initializing system_schema.tables
INFO [main] 2025-10-23 05:27:07,932 ColumnFamilyStore.java:411 - Initializing system_schema.columns
INFO [main] 2025-10-23 05:27:07,961 ColumnFamilyStore.java:411 - Initializing system_schema.triggers
INFO [main] 2025-10-23 05:27:07,978 ColumnFamilyStore.java:411 - Initializing system_schema.dropped_columns
INFO [main] 2025-10-23 05:27:07,994 ColumnFamilyStore.java:411 - Initializing system_schema.views
INFO [main] 2025-10-23 05:27:08,015 ColumnFamilyStore.java:411 - Initializing system_schema.types
INFO [main] 2025-10-23 05:27:08,042 ColumnFamilyStore.java:411 - Initializing system_schema.functions
INFO [main] 2025-10-23 05:27:08,053 ColumnFamilyStore.java:411 - Initializing system_schema.aggregates
INFO [main] 2025-10-23 05:27:08,071 ColumnFamilyStore.java:411 - Initializing system_schema.indexes
INFO [main] 2025-10-23 05:27:08,078 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_schema as storage service is not initialized
INFO [MemtableFlushWriter:2] 2025-10-23 05:27:08,902 CacheService.java:112 - Initializing key cache with capacity of 49 MBs.
INFO [MemtableFlushWriter:2] 2025-10-23 05:27:08,912 CacheService.java:134 - Initializing row cache with capacity of 0 MBs
INFO [MemtableFlushWriter:2] 2025-10-23 05:27:08,913 CacheService.java:163 - Initializing counter cache with capacity of 24 MBs
INFO [MemtableFlushWriter:2] 2025-10-23 05:27:08,914 CacheService.java:174 - Scheduling counter cache save to every 7200 seconds (going to save all keys).
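With remote JMX enabled on port 7201 (authenticated via the jmxremote.password/access files in the JVM arguments above), ring membership can be checked with nodetool once the node finishes joining below. The credentials here are an assumption: whether the CASSANDRA_REAPER_JMX_AUTH_* values from the environment earlier in this log are what lands in jmxremote.password is not shown.

    # Hypothetical check; -p matches -Dcassandra.jmx.remote.port=7201 above.
    nodetool -h 10.0.0.38 -p 7201 -u reaperUser -pw reaperPass status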
INFO [CompactionExecutor:3] 2025-10-23 05:27:09,292 BufferPool.java:230 - Global buffer pool is enabled, when pool is exhausted (max is 502.000MiB) it will allocate on heap
INFO [main] 2025-10-23 05:27:09,430 StorageService.java:600 - Populating token metadata from system tables
INFO [main] 2025-10-23 05:27:09,505 StorageService.java:607 - Token metadata:
INFO [main] 2025-10-23 05:27:09,570 AutoSavingCache.java:174 - Completed loading (2 ms; 4 keys) KeyCache cache
INFO [main] 2025-10-23 05:27:09,599 CommitLog.java:152 - No commitlog files found; skipping replay
INFO [main] 2025-10-23 05:27:09,600 StorageService.java:600 - Populating token metadata from system tables
INFO [main] 2025-10-23 05:27:09,654 StorageService.java:607 - Token metadata:
INFO [main] 2025-10-23 05:27:09,785 QueryProcessor.java:163 - Preloaded 0 prepared statements
INFO [main] 2025-10-23 05:27:09,786 StorageService.java:618 - Cassandra version: 3.11.3
INFO [main] 2025-10-23 05:27:09,787 StorageService.java:619 - Thrift API version: 20.1.0
INFO [main] 2025-10-23 05:27:09,787 StorageService.java:620 - CQL supported versions: 3.4.4 (default: 3.4.4)
INFO [main] 2025-10-23 05:27:09,788 StorageService.java:622 - Native protocol supported versions: 3/v3, 4/v4, 5/v5-beta (default: 4/v4)
INFO [main] 2025-10-23 05:27:09,876 IndexSummaryManager.java:85 - Initializing index summary manager with a memory pool size of 49 MB and a resize interval of 60 minutes
INFO [main] 2025-10-23 05:27:09,888 MessagingService.java:761 - Starting Messaging Service on /10.0.0.38:7012 (ens3)
WARN [main] 2025-10-23 05:27:09,900 SystemKeyspace.java:1087 - No host ID found, created e3e215f6-ed95-4498-b622-6d05e5797def (Note: This should happen exactly once per node).
INFO [main] 2025-10-23 05:27:09,938 OutboundTcpConnection.java:108 - OutboundTcpConnection using coalescing strategy DISABLED
INFO [HANDSHAKE-/10.0.0.33] 2025-10-23 05:27:09,977 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.33
INFO [HANDSHAKE-/10.0.0.22] 2025-10-23 05:27:10,027 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.22
INFO [main] 2025-10-23 05:27:10,961 StorageService.java:550 - Unable to gossip with any peers but continuing anyway since node is in its own seed list
INFO [main] 2025-10-23 05:27:10,981 StorageService.java:704 - Loading persisted ring state
INFO [main] 2025-10-23 05:27:10,981 StorageService.java:822 - Starting up server gossip
INFO [main] 2025-10-23 05:27:11,117 StorageService.java:883 - This node will not auto bootstrap because it is configured to be a seed node.
INFO [main] 2025-10-23 05:27:11,137 BootStrapper.java:228 - Generated random tokens.
tokens are [-3348238394260360278, 4515728251183960132, 5240383862359754052, 4243636942600540889, 2528638278852377215, 6329429946935225621, 1902981924036492208, -3911257184563472125, -1489218801034296413, 753568464801915537, 3871137836190980169, -2294242731070239149, -8803860199936673134, 1559061249934473511, -57524803611698016, 6446433009170684987, 1961156106243466169, -8676734289599448466, -6044792280591531748, 3791151754228413110, -4894144744058666882, -9128088832537300343, 5283514305678254237, 578338464123960801, 3222840094784419574, 4054245216642445488, 268253711348307534, 8033974695574119034, -7722954308972837258, 7898443602620142561, 4461565142853681782, -8172954004811159836, 7375905848056590713, 5208261788545549302, 1378274921798577776, -1390464315184521626, -1011077095766942884, 7906278758655716780, 4281062454697940266, -9013676270500884509, 2277023749698490144, 2349397954487969143, 3805727586218328552, -165894668404078620, -6205614792449212769, 2184117299868466498, 3200925731724955435, 9016859250797012359, 8138744164219861560, 8235553328131782181, -989538463663666275, 7517393713129888293, -7142134722779849601, 1478780770227723191, -2966221383731627929, 3880155802909036290, 7995255497822930976, -433503091957731226, -518751197968111494, -5638910017737757018, 1522225663766724024, -8220232272478518680, 3493633057881941598, -6953573360326424380, -16326576316595986, -5349034012865633802, -3044231193758878288, 2167767620692915646, 7046243775458649900, -8581154728876131329, 1046641908627790223, 8334377984591949006, -1554164042120244649, -4453893883619724755, -7278092454130118121, 2931439040951264483, -2955942703784656604, 65115408181281903, -8074929413850251761, 3042871848632455089, 1174207015308610520, 3993361172679778420, 8485058914839397888, 502918007945496992, -770860319810113732, 7098534852091542873, 576750845114661261, 364595807775850309, -3523852860798722414, 1276884632459731503, -6174005651969955484, -9040855290651820339, 6458572414906581665, 2808082872296208208, 725149980375279585, -8331364836956819357, 5403123492130668572, 8254993282800227447, 6498451596110598028, 7613564100476220101, -9013336406710194039, 4424531868333431452, 3470628984769722887, -8643838805339471182, -5927878916127319963, 224801236184160161, -3375849944492993817, 4556132035884136586, 1927682091961103290, -5514162586074938747, -7996229067582705671, -7484812212281846701, -961164088336929840, -15084899941986785, 3059794030819072035, 3685248416519022998, 221966509292621015, -7502513385885297803, 7806310031650975275, -133243095035505510, -4017686892672463951, -6047301216005205316, 179249069586952807, -7312127600577918453, 1889544730018943524, -7026686225713396395, 5974281157035985819, -532605900615182632, -4976474569605975166, -177512475092749523, -2793759610163174870, -8303451948336824207, -2346938399765006899, 2568414348209477260, -2918337842247139543, 185633251354145970, -3127038096975105926, 2849445178677373926, -2398320789029826032, -9010084445944269564, 6278989696645260087, -5675370443761444756, 6190641265942991327, -638376767429488350, -6256920895188483304, -9051293131666236775, -1430725683344822081, 2541248825917970582, -4583439279602100569, 7892925851505089411, 439298427100090748, 618857532125842079, -1861273099734939941, 3498627004956377471, -6248071436916639885, -8591587183674403526, -8768315774904962551, 6492789555716590371, 3698587086087170124, 8235563135832404608, -5545795340927028257, -68737909904784155, 4952355911388954416, 3355458342158426788, -8181970008207187104, -621605472699674542, 
-8293731915201520273, 3045928234845256644, -2495264125787388438, -6009725149494430324, -7749420497996612147, -8334506264407830479, 7153007869284839592, 1779386690910597336, 4083317723781854987, 5660211333672079095, 4992373812185432813, 1685438733681024043, -811985771564271658, 3476693345181358851, 7664504057510967197, -7629333491330254155, -1831971271115591587, 6922748706121127814, -3042713658551096734, -5160353118684126606, -6541562726838276148, -2609105253993221903, 5367209152034654192, 3651539644458216942, -3869829955384560696, -823342299424748626, 4828967922521522070, -5011577615369482032, -7586027300738721159, 8893124313477027770, -5847652902628588309, -9017461413429843607, 2468850048972736870, 6074238458740812327, 5798254361521876396, -8082094983257883436, 3218530561528201707, -9187799674336077181, -1220664351306713359, 3090767044228119986, -717241872228978115, -7390933781237961020, 8197186854341547759, -1586918234530172629, -8773505122410693724, -8273919884509638413, 862219998719296827, -4896438725142008287, -8322237176015459491, -8613192313476785173, 1391465591891733387, 7388762543395613075, 9033112600300076056, 6902352395503639581, -5280417027976676295, 1852938924636875092, 239026814100028748, 2700179683804460349, 2260855069751754277, -5034694521094667155, 5535632749929343799, 3427917889677930507, -8394320814231793934, -7612417726513040201, -823392525840919794, -6844914041458822407, -8287399955469621708, -49812025419026459, 8019256607777152678, -7933150530506542911, -1101192646096945541, -2774615742556887635, 3829930878300494936, 3532467699859125980, -4770873823051992750, -7823745649819463171, -8730031412105096492, -1415635221598753326, -5709596433478422330, 5032593115307147783, -6922324446458005438, -9056547965930173615, -7548741425097653571, -7008476589456379853, -753507406334998879, -1038777602242486636, -3180486059107029708, -3285732827440336530, 7833941182209041517, -3749508813868307780] INFO [main] 2025-10-23 05:27:11,153 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_traces, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=2}}, tables=[org.apache.cassandra.config.CFMetaData@50b3b638[cfId=c5e99f16-8677-3914-b17e-960613512345,ksName=system_traces,cfName=sessions,flags=[COMPOUND],params=TableParams{comment=tracing sessions, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=0, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [client command coordinator duration request started_at parameters]],partitionKeyColumns=[session_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[client, command, session_id, coordinator, request, started_at, duration, parameters],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@5a1398cc[cfId=8826e8e9-e16a-3728-8753-3bc1fc713c25,ksName=system_traces,cfName=events,flags=[COMPOUND],params=TableParams{comment=tracing events, read_repair_chance=0.0, 
dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=0, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | [activity source source_elapsed thread]],partitionKeyColumns=[session_id],clusteringColumns=[event_id],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[activity, event_id, session_id, source, thread, source_elapsed],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]}
INFO [GossipStage:1] 2025-10-23 05:27:11,410 Gossiper.java:1055 - Node /10.0.0.22 is now part of the cluster
INFO [RequestResponseStage-1] 2025-10-23 05:27:11,435 Gossiper.java:1019 - InetAddress /10.0.0.22 is now UP
+ cqlsh 10.0.0.38 9041 -e 'CREATE KEYSPACE IF NOT EXISTS reaper_db WITH replication = {'\''class'\'': '\''NetworkTopologyStrategy'\'', '\''datacenter1'\'': 3};'
INFO [GossipStage:1] 2025-10-23 05:27:11,925 Gossiper.java:1055 - Node /10.0.0.33 is now part of the cluster
INFO [RequestResponseStage-1] 2025-10-23 05:27:11,936 Gossiper.java:1019 - InetAddress /10.0.0.33 is now UP
INFO [GossipStage:1] 2025-10-23 05:27:12,032 TokenMetadata.java:479 - Updating topology for /10.0.0.33
INFO [GossipStage:1] 2025-10-23 05:27:12,036 TokenMetadata.java:479 - Updating topology for /10.0.0.33
INFO [MigrationStage:1] 2025-10-23 05:27:12,089 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_traces as storage service is not initialized
INFO [MigrationStage:1] 2025-10-23 05:27:12,123 ColumnFamilyStore.java:411 - Initializing system_traces.events
INFO [HANDSHAKE-/10.0.0.22] 2025-10-23 05:27:12,186 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.22
INFO [MigrationStage:1] 2025-10-23 05:27:12,284 ColumnFamilyStore.java:411 - Initializing system_traces.sessions
INFO [main] 2025-10-23 05:27:12,335 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_distributed, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@647dcb66[cfId=759fffad-624b-3181-80ee-fa9a52d1f627,ksName=system_distributed,cfName=repair_history,flags=[COMPOUND],params=TableParams{comment=Repair history, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | [coordinator exception_message exception_stacktrace finished_at parent_id range_begin range_end started_at status
participants]],partitionKeyColumns=[keyspace_name, columnfamily_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[status, id, coordinator, finished_at, participants, exception_stacktrace, parent_id, range_end, range_begin, exception_message, keyspace_name, started_at, columnfamily_name],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@1aabc807[cfId=deabd734-b99d-3b9c-92e5-fd92eb5abf14,ksName=system_distributed,cfName=parent_repair_history,flags=[COMPOUND],params=TableParams{comment=Repair history, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [exception_message exception_stacktrace finished_at keyspace_name started_at columnfamily_names options requested_ranges successful_ranges]],partitionKeyColumns=[parent_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[requested_ranges, exception_message, keyspace_name, successful_ranges, started_at, finished_at, options, exception_stacktrace, parent_id, columnfamily_names],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@26a889ad[cfId=5582b59f-8e4e-35e1-b913-3acada51eb04,ksName=system_distributed,cfName=view_build_status,flags=[COMPOUND],params=TableParams{comment=Materialized View build status, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UUIDType),partitionColumns=[[] | [status]],partitionKeyColumns=[keyspace_name, view_name],clusteringColumns=[host_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[view_name, status, keyspace_name, host_id],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [HANDSHAKE-/10.0.0.33] 2025-10-23 05:27:12,344 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.33 Connection error: ('Unable to connect to any servers', {'10.0.0.38': error(111, "Tried connecting to [('10.0.0.38', 9041)]. 
Last error: Connection refused")}) + sleep 10 INFO [MigrationStage:1] 2025-10-23 05:27:12,806 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_distributed as storage service is not initialized INFO [MigrationStage:1] 2025-10-23 05:27:12,809 ColumnFamilyStore.java:411 - Initializing system_distributed.parent_repair_history INFO [MigrationStage:1] 2025-10-23 05:27:12,854 ColumnFamilyStore.java:411 - Initializing system_distributed.repair_history INFO [MigrationStage:1] 2025-10-23 05:27:12,867 ColumnFamilyStore.java:411 - Initializing system_distributed.view_build_status INFO [main] 2025-10-23 05:27:12,970 StorageService.java:1446 - JOINING: Finish joining ring INFO [main] 2025-10-23 05:27:13,208 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_auth, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=1}}, tables=[org.apache.cassandra.config.CFMetaData@5db682b5[cfId=5bc52802-de25-35ed-aeab-188eecebb090,ksName=system_auth,cfName=roles,flags=[COMPOUND],params=TableParams{comment=role definitions, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [can_login is_superuser salted_hash member_of]],partitionKeyColumns=[role],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[salted_hash, member_of, role, can_login, is_superuser],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@613d8514[cfId=0ecdaa87-f8fb-3e60-88d1-74fb36fe5c0d,ksName=system_auth,cfName=role_members,flags=[COMPOUND],params=TableParams{comment=role memberships lookup table, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | []],partitionKeyColumns=[role],clusteringColumns=[member],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[role, member],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@4ed520e1[cfId=3afbe79f-2194-31a7-add7-f5ab90d8ec9c,ksName=system_auth,cfName=role_permissions,flags=[COMPOUND],params=TableParams{comment=permissions granted to db roles, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, 
speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [permissions]],partitionKeyColumns=[role],clusteringColumns=[resource],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[role, resource, permissions],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@6c7a9864[cfId=5f2fbdad-91f1-3946-bd25-d5da3a5c35ec,ksName=system_auth,cfName=resource_role_permissons_index,flags=[COMPOUND],params=TableParams{comment=index of db roles with permissions granted on a resource, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | []],partitionKeyColumns=[resource],clusteringColumns=[role],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[resource, role],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [InternalResponseStage:2] 2025-10-23 05:27:13,258 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_auth as storage service is not initialized INFO [InternalResponseStage:2] 2025-10-23 05:27:13,261 ColumnFamilyStore.java:411 - Initializing system_auth.resource_role_permissons_index INFO [InternalResponseStage:2] 2025-10-23 05:27:13,269 ColumnFamilyStore.java:411 - Initializing system_auth.role_members INFO [InternalResponseStage:2] 2025-10-23 05:27:13,276 ColumnFamilyStore.java:411 - Initializing system_auth.role_permissions INFO [InternalResponseStage:2] 2025-10-23 05:27:13,289 ColumnFamilyStore.java:411 - Initializing system_auth.roles INFO [main] 2025-10-23 05:27:13,797 Gossiper.java:1692 - Waiting for gossip to settle... 
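While Cassandra waits for gossip to settle, the entrypoint is still inside its keyspace-creation retry: the first cqlsh attempt above failed with "Connection refused" because the native transport on 10.0.0.38:9041 was not yet listening, so the script slept 10 seconds before trying again. A minimal sketch of that wait-and-retry pattern, assuming cqlsh is on PATH; the 30-attempt bound is illustrative, not the entrypoint's actual logic:

  # Poll until the CQL port answers a trivial query, then stop waiting.
  for attempt in $(seq 1 30); do
    if cqlsh 10.0.0.38 9041 -e 'SELECT release_version FROM system.local;' >/dev/null 2>&1; then
      break        # native transport is up; safe to run DDL now
    fi
    sleep 10       # same back-off interval the traced script uses
  done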
INFO [main] 2025-10-23 05:27:21,798 Gossiper.java:1723 - No gossip backlog; proceeding INFO [main] 2025-10-23 05:27:22,121 NativeTransportService.java:70 - Netty using native Epoll event loop INFO [main] 2025-10-23 05:27:22,266 Server.java:155 - Using Netty Version: [netty-buffer=netty-buffer-4.1.39.Final.88c2a4c (repository: dirty), netty-codec=netty-codec-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-dns=netty-codec-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-haproxy=netty-codec-haproxy-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http=netty-codec-http-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http2=netty-codec-http2-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-memcache=netty-codec-memcache-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-mqtt=netty-codec-mqtt-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-redis=netty-codec-redis-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-smtp=netty-codec-smtp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-socks=netty-codec-socks-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-stomp=netty-codec-stomp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-xml=netty-codec-xml-4.1.39.Final.88c2a4c (repository: dirty), netty-common=netty-common-4.1.39.Final.88c2a4c (repository: dirty), netty-handler=netty-handler-4.1.39.Final.88c2a4c (repository: dirty), netty-handler-proxy=netty-handler-proxy-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver=netty-resolver-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver-dns=netty-resolver-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-tcnative=netty-tcnative-2.0.25.Final.c46c351, netty-transport=netty-transport-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-epoll=netty-transport-native-epoll-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-kqueue=netty-transport-native-kqueue-4.1.39.Final.88c2a4cab5 (repository: dirty), netty-transport-native-unix-common=netty-transport-native-unix-common-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-rxtx=netty-transport-rxtx-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-sctp=netty-transport-sctp-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-udt=netty-transport-udt-4.1.39.Final.88c2a4c (repository: dirty)] INFO [main] 2025-10-23 05:27:22,267 Server.java:156 - Starting listening for CQL clients on /10.0.0.38:9041 (unencrypted)... INFO [main] 2025-10-23 05:27:22,338 ThriftServer.java:116 - Binding thrift service to /10.0.0.38:9161 INFO [Thread-2] 2025-10-23 05:27:22,344 ThriftServer.java:133 - Listening for thrift clients... 
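With the native transport now listening on 10.0.0.38:9041, the retried CREATE KEYSPACE that follows succeeds. The '\'' runs in the traced command are only single-quote escaping introduced by the shell trace; stripped of that noise, the command is equivalent to:

  # Idempotent: IF NOT EXISTS makes the retry loop safe to run repeatedly.
  cqlsh 10.0.0.38 9041 -e "CREATE KEYSPACE IF NOT EXISTS reaper_db \
    WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 3};"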
+ cqlsh 10.0.0.38 9041 -e 'CREATE KEYSPACE IF NOT EXISTS reaper_db WITH replication = {'\''class'\'': '\''NetworkTopologyStrategy'\'', '\''datacenter1'\'': 3};' INFO [Native-Transport-Requests-2] 2025-10-23 05:27:23,049 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=reaper_db, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.NetworkTopologyStrategy, datacenter1=3}}, tables=[], views=[], functions=[], types=[]} + export CASSANDRA_REAPER_JMX_KEY + run_service cassandra-reaper + [[ 10.0.0.38 == \1\0\.\0\.\0\.\3\8 ]] + sleep 120 + [[ -n 1999 ]] + [[ -n 1999 ]] + local owner_opts=1999:1999 + mkdir -p /etc/contrail /var/lib/contrail + chown 1999:1999 /etc/contrail /var/lib/contrail + find /etc/contrail -uid 0 -exec chown 1999:1999 '{}' + + chmod 755 /etc/contrail + do_run_service cassandra-reaper + [[ -n 1999 ]] + [[ -n 1999 ]] + mkdir -p /var/crashes + chmod 777 /var/crashes ++ id -un 1999 + local user_name=contrail + export HOME=/home/contrail + HOME=/home/contrail + mkdir -p /home/contrail + chown -R 1999:1999 /home/contrail + exec setpriv --reuid 1999 --regid 1999 --clear-groups --no-new-privs cassandra-reaper Looking for reaper under /usr WARN [2025-10-23 05:27:27,111] [main] c.d.d.c.ReplicationStrategy$NetworkTopologyStrategy - Error while computing token map for keyspace reaper_db with datacenter datacenter1: could not achieve replication factor 3 (found 2 replicas only), check your keyspace replication settings. INFO [Native-Transport-Requests-1] 2025-10-23 05:27:27,262 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@523f05f0[cfId=f6875cd0-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, executed_at, script],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:27,490 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration INFO [MigrationStage:1] 2025-10-23 05:27:28,274 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration_leader WARN [2025-10-23 05:27:29,097] [main] i.c.s.CassandraStorage - Starting db migration from 0 to 31… WARN [2025-10-23 05:27:29,182] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:29,188] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:29,200] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [HANDSHAKE-/10.0.0.38] 2025-10-23 05:27:29,245 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.38 WARN [Native-Transport-Requests-2] 2025-10-23 05:27:29,254 TimeFcts.java:99 - The function 'dateof' is deprecated. Use the function 'toTimestamp' instead. INFO [MigrationStage:1] 2025-10-23 05:27:29,503 ColumnFamilyStore.java:411 - Initializing reaper_db.running_reapers INFO [Native-Transport-Requests-1] 2025-10-23 05:27:30,068 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@3c5110c2[cfId=f833ad40-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:30,193 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_unit_v1 INFO [MigrationStage:1] 2025-10-23 05:27:31,175 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_by_cluster_and_keyspace INFO [Native-Transport-Requests-1] 2025-10-23 05:27:32,018 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@142a89b0[cfId=f95d3920-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=repair_run_by_cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:32,157 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster INFO [MigrationStage:1] 2025-10-23 05:27:33,155 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_v1 INFO [Native-Transport-Requests-1] 2025-10-23 05:27:33,968 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@62f247e8[cfId=fa86c500-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[partitioner, seed_hosts, name],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:34,084 ColumnFamilyStore.java:411 - Initializing reaper_db.cluster INFO [MigrationStage:1] 2025-10-23 05:27:35,303 ColumnFamilyStore.java:411 - Initializing reaper_db.snapshot INFO [Native-Transport-Requests-1] 2025-10-23 05:27:36,119 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@6472dac0[cfId=fbcefc70-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:36,489 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v1 INFO [MigrationStage:1] 2025-10-23 05:27:37,238 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run INFO [Native-Transport-Requests-1] 2025-10-23 
05:27:38,099 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@a4d295c[cfId=fcfd1c30-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:38,277 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_unit INFO [MigrationStage:1] 2025-10-23 05:27:40,323 ColumnFamilyStore.java:411 - Initializing reaper_db.leader WARN [2025-10-23 05:27:41,254] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:41,278] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:41,282] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-23 05:27:42,198] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:42,203] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:42,209] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-23 05:27:42,326] [main] i.c.s.c.FixRepairRunTimestamps - Correcting timestamps in the repair_run table. 
This may take some minutes… WARN [2025-10-23 05:27:42,373] [main] i.c.s.c.FixRepairRunTimestamps - Correction of timestamps in the repair_run table completed. WARN [2025-10-23 05:27:42,460] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:42,478] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:42,484] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-23 05:27:42,569] [main] i.c.s.c.FixRepairRunTimestamps - Correcting timestamps in the repair_run table. This may take some minutes… WARN [2025-10-23 05:27:42,571] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO repair_run (id,start_time,pause_time,end_time) VALUES(?, ?, ?, ?)' WARN [2025-10-23 05:27:42,657] [main] i.c.s.c.FixRepairRunTimestamps - Correction of timestamps in the repair_run table completed. WARN [2025-10-23 05:27:42,732] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:42,736] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:42,741] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
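Two things recur throughout the migration output above: the driver-side warning about re-preparing already prepared statements (Reaper's migration runner evidently prepares the same three statements anew for each migration script rather than caching them), and Cassandra's note that dateOf() is deprecated in favor of toTimestamp(). The replacement is a drop-in; a quick check against this node, reusing the cqlsh endpoint from the trace:

  # Both functions return the current timestamp; only toTimestamp()
  # is non-deprecated on this Cassandra version.
  cqlsh 10.0.0.38 9041 -e "SELECT toTimestamp(now()) FROM system.local;"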
INFO [STREAM-INIT-/10.0.0.22:37694] 2025-10-23 05:27:43,882 StreamResultFuture.java:116 - [Stream #003b5740-afd1-11f0-9725-4305b78b0d6e ID#0] Creating new streaming plan for Bootstrap INFO [STREAM-INIT-/10.0.0.22:37694] 2025-10-23 05:27:43,894 StreamResultFuture.java:123 - [Stream #003b5740-afd1-11f0-9725-4305b78b0d6e, ID#0] Received streaming plan for Bootstrap INFO [STREAM-INIT-/10.0.0.22:37706] 2025-10-23 05:27:43,895 StreamResultFuture.java:123 - [Stream #003b5740-afd1-11f0-9725-4305b78b0d6e, ID#0] Received streaming plan for Bootstrap INFO [STREAM-IN-/10.0.0.22:37706] 2025-10-23 05:27:43,930 StreamResultFuture.java:187 - [Stream #003b5740-afd1-11f0-9725-4305b78b0d6e] Session with /10.0.0.22 is complete INFO [STREAM-IN-/10.0.0.22:37706] 2025-10-23 05:27:43,932 StreamResultFuture.java:219 - [Stream #003b5740-afd1-11f0-9725-4305b78b0d6e] All sessions completed WARN [2025-10-23 05:27:44,646] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:44,654] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:44,656] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
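The Stream # lines above record 10.0.0.22 bootstrapping against this node: a streaming plan is created, received, and the session completes. Such sessions can also be observed from the node with nodetool (shipped with Cassandra); this is a general observation command, not something the entrypoint runs:

  # Lists active and completed streaming sessions plus pending messages.
  nodetool netstats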
INFO [MigrationStage:1] 2025-10-23 05:27:44,812 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v2 INFO [Native-Transport-Requests-1] 2025-10-23 05:27:45,108 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@575e7254[cfId=012a9940-afd1-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:45,263 ColumnFamilyStore.java:411 - Initializing reaper_db.node_operations WARN [2025-10-23 05:27:46,181] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:46,187] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:46,190] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
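Reaper's earlier warning that replication factor 3 "found 2 replicas only" fired while the third node was still joining; once 10.0.0.22 and 10.0.0.33 are both up, datacenter1 has the three replicas the reaper_db keyspace asks for. Assuming nodetool and cqlsh access as above, a quick confirmation:

  nodetool status    # all three nodes should show UN (Up/Normal) in datacenter1
  cqlsh 10.0.0.38 9041 -e "DESCRIBE KEYSPACE reaper_db;"    # prints the replication map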
INFO [Native-Transport-Requests-3] 2025-10-23 05:27:46,220 MigrationManager.java:454 - Update table 'reaper_db/cluster' From org.apache.cassandra.config.CFMetaData@3296e8e6[cfId=fa86c500-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner properties seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, name, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@2c9925f6[cfId=fa86c500-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-23 05:27:48,204] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:48,207] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:48,209] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-10-23 05:27:48,374 ColumnFamilyStore.java:411 - Initializing reaper_db.diagnostic_event_subscription WARN [2025-10-23 05:27:49,272] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:49,281] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:49,286] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [Native-Transport-Requests-1] 2025-10-23 05:27:50,074 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@11c162b5[cfId=042059a0-afd1-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=node_metrics_v3,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket, host],clusteringColumns=[ts, metric_scope, metric_name, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:50,202 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v3 WARN [2025-10-23 05:27:51,112] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:51,116] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) 
IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:51,118] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [Native-Transport-Requests-3] 2025-10-23 05:27:51,141 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@b451905[cfId=04c32950-afd1-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=repair_run_by_cluster_v2,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimeUUIDType)),partitionColumns=[[] | [repair_run_state]],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, repair_run_state, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:51,317 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster_v2 WARN [2025-10-23 05:27:52,104] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:52,141] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:52,146] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
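Every applied script lands in reaper_db.schema_migration, whose schema appears above (partition key applied_successful, clustering column version), so progress through the 0-to-31 migration can be read back directly:

  # Single-partition read: applied_successful is the partition key.
  cqlsh 10.0.0.38 9041 -e "SELECT version, script_name, executed_at \
    FROM reaper_db.schema_migration WHERE applied_successful = true;"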
INFO [Native-Transport-Requests-2] 2025-10-23 05:27:52,229 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@27c12a33[cfId=fc67ba00-afd0-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, last_event, id, segment_end_time, state, cluster_name, end_time, end_token, start_token, segment_start_time, segment_state, cause, creation_time, start_time, coordinator_host, token_ranges, owner, repair_parallelism, tables, segment_id, pause_time, repair_unit_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@7bf57fe8[cfId=fc67ba00-afd0-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-23 05:27:53,186] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:53,193] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:53,196] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [Native-Transport-Requests-2] 2025-10-23 05:27:53,260 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@48c1f3fd[cfId=06067ec0-afd1-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=running_repairs,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [reaper_instance_host reaper_instance_id segment_id]],partitionKeyColumns=[repair_id],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, repair_id, node, segment_id, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:27:53,398 ColumnFamilyStore.java:411 - Initializing reaper_db.running_repairs WARN [2025-10-23 05:27:54,138] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:54,143] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:54,145] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-10-23 05:27:54,318 ColumnFamilyStore.java:411 - Initializing reaper_db.percent_repaired_by_schedule WARN [2025-10-23 05:27:55,194] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:55,198] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:55,202] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [Native-Transport-Requests-1] 2025-10-23 05:27:55,245 MigrationManager.java:454 - Update table 'reaper_db/repair_unit_v1' From org.apache.cassandra.config.CFMetaData@29577a43[cfId=f833ad40-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@78325977[cfId=f833ad40-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-23 05:27:59,136] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:27:59,140] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:27:59,145] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [Native-Transport-Requests-2] 2025-10-23 05:27:59,183 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_v1' From org.apache.cassandra.config.CFMetaData@4faa2c35[cfId=f9f3acc0-afd0-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity next_activation owner pause_time repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, id, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@19253e02[cfId=f9f3acc0-afd0-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] WARN 
[2025-10-23 05:28:01,594] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-23 05:28:01,597] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-23 05:28:01,599] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-23 05:28:02,212] [main] i.c.s.c.Migration016 - altering every table to set `dclocal_read_repair_chance` to zero… WARN [2025-10-23 05:28:02,215] [main] i.c.s.c.Migration016 - alter every table to set `dclocal_read_repair_chance` to zero completed. INFO [Native-Transport-Requests-3] 2025-10-23 05:28:02,216 MigrationManager.java:454 - Update table 'reaper_db/node_metrics_v3' From org.apache.cassandra.config.CFMetaData@4472adc9[cfId=042059a0-afd1-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=node_metrics_v3,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket, host],clusteringColumns=[ts, metric_scope, metric_name, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@76f5c47d[cfId=042059a0-afd1-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=node_metrics_v3,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, 
compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket, host],clusteringColumns=[ts, metric_scope, metric_name, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-5] 2025-10-23 05:28:02,218 MigrationManager.java:454 - Update table 'reaper_db/repair_run_by_unit' From org.apache.cassandra.config.CFMetaData@654124c9[cfId=fcfd1c30-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@55f6c888[cfId=fcfd1c30-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-10-23 05:28:02,220 MigrationManager.java:454 - Update table 'reaper_db/running_reapers' From 
org.apache.cassandra.config.CFMetaData@6b0ac050[cfId=f7be0810-afd0-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=running_reapers,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=180, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host]],partitionKeyColumns=[reaper_instance_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@189284a1[cfId=f7be0810-afd0-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=running_reapers,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=180, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host]],partitionKeyColumns=[reaper_instance_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-6] 2025-10-23 05:28:02,218 MigrationManager.java:454 - Update table 'reaper_db/schema_migration' From org.apache.cassandra.config.CFMetaData@264024ec[cfId=f6875cd0-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, executed_at, script],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@77806290[cfId=f6875cd0-afd0-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, 
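
Each From/To dump above differs in exactly one parameter: dclocal_read_repair_chance drops from 0.1 to 0.0, which is the change the Migration016 message announced. Expressed as plain CQL it would look roughly like the following sketch; session is the connection from the earlier snippet, the migration itself runs inside Reaper, and the table list here is only the tables visible in the dumps above:

    # Sketch: the per-table change Migration016 applies, as CQL.
    for table in ("node_metrics_v3", "repair_run_by_unit", "running_reapers"):
        session.execute(
            f"ALTER TABLE reaper_db.{table} WITH dclocal_read_repair_chance = 0.0"
        )
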
crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, executed_at, script],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-23 05:28:02,216 MigrationManager.java:454 - Update table 'reaper_db/diagnostic_event_subscription' From org.apache.cassandra.config.CFMetaData@64ba760e[cfId=0307bdb0-afd1-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=diagnostic_event_subscription,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster description export_file_logger export_http_endpoint export_sse events nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[cluster, export_http_endpoint, events, id, export_sse, nodes, export_file_logger, description],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@666375d2[cfId=0307bdb0-afd1-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=diagnostic_event_subscription,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster description export_file_logger export_http_endpoint export_sse events nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[cluster, export_http_endpoint, events, id, export_sse, nodes, export_file_logger, description],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-4] 2025-10-23 05:28:02,218 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@27c12a33[cfId=fc67ba00-afd0-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, 
dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@5a4c7ab4[cfId=fc67ba00-afd0-11f0-8d10-c5d1d04ddfcf,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-6] 2025-10-23 05:28:08,153 MigrationManager.java:454 - Update table 'reaper_db/node_operations' From org.apache.cassandra.config.CFMetaData@3e27575e[cfId=012a9940-afd1-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, 
max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@7d7809e0[cfId=012a9940-afd1-11f0-afad-03e27b3495d5,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy, options={min_threshold=4, max_threshold=32, compaction_window_size=30, compaction_window_unit=MINUTES, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:28:09,180 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_uuid_table INFO [Native-Transport-Requests-1] 2025-10-23 05:28:10,061 MigrationManager.java:427 - Update Keyspace 'config_db_uuid' From KeyspaceMetadata{name=config_db_uuid, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@1ed120de[cfId=0f683a80-afd1-11f0-9725-4305b78b0d6e,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, 
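
The node_operations diff above changes more than read repair: its compaction class moves from SizeTieredCompactionStrategy to TimeWindowCompactionStrategy with 30-minute windows, a better fit for a table whose rows expire quickly (default_time_to_live=300, gc_grace_seconds=300). The To-side options correspond to roughly this CQL; a sketch only, with the option values copied from the dump above and quoted as the compaction map expects:

    # Sketch: the compaction change visible in the node_operations diff.
    session.execute("""
        ALTER TABLE reaper_db.node_operations WITH compaction = {
            'class': 'TimeWindowCompactionStrategy',
            'compaction_window_unit': 'MINUTES',
            'compaction_window_size': '30',
            'min_threshold': '4',
            'max_threshold': '32',
            'unchecked_tombstone_compaction': 'true'
        }
    """)
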
column1, value],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} To KeyspaceMetadata{name=config_db_uuid, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@1ed120de[cfId=0f683a80-afd1-11f0-9725-4305b78b0d6e,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [Native-Transport-Requests-2] 2025-10-23 05:28:11,173 MigrationManager.java:427 - Update Keyspace 'svc_monitor_keyspace' From KeyspaceMetadata{name=svc_monitor_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} To KeyspaceMetadata{name=svc_monitor_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [Native-Transport-Requests-1] 2025-10-23 05:28:12,281 MigrationManager.java:454 - Update table 'config_db_uuid/obj_uuid_table' From org.apache.cassandra.config.CFMetaData@1ed120de[cfId=0f683a80-afd1-11f0-9725-4305b78b0d6e,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@58667eeb[cfId=0f683a80-afd1-11f0-9725-4305b78b0d6e,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, 
speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-3] 2025-10-23 05:28:13,044 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=useragent, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} WARN [2025-10-23 05:28:13,135] [main] i.c.ReaperApplication - Reaper is ready to get things done! INFO [Native-Transport-Requests-2] 2025-10-23 05:28:14,176 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@5f69fed4[cfId=127e0600-afd1-11f0-afad-03e27b3495d5,ksName=svc_monitor_keyspace,cfName=service_instance_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:28:14,333 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.service_instance_table INFO [MigrationStage:1] 2025-10-23 05:28:15,156 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_fq_name_table INFO [MigrationStage:1] 2025-10-23 05:28:19,255 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_shared_table INFO [MigrationStage:1] 2025-10-23 05:28:20,245 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.pool_table INFO [Native-Transport-Requests-1] 2025-10-23 05:28:22,085 MigrationManager.java:454 - Update table 'svc_monitor_keyspace/pool_table' From org.apache.cassandra.config.CFMetaData@5b804320[cfId=16081720-afd1-11f0-8d10-c5d1d04ddfcf,ksName=svc_monitor_keyspace,cfName=pool_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, 
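
Every keyspace created or updated in this stretch of the log (config_db_uuid, svc_monitor_keyspace, useragent, and the rest below) carries SimpleStrategy with replication_factor=3, one replica per controller node. On Cassandra 3.x those settings can be audited in a single query; a sketch reusing the session from the earlier snippets:

    # Sketch: audit keyspace replication via system_schema (Cassandra 3.x).
    for row in session.execute(
            "SELECT keyspace_name, replication FROM system_schema.keyspaces"):
        print(row.keyspace_name, row.replication)
    # The keyspaces in this log should all report SimpleStrategy
    # with 'replication_factor': '3'.
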
extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3eebd39d[cfId=16081720-afd1-11f0-8d10-c5d1d04ddfcf,ksName=svc_monitor_keyspace,cfName=pool_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-23 05:28:24,172 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@5f42a8a4[cfId=18734ac0-afd1-11f0-afad-03e27b3495d5,ksName=useragent,cfName=useragent_keyval_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:28:24,309 ColumnFamilyStore.java:411 - Initializing useragent.useragent_keyval_table INFO [MigrationStage:1] 2025-10-23 05:28:25,309 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.loadbalancer_table INFO [Native-Transport-Requests-1] 2025-10-23 05:28:29,144 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@11ba9f55[cfId=1b69f580-afd1-11f0-afad-03e27b3495d5,ksName=svc_monitor_keyspace,cfName=healthmonitor_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, 
compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:28:29,801 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.healthmonitor_table INFO [Native-Transport-Requests-1] 2025-10-23 05:28:31,618 MigrationManager.java:454 - Update table 'config_db_uuid/obj_fq_name_table' From org.apache.cassandra.config.CFMetaData@2143e913[cfId=12fe5990-afd1-11f0-8d10-c5d1d04ddfcf,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@460594ad[cfId=12fe5990-afd1-11f0-8d10-c5d1d04ddfcf,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-10-23 05:28:34,059 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=to_bgp_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [Native-Transport-Requests-5] 2025-10-23 05:28:35,237 MigrationManager.java:427 - Update Keyspace 'to_bgp_keyspace' From KeyspaceMetadata{name=to_bgp_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} To KeyspaceMetadata{name=to_bgp_keyspace, params=KeyspaceParams{durable_writes=true, 
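
The Contrail tables here (obj_uuid_table, obj_fq_name_table, healthmonitor_table, and so on) all share the same DENSE three-column shape: a blob partition key, a single blob clustering column column1, and a blob value. That is the Thrift-era wide-row layout, where each config object's properties live as dynamic columns under its UUID key, so from CQL the blobs have to be cast to be readable. A sketch using the built-in blobAsText cast function, with an arbitrary LIMIT:

    # Sketch: peek at the wide-row layout of obj_uuid_table.
    rows = session.execute(
        "SELECT blobAsText(key) AS obj_uuid, blobAsText(column1) AS prop, "
        "blobAsText(value) AS val FROM config_db_uuid.obj_uuid_table LIMIT 10"
    )
    for r in rows:
        print(r.obj_uuid, r.prop, r.val[:60])
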
replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [Native-Transport-Requests-1] 2025-10-23 05:28:36,382 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@3df8096[cfId=1fba63e0-afd1-11f0-afad-03e27b3495d5,ksName=to_bgp_keyspace,cfName=route_target_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:28:36,549 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.route_target_table INFO [MigrationStage:1] 2025-10-23 05:28:38,168 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_ip_address_table INFO [Native-Transport-Requests-1] 2025-10-23 05:28:39,606 MigrationManager.java:454 - Update table 'to_bgp_keyspace/service_chain_ip_address_table' From org.apache.cassandra.config.CFMetaData@38dfe4[cfId=20b4a260-afd1-11f0-9725-4305b78b0d6e,ksName=to_bgp_keyspace,cfName=service_chain_ip_address_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3ef14a73[cfId=20b4a260-afd1-11f0-9725-4305b78b0d6e,ksName=to_bgp_keyspace,cfName=service_chain_ip_address_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]
INFO [MigrationStage:1] 2025-10-23 05:28:40,218 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_table
INFO [Native-Transport-Requests-1] 2025-10-23 05:28:42,648 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@1accb792[cfId=23768180-afd1-11f0-afad-03e27b3495d5,ksName=to_bgp_keyspace,cfName=service_chain_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]
INFO [MigrationStage:1] 2025-10-23 05:28:42,783 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_uuid_table
+ curl http://10.0.0.38:8071/webui/login.html
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100  1940  100  1940    0     0  98049      0 --:--:-- --:--:-- --:--:--   99k
++ awk '-F: ' '/JSESSIONID/ { print $2 }'
++ tr -d '\r'
++ curl -v -X POST -H 'Content-Type: application/x-www-form-urlencoded' -d 'username=reaperUser&password=reaperPass' http://10.0.0.38:8071/login
+ jsessionid='JSESSIONID=node01nsle39uyh3j3bmbm6rmpccqn0.node0; Path=/'
+ curl --cookie 'JSESSIONID=node01nsle39uyh3j3bmbm6rmpccqn0.node0; Path=/' -H 'Content-Type: application/json' -X POST 'http://10.0.0.38:8071/cluster?seedHost=10.0.0.38&jmxPort=7201'
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:01 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:02 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:02 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:02 --:--:--     0
+ echo 'Reaper started successfully'
Reaper started successfully
INFO [Native-Transport-Requests-2] 2025-10-23 05:29:51,090 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=dm_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]}
INFO [MigrationStage:1] 2025-10-23 05:29:53,314 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_vn_ip_table
INFO [Native-Transport-Requests-1] 2025-10-23 05:29:55,233 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@3ade2d58[cfId=4eba1910-afd1-11f0-afad-03e27b3495d5,ksName=dm_keyspace,cfName=dm_pr_asn_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0,
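
The shell trace above shows the entrypoint registering this node with Reaper over its REST API: fetch the login page, POST form credentials to /login, capture the JSESSIONID cookie, then POST /cluster with the seed host and JMX port. The same handshake in Python, with the URL, credentials and port taken verbatim from the trace; requests.Session carries the cookie automatically:

    # Sketch mirroring the curl flow above (values verbatim from the trace).
    import requests

    reaper = "http://10.0.0.38:8071"
    s = requests.Session()
    s.post(f"{reaper}/login",
           data={"username": "reaperUser", "password": "reaperPass"})
    # The JSESSIONID cookie is now stored on the session; register the cluster:
    resp = s.post(f"{reaper}/cluster",
                  params={"seedHost": "10.0.0.38", "jmxPort": 7201})
    print(resp.status_code)

Note that the trace itself never checks the response: the second curl transferred zero bytes before the script echoed 'Reaper started successfully', so a version with an explicit status check is stricter than the original.
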
dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:29:55,387 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_asn_table INFO [MigrationStage:1] 2025-10-23 05:29:57,456 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_ni_ipv6_ll_table INFO [Native-Transport-Requests-1] 2025-10-23 05:29:58,046 MigrationManager.java:454 - Update table 'dm_keyspace/dm_ni_ipv6_ll_table' From org.apache.cassandra.config.CFMetaData@275a534f[cfId=4ff57f40-afd1-11f0-8d10-c5d1d04ddfcf,ksName=dm_keyspace,cfName=dm_ni_ipv6_ll_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@41eb40d2[cfId=4ff57f40-afd1-11f0-8d10-c5d1d04ddfcf,ksName=dm_keyspace,cfName=dm_ni_ipv6_ll_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-23 05:29:58,449 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pnf_resource_table INFO [HANDSHAKE-/10.0.0.33] 2025-10-23 05:35:15,129 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.33 INFO [AntiEntropyStage:1] 2025-10-23 
05:35:15,908 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,056 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,156 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,237 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,255 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,278 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,300 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,378 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,482 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.schema_migration INFO [STREAM-INIT-/10.0.0.22:35546] 2025-10-23 05:35:16,520 StreamResultFuture.java:116 - [Stream #0e3913e0-afd2-11f0-9725-4305b78b0d6e ID#0] Creating new streaming plan for Repair INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,522 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.schema_migration_leader INFO [STREAM-INIT-/10.0.0.22:35546] 2025-10-23 05:35:16,526 StreamResultFuture.java:123 - [Stream #0e3913e0-afd2-11f0-9725-4305b78b0d6e, ID#0] Received streaming plan for Repair INFO [STREAM-INIT-/10.0.0.22:35554] 2025-10-23 05:35:16,527 StreamResultFuture.java:123 - [Stream #0e3913e0-afd2-11f0-9725-4305b78b0d6e, ID#0] Received streaming plan for Repair INFO [STREAM-IN-/10.0.0.22:35554] 2025-10-23 05:35:16,537 StreamResultFuture.java:173 - [Stream #0e3913e0-afd2-11f0-9725-4305b78b0d6e ID#0] Prepare completed. 
Receiving 3 files(4.049KiB), sending 2 files(3.741KiB) INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,552 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,662 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,692 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.leader INFO [StreamReceiveTask:1] 2025-10-23 05:35:16,762 StreamResultFuture.java:187 - [Stream #0e3913e0-afd2-11f0-9725-4305b78b0d6e] Session with /10.0.0.22 is complete INFO [StreamReceiveTask:1] 2025-10-23 05:35:16,763 StreamResultFuture.java:219 - [Stream #0e3913e0-afd2-11f0-9725-4305b78b0d6e] All sessions completed INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,809 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,895 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-23 05:35:16,987 Validator.java:281 - [repair #0da2ee60-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-23 05:35:17,004 ActiveRepairService.java:452 - [repair #0d946f70-afd2-11f0-9725-4305b78b0d6e] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,727 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,761 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,789 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,814 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,845 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,878 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,915 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,931 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-23 05:35:25,954 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-23 05:35:26,027 Validator.java:281 - [repair 
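
The StreamResultFuture entries above trace the repair's second phase: once merkle trees disagree, the out-of-sync ranges are streamed between replicas, from "Creating new streaming plan for Repair" through "Receiving 3 files(4.049KiB), sending 2 files(3.741KiB)" to "All sessions completed". When following many of these at once, a small parser keyed on the stream ID helps; a sketch matching the line format shown above:

    # Sketch: pull stream-session IDs and transfer sizes from log lines
    # shaped like the StreamResultFuture entries above.
    import re

    PREPARE = re.compile(
        r"\[Stream #(?P<sid>[0-9a-f-]+).*?Prepare completed\. "
        r"Receiving (?P<rx>\d+) files\((?P<rx_sz>[^)]+)\), "
        r"sending (?P<tx>\d+) files\((?P<tx_sz>[^)]+)\)"
    )

    line = ("[Stream #0e3913e0-afd2-11f0-9725-4305b78b0d6e ID#0] Prepare completed. "
            "Receiving 3 files(4.049KiB), sending 2 files(3.741KiB)")
    m = PREPARE.search(line)
    if m:
        print(m.group("sid"), m.group("rx_sz"), m.group("tx_sz"))
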
#139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-23 05:35:26,053 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-23 05:35:26,074 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-23 05:35:26,095 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-23 05:35:26,185 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-23 05:35:26,225 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-23 05:35:26,321 Validator.java:281 - [repair #139e2690-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-23 05:35:26,336 ActiveRepairService.java:452 - [repair #139a5600-afd2-11f0-8d10-c5d1d04ddfcf] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,722 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,764 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,792 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,805 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,818 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,831 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,843 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,855 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,872 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,885 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,924 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to 
/10.0.0.33 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,938 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-10-23 05:35:35,963 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,016 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,049 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.percent_repaired_by_schedule
INFO [HANDSHAKE-/10.0.0.22] 2025-10-23 05:35:36,108 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,113 Validator.java:281 - [repair #1999d3f0-afd2-11f0-8d10-c5d1d04ddfcf] Sending completed merkle tree to /10.0.0.33 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,132 ActiveRepairService.java:452 - [repair #19989b70-afd2-11f0-8d10-c5d1d04ddfcf] Not a global repair, will not do anticompaction
INFO [Repair-Task-2] 2025-10-23 05:35:36,518 RepairRunnable.java:139 - Starting repair command #1 (1a260e60-afd2-11f0-afad-03e27b3495d5), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 7, pull repair: false)
INFO [Repair-Task-2] 2025-10-23 05:35:36,538 RepairSession.java:228 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] new session: will sync /10.0.0.38, /10.0.0.33, /10.0.0.22 on range [(-7026686225713396395,-7014495386686166577], (2141904727309823299,2167767620692915646], (4450457336873776567,4455952949032645294], (2556312456299500044,2568414348209477260], (1463652665961812284,1464346655467659037], (2653418104507373811,2665142036428033499], (-2366611241604407977,-2346938399765006899]] for reaper_db.[repair_unit_v1, running_reapers, repair_schedule_v1, leader, repair_schedule_by_cluster_and_keyspace, schema_migration, running_repairs, repair_run, repair_run_by_cluster, snapshot, schema_migration_leader, percent_repaired_by_schedule, cluster, diagnostic_event_subscription, repair_run_by_unit, repair_run_by_cluster_v2]
INFO [RepairJobTask:2] 2025-10-23 05:35:36,650 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-23 05:35:36,651 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,656 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_unit_v1 from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,656 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,662 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_unit_v1 from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,663 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,666 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_unit_v1 from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:36,667 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_unit_v1
INFO [RepairJobTask:1] 2025-10-23 05:35:36,668 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_unit_v1
INFO [RepairJobTask:1] 2025-10-23 05:35:36,668 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_unit_v1
INFO [RepairJobTask:1] 2025-10-23 05:35:36,668 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] repair_unit_v1 is fully synced
INFO [RepairJobTask:3] 2025-10-23 05:35:36,679 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for running_reapers (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-23 05:35:36,679 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,683 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_reapers from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,683 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,687 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_reapers from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,687 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,691 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_reapers from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:36,693 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for running_reapers
INFO [RepairJobTask:2] 2025-10-23 05:35:36,694 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for running_reapers
INFO [RepairJobTask:2] 2025-10-23 05:35:36,694 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for running_reapers
INFO [RepairJobTask:2] 2025-10-23 05:35:36,694 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] running_reapers is fully synced
INFO [RepairJobTask:2] 2025-10-23 05:35:36,697 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-23 05:35:36,698 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,703 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_v1 from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,704 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,710 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_v1 from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,711 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,715 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_v1 from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:36,715 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_schedule_v1
INFO [RepairJobTask:4] 2025-10-23 05:35:36,715 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_schedule_v1
INFO [RepairJobTask:1] 2025-10-23 05:35:36,716 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_schedule_v1
INFO [RepairJobTask:3] 2025-10-23 05:35:36,716 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] repair_schedule_v1 is fully synced
INFO [RepairJobTask:5] 2025-10-23 05:35:36,718 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for leader (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:5] 2025-10-23 05:35:36,718 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,720 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for leader from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,720 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,725 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for leader from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,725 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,727 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for leader from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:36,727 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for leader
INFO [RepairJobTask:4] 2025-10-23 05:35:36,728 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:6] 2025-10-23 05:35:36,729 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:4] 2025-10-23 05:35:36,729 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] leader is fully synced
INFO [RepairJobTask:4] 2025-10-23 05:35:36,735 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:36,735 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,739 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,739 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,746 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,746 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,750 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-23 05:35:36,751 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:3] 2025-10-23 05:35:36,751 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:2] 2025-10-23 05:35:36,751 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:2] 2025-10-23 05:35:36,751 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] repair_schedule_by_cluster_and_keyspace is fully synced
INFO [RepairJobTask:4] 2025-10-23 05:35:36,757 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for schema_migration (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:36,757 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,761 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,761 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,767 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,767 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,768 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-23 05:35:36,769 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for schema_migration
INFO [RepairJobTask:3] 2025-10-23 05:35:36,769 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:3] 2025-10-23 05:35:36,769 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:3] 2025-10-23 05:35:36,770 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] schema_migration is fully synced
INFO [RepairJobTask:3] 2025-10-23 05:35:36,830 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for running_repairs (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-23 05:35:36,830 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,846 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_repairs from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,847 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,850 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_repairs from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,851 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,853 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_repairs from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:36,854 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-10-23 05:35:36,854 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:2] 2025-10-23 05:35:36,855 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for running_repairs
INFO [RepairJobTask:4] 2025-10-23 05:35:36,855 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] running_repairs is fully synced
INFO [RepairJobTask:4] 2025-10-23 05:35:36,909 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_run (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:36,909 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,918 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,918 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,929 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,930 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,932 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:36,933 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_run
INFO [RepairJobTask:5] 2025-10-23 05:35:36,932 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_run
INFO [RepairJobTask:2] 2025-10-23 05:35:36,933 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_run
INFO [RepairJobTask:5] 2025-10-23 05:35:36,933 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] repair_run is fully synced
INFO [RepairJobTask:5] 2025-10-23 05:35:36,941 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:5] 2025-10-23 05:35:36,941 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,947 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,947 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,952 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,952 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,954 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:36,955 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_run_by_cluster
INFO [RepairJobTask:4] 2025-10-23 05:35:36,955 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_run_by_cluster
INFO [RepairJobTask:5] 2025-10-23 05:35:36,955 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_run_by_cluster
INFO [RepairJobTask:4] 2025-10-23 05:35:36,955 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] repair_run_by_cluster is fully synced
INFO [RepairJobTask:4] 2025-10-23 05:35:36,961 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for snapshot (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:36,962 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,966 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for snapshot from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,966 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,972 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for snapshot from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,972 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:36,978 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for snapshot from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-23 05:35:36,979 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for snapshot
INFO [RepairJobTask:2] 2025-10-23 05:35:36,979 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for snapshot
INFO [RepairJobTask:1] 2025-10-23 05:35:36,979 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for snapshot
INFO [RepairJobTask:2] 2025-10-23 05:35:36,979 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] snapshot is fully synced
INFO [RepairJobTask:2] 2025-10-23 05:35:36,994 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for schema_migration_leader (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-23 05:35:36,995 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,006 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration_leader from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,008 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,018 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration_leader from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,018 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,023 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration_leader from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-23 05:35:37,026 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for schema_migration_leader
INFO [RepairJobTask:1] 2025-10-23 05:35:37,026 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for schema_migration_leader
INFO [RepairJobTask:5] 2025-10-23 05:35:37,026 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for schema_migration_leader
INFO [RepairJobTask:1] 2025-10-23 05:35:37,026 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] schema_migration_leader is fully synced
INFO [RepairJobTask:1] 2025-10-23 05:35:37,039 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-23 05:35:37,039 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,043 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,044 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,059 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,059 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,061 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:37,061 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:4] 2025-10-23 05:35:37,061 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:3] 2025-10-23 05:35:37,062 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:2] 2025-10-23 05:35:37,063 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] percent_repaired_by_schedule is fully synced
INFO [RepairJobTask:2] 2025-10-23 05:35:37,068 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for cluster (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-23 05:35:37,068 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,071 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for cluster from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,071 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,084 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for cluster from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,084 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,090 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for cluster from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:37,091 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for cluster
INFO [RepairJobTask:6] 2025-10-23 05:35:37,092 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for cluster
INFO [RepairJobTask:7] 2025-10-23 05:35:37,094 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for cluster
INFO [RepairJobTask:3] 2025-10-23 05:35:37,094 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] cluster is fully synced
INFO [RepairJobTask:7] 2025-10-23 05:35:37,136 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:7] 2025-10-23 05:35:37,137 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,139 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for diagnostic_event_subscription from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,140 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,143 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for diagnostic_event_subscription from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,144 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,149 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for diagnostic_event_subscription from /10.0.0.38
INFO [RepairJobTask:5] 2025-10-23 05:35:37,149 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:2] 2025-10-23 05:35:37,150 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-10-23 05:35:37,150 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:2] 2025-10-23 05:35:37,151 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] diagnostic_event_subscription is fully synced
INFO [RepairJobTask:2] 2025-10-23 05:35:37,154 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-23 05:35:37,154 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,156 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_unit from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,156 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,159 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_unit from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,160 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,161 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_unit from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-23 05:35:37,163 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:2] 2025-10-23 05:35:37,164 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_run_by_unit
INFO [RepairJobTask:5] 2025-10-23 05:35:37,163 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:2] 2025-10-23 05:35:37,164 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] repair_run_by_unit is fully synced
INFO [RepairJobTask:2] 2025-10-23 05:35:37,171 RepairJob.java:234 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-23 05:35:37,171 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,173 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,173 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,177 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,177 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:37,181 RepairSession.java:180 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:37,182 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:7] 2025-10-23 05:35:37,182 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:2] 2025-10-23 05:35:37,183 SyncTask.java:66 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:7] 2025-10-23 05:35:37,183 RepairJob.java:143 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] repair_run_by_cluster_v2 is fully synced
INFO [RepairJobTask:7] 2025-10-23 05:35:37,185 RepairSession.java:270 - [repair #1a291ba0-afd2-11f0-afad-03e27b3495d5] Session completed successfully
INFO [RepairJobTask:7] 2025-10-23 05:35:37,185 RepairRunnable.java:261 - Repair session 1a291ba0-afd2-11f0-afad-03e27b3495d5 for range [(-7026686225713396395,-7014495386686166577], (2141904727309823299,2167767620692915646], (4450457336873776567,4455952949032645294], (2556312456299500044,2568414348209477260], (1463652665961812284,1464346655467659037], (2653418104507373811,2665142036428033499], (-2366611241604407977,-2346938399765006899]] finished
INFO [RepairJobTask:7] 2025-10-23 05:35:37,187 ActiveRepairService.java:452 - [repair #1a260e60-afd2-11f0-afad-03e27b3495d5] Not a global repair, will not do anticompaction
INFO [InternalResponseStage:8] 2025-10-23 05:35:37,214 RepairRunnable.java:343 - Repair command #1 finished in 0 seconds
INFO [AntiEntropyStage:1] 2025-10-23 05:35:45,815 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-10-23 05:35:45,850 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-10-23 05:35:45,868 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_run_by_cluster_v2
INFO [AntiEntropyStage:1] 2025-10-23 05:35:45,946 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_run
INFO [AntiEntropyStage:1] 2025-10-23 05:35:45,971 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.snapshot
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,008 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,038 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,076 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,093 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,105 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,125 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,148 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.cluster
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,163 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.leader
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,177 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,216 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,230 Validator.java:281 - [repair #1fa16830-afd2-11f0-9725-4305b78b0d6e] Sending completed merkle tree to /10.0.0.22 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,238 ActiveRepairService.java:452 - [repair #1f9e5af0-afd2-11f0-9725-4305b78b0d6e] Not a global repair, will not do anticompaction
INFO [Repair-Task-3] 2025-10-23 05:35:46,565 RepairRunnable.java:139 - Starting repair command #2 (20231b50-afd2-11f0-afad-03e27b3495d5), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false)
INFO [Repair-Task-3] 2025-10-23 05:35:46,587 RepairSession.java:228 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] new session: will sync /10.0.0.38, /10.0.0.33, /10.0.0.22 on range [(1320472663936510991,1378152277156967788]] for reaper_db.[repair_unit_v1, running_reapers, repair_schedule_v1, leader, repair_schedule_by_cluster_and_keyspace, schema_migration, running_repairs, repair_run, repair_run_by_cluster, snapshot, schema_migration_leader, percent_repaired_by_schedule, cluster, diagnostic_event_subscription, repair_run_by_unit, repair_run_by_cluster_v2]
INFO [RepairJobTask:2] 2025-10-23 05:35:46,615 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-23 05:35:46,615 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,626 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_unit_v1 from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,626 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,630 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_unit_v1 from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,630 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,639 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_unit_v1 from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:46,640 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_unit_v1
INFO [RepairJobTask:4] 2025-10-23 05:35:46,640 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_unit_v1
INFO [RepairJobTask:5] 2025-10-23 05:35:46,640 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_unit_v1
INFO [RepairJobTask:3] 2025-10-23 05:35:46,640 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] repair_unit_v1 is fully synced
INFO [RepairJobTask:3] 2025-10-23 05:35:46,648 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for running_reapers (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-23 05:35:46,648 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,652 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_reapers from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,652 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,655 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_reapers from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,655 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,658 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_reapers from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:46,658 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for running_reapers
INFO [RepairJobTask:1] 2025-10-23 05:35:46,658 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for running_reapers
INFO [RepairJobTask:1] 2025-10-23 05:35:46,659 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for running_reapers
INFO [RepairJobTask:1] 2025-10-23 05:35:46,659 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] running_reapers is fully synced
INFO [RepairJobTask:3] 2025-10-23 05:35:46,673 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-23 05:35:46,673 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,676 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_v1 from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,676 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,679 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_v1 from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,679 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,683 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_v1 from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-23 05:35:46,687 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_schedule_v1
INFO [RepairJobTask:2] 2025-10-23 05:35:46,687 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_schedule_v1
INFO [RepairJobTask:1] 2025-10-23 05:35:46,688 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_schedule_v1
INFO [RepairJobTask:2] 2025-10-23 05:35:46,689 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] repair_schedule_v1 is fully synced
INFO [RepairJobTask:2] 2025-10-23 05:35:46,694 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for leader (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-23 05:35:46,694 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,698 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for leader from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,698 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,711 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for leader from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,712 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,722 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for leader from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:46,723 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for leader
INFO [RepairJobTask:2] 2025-10-23 05:35:46,724 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:2] 2025-10-23 05:35:46,724 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:2] 2025-10-23 05:35:46,724 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] leader is fully synced
INFO [RepairJobTask:4] 2025-10-23 05:35:46,730 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:46,730 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,738 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,738 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,741 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,741 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,748 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:46,748 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:2] 2025-10-23 05:35:46,748 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:2] 2025-10-23 05:35:46,748 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:2] 2025-10-23 05:35:46,748 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] repair_schedule_by_cluster_and_keyspace is fully synced
INFO [RepairJobTask:5] 2025-10-23 05:35:46,752 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for schema_migration (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:5] 2025-10-23 05:35:46,752 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,756 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,756 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,765 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,766 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,768 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:46,768 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for schema_migration
INFO [RepairJobTask:2] 2025-10-23 05:35:46,768 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:2] 2025-10-23 05:35:46,768 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:2] 2025-10-23 05:35:46,769 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] schema_migration is fully synced
INFO [RepairJobTask:5] 2025-10-23 05:35:46,826 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for running_repairs (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:5] 2025-10-23 05:35:46,828 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,834 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_repairs from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,834 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,837 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_repairs from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,837 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,840 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for running_repairs from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:46,840 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-10-23 05:35:46,840 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-10-23 05:35:46,840 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-10-23 05:35:46,840 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] running_repairs is fully synced
INFO [RepairJobTask:1] 2025-10-23 05:35:46,899 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_run (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-23 05:35:46,899 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,903 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,903 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,906 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,907 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,910 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-23 05:35:46,914 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:46,914 RepairJob.java:257 - Validating /10.0.0.33
INFO [RepairJobTask:2] 2025-10-23 05:35:46,916 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_run
INFO [RepairJobTask:4] 2025-10-23 05:35:46,916 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_run
INFO [RepairJobTask:1] 2025-10-23 05:35:46,917 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_run
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,917 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,919 RepairJob.java:270 - Validating /10.0.0.22
INFO [RepairJobTask:4] 2025-10-23 05:35:46,919 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] repair_run is fully synced
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,921 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,922 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,924 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:46,926 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_run_by_cluster
INFO [RepairJobTask:1] 2025-10-23 05:35:46,926 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_run_by_cluster
INFO [RepairJobTask:5] 2025-10-23 05:35:46,926 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_run_by_cluster
INFO [RepairJobTask:4] 2025-10-23 05:35:46,926 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] repair_run_by_cluster is fully synced
INFO [RepairJobTask:4] 2025-10-23 05:35:46,930 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for snapshot (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:46,930 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,934 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for snapshot from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,934 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,937 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for snapshot from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,937 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,947 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for snapshot from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-23 05:35:46,947 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for snapshot
INFO [RepairJobTask:1] 2025-10-23 05:35:46,947 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for snapshot
INFO [RepairJobTask:2] 2025-10-23 05:35:46,948 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for snapshot
INFO [RepairJobTask:4] 2025-10-23 05:35:46,948 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] snapshot is fully synced
INFO [RepairJobTask:4] 2025-10-23 05:35:46,950 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for schema_migration_leader (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:46,950 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,952 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration_leader from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,952 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,954 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration_leader from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,955 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,955 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for schema_migration_leader from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-23 05:35:46,957 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for schema_migration_leader
INFO [RepairJobTask:5] 2025-10-23 05:35:46,957 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for schema_migration_leader
INFO [RepairJobTask:4] 2025-10-23 05:35:46,957 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for schema_migration_leader
INFO [RepairJobTask:5] 2025-10-23 05:35:46,957 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] schema_migration_leader is fully synced
INFO [RepairJobTask:4] 2025-10-23 05:35:46,957 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-23 05:35:46,958 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,960 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,960 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,972 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,973 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:46,985 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-23 05:35:46,986 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:7] 2025-10-23 05:35:46,986 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:1] 2025-10-23 05:35:46,986 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:1] 2025-10-23 05:35:46,986 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] percent_repaired_by_schedule is fully synced
INFO [RepairJobTask:1] 2025-10-23 05:35:46,991 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for cluster (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-23 05:35:46,997 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,005 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for cluster from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,005 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,012 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for cluster from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,012 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,016 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for cluster from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:47,017 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for cluster
INFO [RepairJobTask:1] 2025-10-23 05:35:47,017 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for cluster
INFO [RepairJobTask:1] 2025-10-23 05:35:47,017 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for cluster
INFO [RepairJobTask:4] 2025-10-23 05:35:47,017 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] cluster is fully synced
INFO [RepairJobTask:6] 2025-10-23 05:35:47,025 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-23 05:35:47,025 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,030 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for diagnostic_event_subscription from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,030 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,033 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for diagnostic_event_subscription from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,033 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,037 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for diagnostic_event_subscription from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:47,038 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-10-23 05:35:47,038 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-10-23 05:35:47,038 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-10-23 05:35:47,038 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] diagnostic_event_subscription is fully synced
INFO [RepairJobTask:1] 2025-10-23 05:35:47,048 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-23 05:35:47,048 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,053 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_unit from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,055 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,057 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_unit from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,058 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,061 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_unit from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:47,064 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_run_by_unit
INFO [RepairJobTask:1] 2025-10-23 05:35:47,064 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:1] 2025-10-23 05:35:47,064 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:1] 2025-10-23 05:35:47,065 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] repair_run_by_unit is fully synced
INFO [RepairJobTask:7] 2025-10-23 05:35:47,069 RepairJob.java:234 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.33, /10.0.0.22, /10.0.0.38])
INFO [RepairJobTask:7] 2025-10-23 05:35:47,070 RepairJob.java:257 - Validating /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,071 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.33
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,072 RepairJob.java:270 - Validating /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,075 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.22
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,076 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-23 05:35:47,078 RepairSession.java:180 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-23 05:35:47,079 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.38 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-10-23 05:35:47,080 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.22 and /10.0.0.38 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:7] 2025-10-23 05:35:47,080 SyncTask.java:66 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Endpoints /10.0.0.33 and /10.0.0.22 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:6] 2025-10-23 05:35:47,080 RepairJob.java:143 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] repair_run_by_cluster_v2 is fully synced
INFO [RepairJobTask:6] 2025-10-23 05:35:47,081 RepairSession.java:270 - [repair #202676b0-afd2-11f0-afad-03e27b3495d5] Session completed successfully
INFO [RepairJobTask:6] 2025-10-23 05:35:47,081 RepairRunnable.java:261 - Repair session 202676b0-afd2-11f0-afad-03e27b3495d5 for range [(1320472663936510991,1378152277156967788]] finished
INFO [RepairJobTask:6] 2025-10-23 05:35:47,083 ActiveRepairService.java:452 - [repair #20231b50-afd2-11f0-afad-03e27b3495d5] Not a global repair, will not do anticompaction
INFO [InternalResponseStage:8] 2025-10-23 05:35:47,087 RepairRunnable.java:343 - Repair command #2 finished in 0 seconds