++ LOG_DIR=/var/log/contrail
++ export CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra
++ CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra
++ mkdir -p /var/log/contrail/config-database-cassandra
++ log_file=/var/log/contrail/config-database-cassandra/console.log
++ touch /var/log/contrail/config-database-cassandra/console.log
++ chmod 600 /var/log/contrail/config-database-cassandra/console.log
++ exec
+++ tee -a /var/log/contrail/config-database-cassandra/console.log
+++ date
++ echo 'INFO: =================== Thu Oct 16 05:22:31 UTC 2025 ==================='
INFO: =================== Thu Oct 16 05:22:31 UTC 2025 ===================
++ LOG_LOCAL=1
++ source /functions.sh
++ source /contrail-functions.sh
+++ get_default_ip
++++ get_default_nic
++++ get_gateway_nic_for_ip 1
++++ command -v ip
++++ local ip=1
+++++ grep -o 'dev.*'
+++++ awk '{print $2}'
+++++ ip route get 1
++++ local iface=ens3
++++ [[ ens3 == \l\o ]]
++++ echo ens3
+++ local nic=ens3
+++ get_ip_for_nic ens3
+++ local nic=ens3
+++ get_cidr_for_nic ens3
+++ cut -d / -f 1
+++ command -v ip
+++ local nic=ens3
+++ ip addr show dev ens3
+++ grep 'inet '
+++ head -n 1
+++ awk '{print $2}'
++ DEFAULT_LOCAL_IP=10.0.0.38
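A minimal sketch of the default-interface and default-IP discovery traced above, reconstructed from the piped commands in the trace (the function names appear in the trace; the bodies here are an approximation, not the contents of /contrail-functions.sh):

get_default_nic() {
  # The NIC used to reach 1.0.0.0, i.e. the default route's device.
  ip route get 1 | grep -o 'dev.*' | awk '{print $2}'
}

get_ip_for_nic() {
  # First IPv4 address on the NIC, with the /prefix stripped.
  local nic=$1
  ip addr show dev "$nic" | grep 'inet ' | head -n 1 | awk '{print $2}' | cut -d / -f 1
}

DEFAULT_LOCAL_IP=$(get_ip_for_nic "$(get_default_nic)")   # yields 10.0.0.38 on this node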
++ ENCAP_PRIORITY=MPLSoUDP,MPLSoGRE,VXLAN
++ VXLAN_VN_ID_MODE=automatic
++ DPDK_UIO_DRIVER=uio_pci_generic
++ CPU_CORE_MASK=0x01
++ SERVICE_CORE_MASK=
++ DPDK_CTRL_THREAD_MASK=
++ HUGE_PAGES=
++ HUGE_PAGES_DIR=/dev/hugepages
++ HUGE_PAGES_1GB=0
++ HUGE_PAGES_2MB=256
++ HUGE_PAGES_1GB_DIR=
++ HUGE_PAGES_2MB_DIR=
++ [[ 0 != 0 ]]
++ [[ 0 != 256 ]]
++ [[ -z '' ]]
+++ tail -n 1
+++ mount -t hugetlbfs
+++ awk '/pagesize=2M/{print($3)}'
++ HUGE_PAGES_2MB_DIR=
++ DPDK_MEM_PER_SOCKET=1024
++ DPDK_COMMAND_ADDITIONAL_ARGS=
++ NIC_OFFLOAD_ENABLE=False
++ DPDK_ENABLE_VLAN_FWRD=False
++ DIST_SNAT_PROTO_PORT_LIST=
++ CLOUD_ORCHESTRATOR=openstack
++ CLOUD_ADMIN_ROLE=admin
++ AAA_MODE=rbac
++ AUTH_MODE=keystone
++ AUTH_PARAMS=
++ SSL_ENABLE=false
++ SSL_INSECURE=True
++ SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ SERVER_CA_KEYFILE=/etc/contrail/ssl/private/ca-key.pem
++ SELFSIGNED_CERTS_WITH_IPS=True
++ CONTROLLER_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ ANALYTICS_ALARM_ENABLE=True
++ ANALYTICS_SNMP_ENABLE=True
++ ANALYTICSDB_ENABLE=True
++ ANALYTICS_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ ANALYTICSDB_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ ANALYTICS_SNMP_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ ANALYTICS_API_PORT=8081
++ ANALYTICS_API_INTROSPECT_PORT=8090
++ ANALYTICSDB_PORT=9160
++ ANALYTICSDB_CQL_PORT=9042
++ TOPOLOGY_INTROSPECT_PORT=5921
++ QUERYENGINE_INTROSPECT_PORT=8091
+++ get_server_list ANALYTICS ':8081 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:8081 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:8081 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:8081 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:8081 '
+++ '[' -n '10.0.0.38:8081 10.0.0.241:8081 10.0.0.242:8081 ' ']'
+++ echo '10.0.0.38:8081 10.0.0.241:8081 10.0.0.242:8081'
++ ANALYTICS_SERVERS='10.0.0.38:8081 10.0.0.241:8081 10.0.0.242:8081'
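Every *_SERVERS value below (ANALYTICSDB, COLLECTOR, CONFIG, CONFIGDB, ZOOKEEPER, RABBITMQ, REDIS, KAFKA) is produced by the same get_server_list expansion shown above. A minimal sketch of the helper as implied by the trace (an approximation of /functions.sh, not a copy):

get_server_list() {
  local server_typ=${1}_NODES      # e.g. ANALYTICS -> $ANALYTICS_NODES
  local port_with_delim=$2         # e.g. ':8081 ' (space-delimited) or :2181, (comma-delimited)
  local server_list=
  IFS=',' read -ra server_list <<< "${!server_typ}"
  local extended_server_list=
  local server
  for server in "${server_list[@]}"; do
    local server_address
    server_address=$(echo "$server")
    extended_server_list+="${server_address}${port_with_delim}"
  done
  # Print the joined list without the trailing delimiter character.
  [ -n "$extended_server_list" ] && echo "${extended_server_list%?}"
}

ANALYTICS_SERVERS=$(get_server_list ANALYTICS ':8081 ')   # -> '10.0.0.38:8081 10.0.0.241:8081 10.0.0.242:8081'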
+++ get_server_list ANALYTICSDB ':9042 '
+++ local server_typ=ANALYTICSDB_NODES
+++ local 'port_with_delim=:9042 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:9042 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:9042 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:9042 '
+++ '[' -n '10.0.0.38:9042 10.0.0.241:9042 10.0.0.242:9042 ' ']'
+++ echo '10.0.0.38:9042 10.0.0.241:9042 10.0.0.242:9042'
++ ANALYTICSDB_CQL_SERVERS='10.0.0.38:9042 10.0.0.241:9042 10.0.0.242:9042'
++ ANALYTICS_API_VIP=
++ ANALYTICS_ALARM_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ ALARMGEN_INTROSPECT_PORT=5995
++ BGP_PORT=179
++ BGP_AUTO_MESH=true
++ BGP_ASN=64512
++ ENABLE_4BYTE_AS=false
++ APPLY_DEFAULTS=true
++ COLLECTOR_PORT=8086
++ COLLECTOR_INTROSPECT_PORT=8089
++ COLLECTOR_SYSLOG_PORT=514
++ COLLECTOR_SFLOW_PORT=6343
++ COLLECTOR_IPFIX_PORT=4739
++ COLLECTOR_PROTOBUF_PORT=3333
++ COLLECTOR_STRUCTURED_SYSLOG_PORT=3514
++ SNMPCOLLECTOR_INTROSPECT_PORT=5920
+++ get_server_list ANALYTICS ':8086 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:8086 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:8086 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:8086 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:8086 '
+++ '[' -n '10.0.0.38:8086 10.0.0.241:8086 10.0.0.242:8086 ' ']'
+++ echo '10.0.0.38:8086 10.0.0.241:8086 10.0.0.242:8086'
++ COLLECTOR_SERVERS='10.0.0.38:8086 10.0.0.241:8086 10.0.0.242:8086'
++ CASSANDRA_PORT=9161
++ CASSANDRA_CQL_PORT=9041
++ CASSANDRA_SSL_STORAGE_PORT=7013
++ CASSANDRA_STORAGE_PORT=7012
++ CASSANDRA_JMX_LOCAL_PORT=7201
++ CONFIGDB_CASSANDRA_DRIVER=cql
++ CONFIG_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ CONFIGDB_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ CONFIG_API_PORT=8082
++ CONFIG_API_INTROSPECT_PORT=8084
++ CONFIG_API_ADMIN_PORT=8095
++ CONFIGDB_PORT=9161
++ CONFIGDB_CQL_PORT=9041
+++ get_server_list CONFIG ':8082 '
+++ local server_typ=CONFIG_NODES
+++ local 'port_with_delim=:8082 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:8082 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:8082 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:8082 '
+++ '[' -n '10.0.0.38:8082 10.0.0.241:8082 10.0.0.242:8082 ' ']'
+++ echo '10.0.0.38:8082 10.0.0.241:8082 10.0.0.242:8082'
++ CONFIG_SERVERS='10.0.0.38:8082 10.0.0.241:8082 10.0.0.242:8082'
+++ get_server_list CONFIGDB ':9161 '
+++ local server_typ=CONFIGDB_NODES
+++ local 'port_with_delim=:9161 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:9161 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:9161 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:9161 '
+++ '[' -n '10.0.0.38:9161 10.0.0.241:9161 10.0.0.242:9161 ' ']'
+++ echo '10.0.0.38:9161 10.0.0.241:9161 10.0.0.242:9161'
++ CONFIGDB_SERVERS='10.0.0.38:9161 10.0.0.241:9161 10.0.0.242:9161'
+++ get_server_list CONFIGDB ':9041 '
+++ local server_typ=CONFIGDB_NODES
+++ local 'port_with_delim=:9041 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:9041 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:9041 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:9041 '
+++ '[' -n '10.0.0.38:9041 10.0.0.241:9041 10.0.0.242:9041 ' ']'
+++ echo '10.0.0.38:9041 10.0.0.241:9041 10.0.0.242:9041'
++ CONFIGDB_CQL_SERVERS='10.0.0.38:9041 10.0.0.241:9041 10.0.0.242:9041'
++ CONFIG_API_VIP=
++ CONFIG_API_SSL_ENABLE=false
++ CONFIG_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ CONFIG_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ CONFIG_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CONFIG_API_WORKER_COUNT=1
++ CONFIG_API_MAX_REQUESTS=1024
++ ANALYTICS_API_SSL_ENABLE=false
++ ANALYTICS_API_SSL_INSECURE=True
++ ANALYTICS_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ ANALYTICS_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ ANALYTICS_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CASSANDRA_SSL_ENABLE=false
++ CASSANDRA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ CASSANDRA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ CASSANDRA_SSL_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CASSANDRA_SSL_KEYSTORE_PASSWORD=astrophytum
++ CASSANDRA_SSL_TRUSTSTORE_PASSWORD=ornatum
++ CASSANDRA_SSL_PROTOCOL=TLS
++ CASSANDRA_SSL_ALGORITHM=SunX509
++ CASSANDRA_SSL_CIPHER_SUITES='[TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]'
++ CASSANDRA_CONFIG_MEMTABLE_FLUSH_WRITER=4
++ CASSANDRA_CONFIG_CONCURRECT_COMPACTORS=4
++ CASSANDRA_CONFIG_COMPACTION_THROUGHPUT_MB_PER_SEC=256
++ CASSANDRA_CONFIG_CONCURRECT_READS=64
++ CASSANDRA_CONFIG_CONCURRECT_WRITES=64
++ CASSANDRA_CONFIG_MEMTABLE_ALLOCATION_TYPE=offheap_objects
++ CASSANDRA_REAPER_ENABLED=true
++ CASSANDRA_REAPER_JMX_KEY=reaperJmxKey
++ CASSANDRA_REAPER_JMX_AUTH_USERNAME=reaperUser
++ CASSANDRA_REAPER_JMX_AUTH_PASSWORD=reaperPass
++ CASSANDRA_REAPER_APP_PORT=8071
++ CASSANDRA_REAPER_ADM_PORT=8072
++ CONTROL_NODES=10.20.0.14,10.20.0.18,10.20.0.252
++ CONTROL_INTROSPECT_PORT=8083
++ DNS_NODES=10.20.0.14,10.20.0.18,10.20.0.252
++ DNS_SERVER_PORT=53
++ DNS_INTROSPECT_PORT=8092
++ RNDC_KEY=xvysmOR8lnUQRBcunkC6vg==
++ USE_EXTERNAL_TFTP=False
++ ZOOKEEPER_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ ZOOKEEPER_PORT=2181
++ ZOOKEEPER_PORTS=2888:3888
+++ get_server_list ZOOKEEPER :2181,
+++ local server_typ=ZOOKEEPER_NODES
+++ local port_with_delim=:2181,
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+=10.0.0.38:2181,
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+=10.0.0.241:2181,
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+=10.0.0.242:2181,
+++ '[' -n 10.0.0.38:2181,10.0.0.241:2181,10.0.0.242:2181, ']'
+++ echo 10.0.0.38:2181,10.0.0.241:2181,10.0.0.242:2181
++ ZOOKEEPER_SERVERS=10.0.0.38:2181,10.0.0.241:2181,10.0.0.242:2181
+++ get_server_list ZOOKEEPER ':2181 '
+++ local server_typ=ZOOKEEPER_NODES
+++ local 'port_with_delim=:2181 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:2181 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:2181 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:2181 '
+++ '[' -n '10.0.0.38:2181 10.0.0.241:2181 10.0.0.242:2181 ' ']'
+++ echo '10.0.0.38:2181 10.0.0.241:2181 10.0.0.242:2181'
++ ZOOKEEPER_SERVERS_SPACE_DELIM='10.0.0.38:2181 10.0.0.241:2181 10.0.0.242:2181'
++ RABBITMQ_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ RABBITMQ_NODE_PORT=5673
+++ get_server_list RABBITMQ :5673,
+++ local server_typ=RABBITMQ_NODES
+++ local port_with_delim=:5673,
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+=10.0.0.38:5673,
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+=10.0.0.241:5673,
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+=10.0.0.242:5673,
+++ '[' -n 10.0.0.38:5673,10.0.0.241:5673,10.0.0.242:5673, ']'
+++ echo 10.0.0.38:5673,10.0.0.241:5673,10.0.0.242:5673
++ RABBITMQ_SERVERS=10.0.0.38:5673,10.0.0.241:5673,10.0.0.242:5673
++ RABBITMQ_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ RABBITMQ_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ RABBITMQ_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT=true
++ RABBITMQ_VHOST=/
++ RABBITMQ_USER=guest
++ RABBITMQ_PASSWORD=guest
++ RABBITMQ_USE_SSL=false
++ RABBITMQ_SSL_VER=tlsv1.2
++ RABBITMQ_CLIENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ RABBITMQ_CLIENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ RABBITMQ_CLIENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ RABBITMQ_HEARTBEAT_INTERVAL=60
++ RABBITMQ_CLUSTER_PARTITION_HANDLING=autoheal
++ RABBITMQ_MIRRORED_QUEUE_MODE=all
++ REDIS_SERVER_PORT=6379
++ REDIS_SERVER_PASSWORD=
+++ get_server_list ANALYTICS ':6379 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:6379 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:6379 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:6379 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:6379 '
+++ '[' -n '10.0.0.38:6379 10.0.0.241:6379 10.0.0.242:6379 ' ']'
+++ echo '10.0.0.38:6379 10.0.0.241:6379 10.0.0.242:6379'
++ REDIS_SERVERS='10.0.0.38:6379 10.0.0.241:6379 10.0.0.242:6379'
++ REDIS_LISTEN_ADDRESS=
++ REDIS_PROTECTED_MODE=
++ REDIS_SSL_ENABLE=false
++ REDIS_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ REDIS_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ REDIS_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ redis_ssl_config=
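The is_enabled checks that gate each optional config block (here deciding that redis_ssl_config stays empty) reduce to a case-insensitive membership test. A minimal sketch matching the trace:

is_enabled() {
  local val=${1,,}    # lowercase, so False/True from the environment also match
  [[ $val == 'true' ]] || [[ $val == 'yes' ]] || [[ $val == 'enabled' ]]
}

if is_enabled "$REDIS_SSL_ENABLE"; then
  redis_ssl_config='...'   # hypothetical TLS options; the real payload is not shown in this trace
else
  redis_ssl_config=
fi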
++ KAFKA_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ KAFKA_PORT=9092
+++ get_server_list KAFKA ':9092 '
+++ local server_typ=KAFKA_NODES
+++ local 'port_with_delim=:9092 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.38
+++ local server_address=10.0.0.38
+++ extended_server_list+='10.0.0.38:9092 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.241
+++ local server_address=10.0.0.241
+++ extended_server_list+='10.0.0.241:9092 '
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.242
+++ local server_address=10.0.0.242
+++ extended_server_list+='10.0.0.242:9092 '
+++ '[' -n '10.0.0.38:9092 10.0.0.241:9092 10.0.0.242:9092 ' ']'
+++ echo '10.0.0.38:9092 10.0.0.241:9092 10.0.0.242:9092'
++ KAFKA_SERVERS='10.0.0.38:9092 10.0.0.241:9092 10.0.0.242:9092'
++ KAFKA_SSL_ENABLE=false
++ KAFKA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ KAFKA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ KAFKA_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ KEYSTONE_AUTH_ADMIN_TENANT=admin
++ KEYSTONE_AUTH_ADMIN_USER=admin
++ KEYSTONE_AUTH_ADMIN_PASSWORD=contrail123
++ KEYSTONE_AUTH_PROJECT_DOMAIN_NAME=Default
++ KEYSTONE_AUTH_USER_DOMAIN_NAME=Default
++ KEYSTONE_AUTH_REGION_NAME=RegionOne
++ KEYSTONE_AUTH_URL_VERSION=/v3
++ KEYSTONE_AUTH_HOST=10.0.0.38
++ KEYSTONE_AUTH_PROTO=http
++ KEYSTONE_AUTH_ADMIN_PORT=5000
++ KEYSTONE_AUTH_PUBLIC_PORT=5000
++ KEYSTONE_AUTH_URL_TOKENS=/v3/auth/tokens
++ KEYSTONE_AUTH_INSECURE=True
++ KEYSTONE_AUTH_CERTFILE=
++ KEYSTONE_AUTH_KEYFILE=
++ KEYSTONE_AUTH_CA_CERTFILE=
++ KEYSTONE_AUTH_ENDPOINT_TYPE=
++ KEYSTONE_AUTH_SYNC_ON_DEMAND=
++ KEYSTONE_AUTH_INTERFACE=public
++ KUBEMANAGER_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ KUBERNETES_CLUSTER_NAME=k8s
++ KUBERNETES_CNI_META_PLUGIN=multus
++ METADATA_PROXY_SECRET=contrail
++ BARBICAN_TENANT_NAME=service
++ BARBICAN_USER=barbican
++ BARBICAN_PASSWORD=contrail123
++ AGENT_MODE=kernel
++ EXTERNAL_ROUTERS=
++ SUBCLUSTER=
++ VROUTER_COMPUTE_NODE_ADDRESS=
++ VROUTER_CRYPT_INTERFACE=crypt0
++ VROUTER_DECRYPT_INTERFACE=decrypt0
++ VROUTER_DECRYPT_KEY=15
++ VROUTER_MODULE_OPTIONS=
++ FABRIC_SNAT_HASH_TABLE_SIZE=4096
++ TSN_EVPN_MODE=False
++ TSN_NODES='[]'
++ PRIORITY_ID=
++ PRIORITY_BANDWIDTH=
++ PRIORITY_SCHEDULING=
++ QOS_QUEUE_ID=
++ QOS_LOGICAL_QUEUES=
++ QOS_DEF_HW_QUEUE=False
++ PRIORITY_TAGGING=True
++ SLO_DESTINATION=collector
++ '[' -n '' ']'
++ SAMPLE_DESTINATION=collector
++ FLOW_EXPORT_RATE=0
++ WEBUI_NODES=10.0.0.38,10.0.0.241,10.0.0.242
++ WEBUI_JOB_SERVER_PORT=3000
++ KUE_UI_PORT=3002
++ WEBUI_HTTP_LISTEN_PORT=8180
++ WEBUI_HTTPS_LISTEN_PORT=8143
++ WEBUI_SSL_KEY_FILE=/etc/contrail/webui_ssl/cs-key.pem
++ WEBUI_SSL_CERT_FILE=/etc/contrail/webui_ssl/cs-cert.pem
++ WEBUI_SSL_CIPHERS=ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES256-SHA
++ WEBUI_STATIC_AUTH_USER=admin
++ WEBUI_STATIC_AUTH_PASSWORD=contrail123
++ WEBUI_STATIC_AUTH_ROLE=cloudAdmin
++ XMPP_SERVER_PORT=5269
++ XMPP_SSL_ENABLE=false
++ XMPP_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ XMPP_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ XMPP_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ LINKLOCAL_SERVICE_PORT=80
++ LINKLOCAL_SERVICE_NAME=metadata
++ LINKLOCAL_SERVICE_IP=169.254.169.254
++ IPFABRIC_SERVICE_PORT=8775
++ INTROSPECT_SSL_ENABLE=false
++ INTROSPECT_SSL_INSECURE=True
++ INTROSPECT_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ INTROSPECT_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ INTROSPECT_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ INTROSPECT_LISTEN_ALL=True
++ SANDESH_SSL_ENABLE=false
++ SANDESH_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SANDESH_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SANDESH_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SANDESH_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SANDESH_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ METADATA_SSL_ENABLE=false
++ METADATA_SSL_CERTFILE=
++ METADATA_SSL_KEYFILE=
++ METADATA_SSL_CA_CERTFILE=
++ METADATA_SSL_CERT_TYPE=
++ CONFIGURE_IPTABLES=false
++ FWAAS_ENABLE=False
++ CONTAINERD_NAMESPACE=k8s.io
++ TOR_AGENT_OVS_KA=10000
++ TOR_TYPE=ovs
++ TOR_OVS_PROTOCOL=tcp
++ TORAGENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ TORAGENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ TORAGENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ [[ /v3 == \/\v\2\.\0 ]]
++ [[ openstack == \o\p\e\n\s\t\a\c\k ]]
++ AUTH_MODE=keystone
++ [[ keystone == \k\e\y\s\t\o\n\e ]]
++ AUTH_PARAMS='--admin_password contrail123'
++ AUTH_PARAMS+=' --admin_tenant_name admin'
++ AUTH_PARAMS+=' --admin_user admin'
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ read -r -d '' sandesh_client_config
++ true
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ xmpp_certs_config=
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ analytics_api_ssl_opts=
++ read -r -d '' rabbitmq_config
++ true
++ read -r -d '' rabbit_config
++ true
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ is_enabled false
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ kafka_ssl_config=
++ [[ -n '' ]]
++ collector_stats_config=
++ [[ -z '' ]]
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ export TSN_AGENT_MODE=
++ TSN_AGENT_MODE=
++ [[ -n '' ]]
++ collector_stats_config=
++ [[ -z x ]]
++ RSYSLOGD_XFLOW_LISTEN_PORT=9898
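The read -r -d '' NAME / true pairs above are the usual bash idiom for capturing a heredoc into a variable: read consumes everything up to EOF, then returns non-zero, so the trailing true keeps a set -e entrypoint alive. A sketch of the pattern (the actual sandesh_client_config text is not shown in this trace; the section and keys below are illustrative):

read -r -d '' sandesh_client_config <<EOF || true
[SANDESH]
introspect_ssl_enable=${INTROSPECT_SSL_ENABLE}
sandesh_ssl_enable=${SANDESH_SSL_ENABLE}
EOF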
+ CONFIG=/etc/cassandra/cassandra.yaml
+ JVM_OPTIONS_CONFIG=/etc/cassandra/jvm.options
+ cp /etc/cassandra/cassandra.origin /etc/cassandra/cassandra.yaml
+ cp /etc/cassandra/jvm.options.origin /etc/cassandra/jvm.options
+ for i in '{1..10}'
++ cut -d ' ' -f 1
++ find_my_ip_and_order_for_node_list 10.0.0.38,10.0.0.241,10.0.0.242
++ local servers=10.0.0.38,10.0.0.241,10.0.0.242
++ local server_list=
++ IFS=,
++ read -ra server_list
+++ get_local_ips
+++ tr '\n' ,
+++ cat /proc/net/fib_trie
+++ awk '/32 host/ { print f } {f=$2}'
+++ sort
+++ grep -vi host
+++ uniq
++ local local_ips=,10.0.0.38,10.20.0.14,127.0.0.1,172.17.0.1,,
++ local ord=1
++ for server in '"${server_list[@]}"'
++ local ret=0
+++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.38'\''))'
++ local server_ip=10.0.0.38
++ [[ 0 == 0 ]]
++ [[ -n 10.0.0.38 ]]
++ [[ ,10.0.0.38,10.20.0.14,127.0.0.1,172.17.0.1,, =~ ,10\.0\.0\.38, ]]
++ echo 10.0.0.38 1
++ return
+ my_ip=10.0.0.38
+ '[' -n 10.0.0.38 ']'
+ break
+ '[' -z 10.0.0.38 ']'
++ echo 10.0.0.38,10.0.0.241,10.0.0.242
++ wc -w
++ tr , ' '
+ export CASSANDRA_COUNT=3
+ CASSANDRA_COUNT=3
++ sed 's/,/", "/g'
++ echo 10.0.0.38,10.0.0.241,10.0.0.242
+ export 'CASSANDRA_CONNECT_POINTS=10.0.0.38", "10.0.0.241", "10.0.0.242'
+ CASSANDRA_CONNECT_POINTS='10.0.0.38", "10.0.0.241", "10.0.0.242'
++ echo 10.0.0.38,10.0.0.241,10.0.0.242
++ cut -d , -f 1,2
+ export CASSANDRA_SEEDS=10.0.0.38,10.0.0.241
+ CASSANDRA_SEEDS=10.0.0.38,10.0.0.241
+ export CASSANDRA_LISTEN_ADDRESS=10.0.0.38
+ CASSANDRA_LISTEN_ADDRESS=10.0.0.38
+ export CASSANDRA_RPC_ADDRESS=10.0.0.38
+ CASSANDRA_RPC_ADDRESS=10.0.0.38
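Node identity is derived in two steps: find which configured ConfigDB node resolves to a locally assigned address, then take the first two nodes of the list as the gossip seeds. A sketch reconstructed from the trace above (an approximation, not the shipped helpers):

get_local_ips() {
  # Every locally assigned IPv4 address, comma-joined, parsed out of the kernel FIB.
  cat /proc/net/fib_trie | awk '/32 host/ { print f } {f=$2}' | grep -vi host | sort | uniq | tr '\n' ,
}

find_my_ip_and_order_for_node_list() {
  local servers=$1 ord=1 server server_ip
  local server_list=()
  IFS=',' read -ra server_list <<< "$servers"
  local local_ips=",$(get_local_ips)"
  for server in "${server_list[@]}"; do
    server_ip=$(python3 -c "import socket; print(socket.gethostbyname('$server'))") || continue
    if [[ $local_ips =~ ,$server_ip, ]]; then
      echo "$server_ip $ord"    # e.g. "10.0.0.38 1"
      return
    fi
    ord=$((ord + 1))
  done
}

my_ip=$(find_my_ip_and_order_for_node_list "$CONFIGDB_NODES" | cut -d ' ' -f 1)
CASSANDRA_SEEDS=$(echo "$CONFIGDB_NODES" | cut -d , -f 1,2)   # first two nodes act as seeds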
+ echo 'INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g'
INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g
+ for yaml in Xmx Xms
++ echo -Xms1g -Xmx2g
++ sed -n 's/.*\(-Xmx[0-9]*[mMgG]\).*/\1/p'
+ opt=-Xmx2g
+ [[ -n -Xmx2g ]]
++ echo -Xms1g -Xmx2g
++ sed 's/-Xmx[0-9]*[mMgG]//g'
+ JVM_EXTRA_OPTS='-Xms1g '
+ sed -i 's/^[#]*-Xmx.*/-Xmx2g/g' /etc/cassandra/jvm.options
+ for yaml in Xmx Xms
++ echo -Xms1g
++ sed -n 's/.*\(-Xms[0-9]*[mMgG]\).*/\1/p'
+ opt=-Xms1g
+ [[ -n -Xms1g ]]
++ sed 's/-Xms[0-9]*[mMgG]//g'
++ echo -Xms1g
+ JVM_EXTRA_OPTS=
+ sed -i 's/^[#]*-Xms.*/-Xms1g/g' /etc/cassandra/jvm.options
+ export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201'
+ JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201'
+ is_enabled true
+ local val=true
+ [[ true == \t\r\u\e ]]
+ export LOCAL_JMX=no
+ LOCAL_JMX=no
+ export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201'
+ JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201'
+ is_enabled false
+ local val=false
+ [[ false == \t\r\u\e ]]
+ [[ false == \y\e\s ]]
+ [[ false == \e\n\a\b\l\e\d ]]
+ cat
+ change_variable memtable_flush_writers 4
+ local VARIABLE_NAME=memtable_flush_writers
+ local VARIABLE_VALUE=4
+ sed -i 's/.*\(memtable_flush_writers\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml
+ change_variable concurrent_compactors 4
+ local VARIABLE_NAME=concurrent_compactors
+ local VARIABLE_VALUE=4
+ sed -i 's/.*\(concurrent_compactors\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml
+ change_variable compaction_throughput_mb_per_sec 256
+ local VARIABLE_NAME=compaction_throughput_mb_per_sec
+ local VARIABLE_VALUE=256
+ sed -i 's/.*\(compaction_throughput_mb_per_sec\):.*\([0-9a-z]\)/\1: 256/g' /etc/cassandra/cassandra.yaml
+ change_variable concurrent_reads 64
+ local VARIABLE_NAME=concurrent_reads
+ local VARIABLE_VALUE=64
+ sed -i 's/.*\(concurrent_reads\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml
+ change_variable concurrent_writes 64
+ local VARIABLE_NAME=concurrent_writes
+ local VARIABLE_VALUE=64
+ sed -i 's/.*\(concurrent_writes\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml
+ change_variable memtable_allocation_type offheap_objects
+ local VARIABLE_NAME=memtable_allocation_type
+ local VARIABLE_VALUE=offheap_objects
+ sed -i 's/.*\(memtable_allocation_type\):.*\([0-9a-z]\)/\1: offheap_objects/g' /etc/cassandra/cassandra.yaml
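change_variable is a sed-in-place edit of cassandra.yaml; because the pattern begins with .*, it matches the key whether it is active or still commented out, so defaults shipped as '#concurrent_compactors: 1' are both uncommented and overridden. A sketch matching the trace:

change_variable() {
  local VARIABLE_NAME=$1
  local VARIABLE_VALUE=$2
  # Rewrite (or uncomment) the top-level "key: value" line in cassandra.yaml.
  sed -i "s/.*\($VARIABLE_NAME\):.*\([0-9a-z]\)/\1: $VARIABLE_VALUE/g" /etc/cassandra/cassandra.yaml
}

change_variable concurrent_compactors "$CASSANDRA_CONFIG_CONCURRECT_COMPACTORS"   # -> 'concurrent_compactors: 4'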
+ log_levels_map=([SYS_DEBUG]='DEBUG' [SYS_INFO]='INFO' [SYS_NOTICE]='INFO' [SYS_ERROR]="ERROR")
+ declare -A log_levels_map
+ log_level=DEBUG
+ '[' -n DEBUG ']'
+ sed -i 's/\(
; cluster_name=contrail_database; column_index_cache_size_in_kb=2; column_index_size_in_kb=64; commit_failure_policy=stop; commitlog_compression=null; commitlog_directory=/var/lib/cassandra/commitlog; commitlog_max_compression_buffers_in_pool=3; commitlog_periodic_queue_size=-1; commitlog_segment_size_in_mb=32; commitlog_sync=periodic; commitlog_sync_batch_window_in_ms=NaN; commitlog_sync_period_in_ms=10000; commitlog_total_space_in_mb=null; compaction_large_partition_warning_threshold_mb=100; compaction_throughput_mb_per_sec=256; concurrent_compactors=4; concurrent_counter_writes=32; concurrent_materialized_view_writes=32; concurrent_reads=64; concurrent_replicates=null; concurrent_writes=64; counter_cache_keys_to_save=2147483647; counter_cache_save_period=7200; counter_cache_size_in_mb=null; counter_write_request_timeout_in_ms=5000; credentials_cache_max_entries=1000; credentials_update_interval_in_ms=-1; credentials_validity_in_ms=2000; cross_node_timeout=false; data_file_directories=[Ljava.lang.String;@6b19b79; disk_access_mode=auto; disk_failure_policy=stop; disk_optimization_estimate_percentile=0.95; disk_optimization_page_cross_chance=0.1; disk_optimization_strategy=ssd; dynamic_snitch=true; dynamic_snitch_badness_threshold=0.1; dynamic_snitch_reset_interval_in_ms=600000; dynamic_snitch_update_interval_in_ms=100; enable_materialized_views=true; enable_scripted_user_defined_functions=false; enable_user_defined_functions=false; enable_user_defined_functions_threads=true; encryption_options=null; endpoint_snitch=SimpleSnitch; file_cache_round_up=null; file_cache_size_in_mb=null; gc_log_threshold_in_ms=200; gc_warn_threshold_in_ms=1000; hinted_handoff_disabled_datacenters=[]; hinted_handoff_enabled=true; hinted_handoff_throttle_in_kb=1024; hints_compression=null; hints_directory=null; hints_flush_period_in_ms=10000; incremental_backups=false; index_interval=null; index_summary_capacity_in_mb=null; index_summary_resize_interval_in_minutes=60; initial_token=null; inter_dc_stream_throughput_outbound_megabits_per_sec=200; inter_dc_tcp_nodelay=false; internode_authenticator=null; internode_compression=dc; internode_recv_buff_size_in_bytes=0; internode_send_buff_size_in_bytes=0; key_cache_keys_to_save=2147483647; key_cache_save_period=14400; key_cache_size_in_mb=null; listen_address=10.0.0.38; listen_interface=null; listen_interface_prefer_ipv6=false; listen_on_broadcast_address=false; max_hint_window_in_ms=10800000; max_hints_delivery_threads=2; max_hints_file_size_in_mb=128; max_mutation_size_in_kb=null; max_streaming_retries=3; max_value_size_in_mb=256; memtable_allocation_type=offheap_objects; memtable_cleanup_threshold=null; memtable_flush_writers=4; memtable_heap_space_in_mb=null; memtable_offheap_space_in_mb=null; min_free_space_per_drive_in_mb=50; native_transport_max_concurrent_connections=-1; native_transport_max_concurrent_connections_per_ip=-1; native_transport_max_frame_size_in_mb=256; native_transport_max_threads=128; native_transport_port=9042; native_transport_port_ssl=null; num_tokens=256; otc_backlog_expiration_interval_ms=200; otc_coalescing_enough_coalesced_messages=8; otc_coalescing_strategy=DISABLED; otc_coalescing_window_us=200; partitioner=org.apache.cassandra.dht.Murmur3Partitioner; permissions_cache_max_entries=1000; permissions_update_interval_in_ms=-1; permissions_validity_in_ms=2000; phi_convict_threshold=8.0; prepared_statements_cache_size_mb=null; range_request_timeout_in_ms=10000; read_request_timeout_in_ms=5000; request_scheduler=org.apache.cassandra.scheduler.NoScheduler; request_scheduler_id=null; request_scheduler_options=null; request_timeout_in_ms=10000; role_manager=CassandraRoleManager; roles_cache_max_entries=1000; roles_update_interval_in_ms=-1; roles_validity_in_ms=2000; row_cache_class_name=org.apache.cassandra.cache.OHCProvider; row_cache_keys_to_save=2147483647; row_cache_save_period=0; row_cache_size_in_mb=0; rpc_address=10.0.0.38; rpc_interface=null; rpc_interface_prefer_ipv6=false; rpc_keepalive=true; rpc_listen_backlog=50; rpc_max_threads=2147483647; rpc_min_threads=16; rpc_port=9160; rpc_recv_buff_size_in_bytes=null; rpc_send_buff_size_in_bytes=null; rpc_server_type=sync; saved_caches_directory=/var/lib/cassandra/saved_caches; seed_provider=org.apache.cassandra.locator.SimpleSeedProvider{seeds=10.0.0.38,10.0.0.241}; server_encryption_options=; slow_query_log_timeout_in_ms=500; snapshot_before_compaction=false; ssl_storage_port=7001; sstable_preemptive_open_interval_in_mb=50; start_native_transport=true; start_rpc=true; storage_port=7000; stream_throughput_outbound_megabits_per_sec=200; streaming_keep_alive_period_in_secs=300; streaming_socket_timeout_in_ms=86400000; thrift_framed_transport_size_in_mb=15; thrift_max_message_length_in_mb=16; thrift_prepared_statements_cache_size_mb=null; tombstone_failure_threshold=100000; tombstone_warn_threshold=1000; tracetype_query_ttl=86400; tracetype_repair_ttl=604800; transparent_data_encryption_options=org.apache.cassandra.config.TransparentDataEncryptionOptions@2a32de6c; trickle_fsync=false; trickle_fsync_interval_in_kb=10240; truncate_request_timeout_in_ms=60000; unlogged_batch_across_partitions_warn_threshold=10; user_defined_function_fail_timeout=1500; user_defined_function_warn_timeout=500; user_function_timeout_policy=die; windows_timer_interval=1; write_request_timeout_in_ms=2000]
INFO [main] 2025-10-16 05:22:35,898 DatabaseDescriptor.java:367 - DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap
INFO [main] 2025-10-16 05:22:35,899 DatabaseDescriptor.java:425 - Global memtable on-heap threshold is enabled at 502MB
INFO [main] 2025-10-16 05:22:35,899 DatabaseDescriptor.java:429 - Global memtable off-heap threshold is enabled at 502MB
INFO [main] 2025-10-16 05:22:35,932 RateBasedBackPressure.java:123 - Initialized back-pressure with high ratio: 0.9, factor: 5, flow: FAST, window size: 2000.
INFO [main] 2025-10-16 05:22:35,932 DatabaseDescriptor.java:729 - Back-pressure is disabled with strategy org.apache.cassandra.net.RateBasedBackPressure{high_ratio=0.9, factor=5, flow=FAST}.
INFO [main] 2025-10-16 05:22:36,210 JMXServerUtils.java:246 - Configured JMX server at: service:jmx:rmi://0.0.0.0/jndi/rmi://0.0.0.0:7201/jmxrmi
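With CASSANDRA_REAPER_ENABLED=true the entrypoint set LOCAL_JMX=no and moved JMX to 7201 with password authentication, which is why the server binds the remote JMX URL above. Assuming the reaper credentials from the environment were written into /etc/cassandra/jmxremote.password (that step is not visible in this trace), the node can be queried with nodetool on the non-default port:

nodetool -h 10.0.0.38 -p 7201 -u reaperUser -pw reaperPass status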
INFO [main] 2025-10-16 05:22:36,225 CassandraDaemon.java:473 - Hostname: cn-jenkins-deploy-platform-ansible-os-4340-1.
INFO [main] 2025-10-16 05:22:36,227 CassandraDaemon.java:480 - JVM vendor/version: OpenJDK 64-Bit Server VM/1.8.0_322
INFO [main] 2025-10-16 05:22:36,229 CassandraDaemon.java:481 - Heap size: 984.000MiB/1.961GiB
INFO [main] 2025-10-16 05:22:36,230 CassandraDaemon.java:486 - Code Cache Non-heap memory: init = 2555904(2496K) used = 4507648(4402K) committed = 4521984(4416K) max = 251658240(245760K)
INFO [main] 2025-10-16 05:22:36,230 CassandraDaemon.java:486 - Metaspace Non-heap memory: init = 0(0K) used = 19323040(18870K) committed = 20054016(19584K) max = -1(-1K)
INFO [main] 2025-10-16 05:22:36,231 CassandraDaemon.java:486 - Compressed Class Space Non-heap memory: init = 0(0K) used = 2250928(2198K) committed = 2490368(2432K) max = 1073741824(1048576K)
INFO [main] 2025-10-16 05:22:36,231 CassandraDaemon.java:486 - Par Eden Space Heap memory: init = 335544320(327680K) used = 93992640(91789K) committed = 335544320(327680K) max = 335544320(327680K)
INFO [main] 2025-10-16 05:22:36,232 CassandraDaemon.java:486 - Par Survivor Space Heap memory: init = 41943040(40960K) used = 0(0K) committed = 41943040(40960K) max = 41943040(40960K)
INFO [main] 2025-10-16 05:22:36,233 CassandraDaemon.java:486 - CMS Old Gen Heap memory: init = 654311424(638976K) used = 0(0K) committed = 654311424(638976K) max = 1728053248(1687552K)
INFO [main] 2025-10-16 05:22:36,233 CassandraDaemon.java:488 - Classpath: /opt/cassandra/conf:/opt/cassandra/build/classes/main:/opt/cassandra/build/classes/thrift:/opt/cassandra/lib/airline-0.6.jar:/opt/cassandra/lib/antlr-runtime-3.5.2.jar:/opt/cassandra/lib/apache-cassandra-3.11.3.jar:/opt/cassandra/lib/apache-cassandra-thrift-3.11.3.jar:/opt/cassandra/lib/asm-5.0.4.jar:/opt/cassandra/lib/caffeine-2.2.6.jar:/opt/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar:/opt/cassandra/lib/commons-cli-1.1.jar:/opt/cassandra/lib/commons-codec-1.9.jar:/opt/cassandra/lib/commons-lang3-3.1.jar:/opt/cassandra/lib/commons-math3-3.2.jar:/opt/cassandra/lib/compress-lzf-0.8.4.jar:/opt/cassandra/lib/concurrentlinkedhashmap-lru-1.4.jar:/opt/cassandra/lib/concurrent-trees-2.4.0.jar:/opt/cassandra/lib/disruptor-3.0.1.jar:/opt/cassandra/lib/ecj-4.4.2.jar:/opt/cassandra/lib/guava-18.0.jar:/opt/cassandra/lib/HdrHistogram-2.1.9.jar:/opt/cassandra/lib/high-scale-lib-1.0.6.jar:/opt/cassandra/lib/hppc-0.5.4.jar:/opt/cassandra/lib/jackson-core-asl-1.9.13.jar:/opt/cassandra/lib/jackson-mapper-asl-1.9.13.jar:/opt/cassandra/lib/jamm-0.3.0.jar:/opt/cassandra/lib/javax.inject.jar:/opt/cassandra/lib/jbcrypt-0.3m.jar:/opt/cassandra/lib/jcl-over-slf4j-1.7.7.jar:/opt/cassandra/lib/jctools-core-1.2.1.jar:/opt/cassandra/lib/jflex-1.6.0.jar:/opt/cassandra/lib/jna-4.2.2.jar:/opt/cassandra/lib/joda-time-2.4.jar:/opt/cassandra/lib/json-simple-1.1.jar:/opt/cassandra/lib/jstackjunit-0.0.1.jar:/opt/cassandra/lib/libthrift-0.13.0.jar:/opt/cassandra/lib/log4j-over-slf4j-1.7.7.jar:/opt/cassandra/lib/logback-classic-1.2.9.jar:/opt/cassandra/lib/logback-core-1.2.9.jar:/opt/cassandra/lib/lz4-1.3.0.jar:/opt/cassandra/lib/metrics-core-3.1.5.jar:/opt/cassandra/lib/metrics-jvm-3.1.5.jar:/opt/cassandra/lib/metrics-logback-3.1.5.jar:/opt/cassandra/lib/netty-all-4.1.39.Final.jar:/opt/cassandra/lib/ohc-core-0.4.4.jar:/opt/cassandra/lib/ohc-core-j8-0.4.4.jar:/opt/cassandra/lib/reporter-config3-3.0.3.jar:/opt/cassandra/lib/reporter-config-base-3.0.3.jar:/opt/cassandra/lib/sigar-1.6.4.jar:/opt/cassandra/lib/slf4j-api-1.7.7.jar:/opt/cassandra/lib/snakeyaml-1.11.jar:/opt/cassandra/lib/snappy-java-1.1.1.7.jar:/opt/cassandra/lib/snowball-stemmer-1.3.0.581.1.jar:/opt/cassandra/lib/ST4-4.0.8.jar:/opt/cassandra/lib/stream-2.5.2.jar:/opt/cassandra/lib/thrift-server-0.3.7.jar:/opt/cassandra/lib/jsr223/*/*.jar:/opt/cassandra/lib/jamm-0.3.0.jar
INFO [main] 2025-10-16 05:22:36,233 CassandraDaemon.java:490 - JVM Arguments: [-Xloggc:/opt/cassandra/logs/gc.log, -ea, -XX:+UseThreadPriorities, -XX:ThreadPriorityPolicy=42, -XX:+HeapDumpOnOutOfMemoryError, -Xss256k, -XX:StringTableSize=1000003, -XX:+AlwaysPreTouch, -XX:-UseBiasedLocking, -XX:+UseTLAB, -XX:+ResizeTLAB, -XX:+UseNUMA, -XX:+PerfDisableSharedMem, -Djava.net.preferIPv4Stack=true, -Xms1g, -Xmx2g, -XX:+UseParNewGC, -XX:+UseConcMarkSweepGC, -XX:+CMSParallelRemarkEnabled, -XX:SurvivorRatio=8, -XX:MaxTenuringThreshold=1, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:CMSWaitDuration=10000, -XX:+CMSParallelInitialMarkEnabled, -XX:+CMSEdenChunksRecordAlways, -XX:+CMSClassUnloadingEnabled, -XX:+PrintGCDetails, -XX:+PrintGCDateStamps, -XX:+PrintHeapAtGC, -XX:+PrintTenuringDistribution, -XX:+PrintGCApplicationStoppedTime, -XX:+PrintPromotionFailure, -XX:+UseGCLogFileRotation, -XX:NumberOfGCLogFiles=10, -XX:GCLogFileSize=10M, -Xmn400M, -XX:+UseCondCardMark, -XX:CompileCommandFile=/opt/cassandra/conf/hotspot_compiler, -javaagent:/opt/cassandra/lib/jamm-0.3.0.jar, -Dcassandra.jmx.remote.port=7199, -Dcom.sun.management.jmxremote.rmi.port=7199, -Dcom.sun.management.jmxremote.authenticate=true, -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password, -Djava.library.path=/opt/cassandra/lib/sigar-bin, -Dcassandra.rpc_port=9161, -Dcassandra.native_transport_port=9041, -Dcassandra.ssl_storage_port=7013, -Dcassandra.storage_port=7012, -Dcassandra.jmx.local.port=7201, -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access, -Dcassandra.jmx.remote.port=7201, -Dcom.sun.management.jmxremote.rmi.port=7201, -Dcassandra.libjemalloc=/usr/lib64/libjemalloc.so.1, -XX:OnOutOfMemoryError=kill -9 %p, -Dlogback.configurationFile=logback.xml, -Dcassandra.logdir=/opt/cassandra/logs, -Dcassandra.storagedir=/opt/cassandra/data, -Dcassandra-foreground=yes]
WARN [main] 2025-10-16 05:22:36,401 NativeLibrary.java:187 - Unable to lock JVM memory (ENOMEM). This can result in part of the JVM being swapped out, especially with mmapped I/O enabled. Increase RLIMIT_MEMLOCK or run Cassandra as root.
INFO [main] 2025-10-16 05:22:36,405 StartupChecks.java:140 - jemalloc seems to be preloaded from /usr/lib64/libjemalloc.so.1
INFO [main] 2025-10-16 05:22:36,405 StartupChecks.java:176 - JMX is enabled to receive remote connections on port: 7201
INFO [main] 2025-10-16 05:22:36,419 SigarLibrary.java:44 - Initializing SIGAR library
INFO [main] 2025-10-16 05:22:36,468 SigarLibrary.java:180 - Checked OS settings and found them configured for optimal performance.
WARN [main] 2025-10-16 05:22:36,471 StartupChecks.java:311 - Maximum number of memory map areas per process (vm.max_map_count) 128960 is too low, recommended value: 1048575, you can change it with sysctl.
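The two WARNs above are host-level limits, not cassandra.yaml settings, so they have to be fixed on the host (or in the container runtime), for example:

sysctl -w vm.max_map_count=1048575                                   # value recommended in the warning
echo 'vm.max_map_count = 1048575' > /etc/sysctl.d/99-cassandra.conf  # persist across reboots
# For the memlock warning, raise RLIMIT_MEMLOCK on the container, e.g.
#   docker run --ulimit memlock=-1 ...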
WARN [main] 2025-10-16 05:22:36,499 StartupChecks.java:332 - Directory /var/lib/cassandra/commitlog doesn't exist
WARN [main] 2025-10-16 05:22:36,503 StartupChecks.java:332 - Directory /var/lib/cassandra/saved_caches doesn't exist
WARN [main] 2025-10-16 05:22:36,504 StartupChecks.java:332 - Directory /opt/cassandra/data/hints doesn't exist
INFO [main] 2025-10-16 05:22:36,560 QueryProcessor.java:116 - Initialized prepared statement caches with 10 MB (native) and 10 MB (Thrift)
INFO [main] 2025-10-16 05:22:37,265 ColumnFamilyStore.java:411 - Initializing system.IndexInfo
INFO [main] 2025-10-16 05:22:39,049 ColumnFamilyStore.java:411 - Initializing system.batches
INFO [main] 2025-10-16 05:22:39,077 ColumnFamilyStore.java:411 - Initializing system.paxos
INFO [main] 2025-10-16 05:22:39,155 ColumnFamilyStore.java:411 - Initializing system.local
INFO [main] 2025-10-16 05:22:39,198 ColumnFamilyStore.java:411 - Initializing system.peers
INFO [main] 2025-10-16 05:22:39,237 ColumnFamilyStore.java:411 - Initializing system.peer_events
INFO [main] 2025-10-16 05:22:39,259 ColumnFamilyStore.java:411 - Initializing system.range_xfers
INFO [main] 2025-10-16 05:22:39,284 ColumnFamilyStore.java:411 - Initializing system.compaction_history
INFO [main] 2025-10-16 05:22:39,308 ColumnFamilyStore.java:411 - Initializing system.sstable_activity
INFO [main] 2025-10-16 05:22:39,333 ColumnFamilyStore.java:411 - Initializing system.size_estimates
INFO [main] 2025-10-16 05:22:39,354 ColumnFamilyStore.java:411 - Initializing system.available_ranges
INFO [main] 2025-10-16 05:22:39,382 ColumnFamilyStore.java:411 - Initializing system.transferred_ranges
INFO [main] 2025-10-16 05:22:39,388 ColumnFamilyStore.java:411 - Initializing system.views_builds_in_progress
INFO [main] 2025-10-16 05:22:39,395 ColumnFamilyStore.java:411 - Initializing system.built_views
INFO [main] 2025-10-16 05:22:39,400 ColumnFamilyStore.java:411 - Initializing system.hints
INFO [main] 2025-10-16 05:22:39,415 ColumnFamilyStore.java:411 - Initializing system.batchlog
INFO [main] 2025-10-16 05:22:39,421 ColumnFamilyStore.java:411 - Initializing system.prepared_statements
INFO [main] 2025-10-16 05:22:39,429 ColumnFamilyStore.java:411 - Initializing system.schema_keyspaces
INFO [main] 2025-10-16 05:22:39,456 ColumnFamilyStore.java:411 - Initializing system.schema_columnfamilies
INFO [main] 2025-10-16 05:22:39,466 ColumnFamilyStore.java:411 - Initializing system.schema_columns
INFO [main] 2025-10-16 05:22:39,478 ColumnFamilyStore.java:411 - Initializing system.schema_triggers
INFO [main] 2025-10-16 05:22:39,493 ColumnFamilyStore.java:411 - Initializing system.schema_usertypes
INFO [main] 2025-10-16 05:22:39,507 ColumnFamilyStore.java:411 - Initializing system.schema_functions
INFO [main] 2025-10-16 05:22:39,518 ColumnFamilyStore.java:411 - Initializing system.schema_aggregates
INFO [main] 2025-10-16 05:22:39,522 ViewManager.java:137 - Not submitting build tasks for views in keyspace system as storage service is not initialized
INFO [main] 2025-10-16 05:22:39,685 ApproximateTime.java:44 - Scheduling approximate time-check task with a precision of 10 milliseconds
INFO [main] 2025-10-16 05:22:39,769 ColumnFamilyStore.java:411 - Initializing system_schema.keyspaces
INFO [main] 2025-10-16 05:22:39,775 ColumnFamilyStore.java:411 - Initializing system_schema.tables
INFO [main] 2025-10-16 05:22:39,779 ColumnFamilyStore.java:411 - Initializing system_schema.columns
INFO [main] 2025-10-16 05:22:39,786 ColumnFamilyStore.java:411 - Initializing system_schema.triggers
INFO [main] 2025-10-16 05:22:39,808 ColumnFamilyStore.java:411 - Initializing system_schema.dropped_columns
INFO [main] 2025-10-16 05:22:39,839 ColumnFamilyStore.java:411 - Initializing system_schema.views
INFO [main] 2025-10-16 05:22:39,854 ColumnFamilyStore.java:411 - Initializing system_schema.types
INFO [main] 2025-10-16 05:22:39,884 ColumnFamilyStore.java:411 - Initializing system_schema.functions
INFO [main] 2025-10-16 05:22:39,899 ColumnFamilyStore.java:411 - Initializing system_schema.aggregates
INFO [main] 2025-10-16 05:22:39,911 ColumnFamilyStore.java:411 - Initializing system_schema.indexes
INFO [main] 2025-10-16 05:22:39,927 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_schema as storage service is not initialized
INFO [MemtableFlushWriter:1] 2025-10-16 05:22:40,612 CacheService.java:112 - Initializing key cache with capacity of 49 MBs.
INFO [MemtableFlushWriter:1] 2025-10-16 05:22:40,732 CacheService.java:134 - Initializing row cache with capacity of 0 MBs
INFO [MemtableFlushWriter:1] 2025-10-16 05:22:40,738 CacheService.java:163 - Initializing counter cache with capacity of 24 MBs
INFO [MemtableFlushWriter:1] 2025-10-16 05:22:40,740 CacheService.java:174 - Scheduling counter cache save to every 7200 seconds (going to save all keys).
INFO [CompactionExecutor:4] 2025-10-16 05:22:41,055 BufferPool.java:230 - Global buffer pool is enabled, when pool is exhausted (max is 502.000MiB) it will allocate on heap
INFO [main] 2025-10-16 05:22:41,145 StorageService.java:600 - Populating token metadata from system tables
INFO [main] 2025-10-16 05:22:41,237 StorageService.java:607 - Token metadata:
INFO [main] 2025-10-16 05:22:41,292 AutoSavingCache.java:174 - Completed loading (11 ms; 5 keys) KeyCache cache
INFO [main] 2025-10-16 05:22:41,325 CommitLog.java:152 - No commitlog files found; skipping replay
INFO [main] 2025-10-16 05:22:41,325 StorageService.java:600 - Populating token metadata from system tables
INFO [main] 2025-10-16 05:22:41,348 StorageService.java:607 - Token metadata:
INFO [main] 2025-10-16 05:22:41,491 QueryProcessor.java:163 - Preloaded 0 prepared statements
INFO [main] 2025-10-16 05:22:41,493 StorageService.java:618 - Cassandra version: 3.11.3
INFO [main] 2025-10-16 05:22:41,493 StorageService.java:619 - Thrift API version: 20.1.0
INFO [main] 2025-10-16 05:22:41,494 StorageService.java:620 - CQL supported versions: 3.4.4 (default: 3.4.4)
INFO [main] 2025-10-16 05:22:41,499 StorageService.java:622 - Native protocol supported versions: 3/v3, 4/v4, 5/v5-beta (default: 4/v4)
INFO [main] 2025-10-16 05:22:41,527 IndexSummaryManager.java:85 - Initializing index summary manager with a memory pool size of 49 MB and a resize interval of 60 minutes
INFO [main] 2025-10-16 05:22:41,534 MessagingService.java:761 - Starting Messaging Service on /10.0.0.38:7012 (ens3)
WARN [main] 2025-10-16 05:22:41,543 SystemKeyspace.java:1087 - No host ID found, created 30886e86-cc2d-439a-b479-d451afecf8cb (Note: This should happen exactly once per node).
INFO [main] 2025-10-16 05:22:41,565 OutboundTcpConnection.java:108 - OutboundTcpConnection using coalescing strategy DISABLED
INFO [HANDSHAKE-/10.0.0.241] 2025-10-16 05:22:41,612 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.241
INFO [HANDSHAKE-/10.0.0.242] 2025-10-16 05:22:41,663 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.242
INFO [main] 2025-10-16 05:22:42,590 StorageService.java:550 - Unable to gossip with any peers but continuing anyway since node is in its own seed list
INFO [main] 2025-10-16 05:22:42,630 StorageService.java:704 - Loading persisted ring state
INFO [main] 2025-10-16 05:22:42,633 StorageService.java:822 - Starting up server gossip
INFO [main] 2025-10-16 05:22:42,784 StorageService.java:883 - This node will not auto bootstrap because it is configured to be a seed node.
INFO [main] 2025-10-16 05:22:42,802 BootStrapper.java:228 - Generated random tokens.
tokens are [5754390781764386797, -3499456077589973894, -164637248002052878, 6352724687925873393, 4417992690886165821, -5112906264648847253, 6271467364896641619, -465043112574336614, -3714769376407299017, -5344304911605214374, 179978316920795759, 6928552997914910752, 1199551337179251520, 9068869326521643672, -3598659895083019824, -8031501539371801104, 2377249342949121139, 2782127575938994011, 968578332673512878, 2637591740886173954, 1060343472593241481, 7341286072023389817, -5592687226300865368, 6627406732907803032, 2199132458348414489, -8437223559603803068, -3218238108388431617, -2836955453042008753, 1821452411970126563, -3188704850092665093, -1819946958326675580, -8033383895997756955, -9148385718168737579, -2216237493237493997, -2205733127728559579, 1766459013518804686, -1255991392067274674, 3514816463053993339, -2621114307963781213, -6240270444730596014, -8376779142172824886, -1944070915905536553, 3393384308066899706, 4506490208512953882, 885669079218512923, -4281436509569606062, -4226894281126750386, 3986162296166800480, 3837617483023213192, 8624099316139056449, 7967095513445240670, 5619128992967683952, -8109163277141487169, -1644493313771294110, -7786067082465511708, -7382545688785285642, -3811549709990359417, 7912856651210194292, 8087372412505995686, -8139499831726202906, 1825300599107644419, 3058312003685325163, -5240898571372527923, -3955234213673172844, -5975092482050019039, 8847611238417318824, -1895374350811807313, 5555845796093935162, -5292531865219994726, -5373747812716286656, 5906144851851406948, -1159232939807719797, 6943812938038627655, 7397117272447085256, -8086929335577328402, -1724972073083465233, 8048215881089505133, 8138595047803419235, 4646053634285456189, 4912076594323668784, -2031835264258270711, 4981517720662118956, -5611418273250666821, -8277335025304717887, 3537908353339593684, -1969967902436034827, -2941421669387289941, -5677567552924720532, 284022361250337055, 801315033528005863, -8743456713779461548, 174171437809532826, 949383837810252345, -5350656267551793097, 8906872306610077530, 9052045305779947469, 4527575859403604825, 828042641520026373, -5328409866636209990, 2253017459777337927, 5223334753160547478, 8128654128567680750, 4559298585703757771, -7576056568840770783, -4160452496615446307, 7338340315787222433, 4775048338342902762, -7819548602888406826, 8069041147442107428, -507625119092061984, 7818869661876777457, 7255957498656984660, 4816115937040789918, -8539491647030171521, 7892203029445948129, 8979103303613189576, -8002450149761204888, 4457217956600228020, 4803136334487872918, 8799915325646614066, -5120206000267118934, 5358476738255490144, -6699253486047605687, -6823072085966571331, 2483952707661756499, 8046551534248816, -3611954291642601433, -9039384028125971112, 2309565122445390128, 1397858845265808026, -4208232586524590497, -3954197291687755329, 4009543786363966952, 8925824731854825224, 5681437485491124446, 4152189648688129785, -5324530164949204669, 3898679155369060842, 3543746487712920232, -6675175655129101826, 6972108219761619662, -1781295098193772604, -4113651019243797032, -187687265850100442, 8636966469320588092, 4703677355401194313, -8103230338283880187, -1172842514876860901, -6536237363741242300, -1355679392336201040, -545652166988930366, 4165337941974053105, -2791959141813297337, -3950931168018410183, 6650975360378244746, -2810303283031712740, -719839660699907531, 2025963911492339712, -2021451206841319065, 5486048999629415977, -1460696154059791253, 841560921087935976, 571982381867569347, -1721604546943367853, -3381176437455739612, 
-8730265614240227754, 5560692552318518295, -7329941216006598870, -3654886776727002326, -4919027508712555338, 2866993751941146251, -3906993379064957536, -8143164958492091757, -974207083682807912, 3689153736289287420, -6459162074008535719, 8481989507931760783, -659915960359447177, 3673667316695179774, 7222940799234718517, -6807866030603597210, 574991535652183098, 9000155935818778679, -8534937365168842322, -1587388840818287539, 3568878788233209250, 8068292207022187018, -99743078203753188, 5643990235065490636, -7827623543322959562, 4120849302681529826, 3297108919704407905, 9019948397267929232, -6857479124730923650, 2397872506926732953, 3466618812386206489, 591147859881439134, 4086588370076814563, -3821088154219009603, 7911123483952400273, 5357617748157735020, -6392081827728801971, 200360631169675893, -3302107808518840579, 5229519529772130631, 8034340115409299596, 697377991326100610, 34609054852043608, -5087500980981861827, 6390743462101341516, 8935134226062379393, 4592762414350433300, 3226141199459060278, -8054538961773757687, -6329601914152196537, -1128438368378221229, 2789855883345270636, 8456284985756264424, -7348575811430245828, 2703595001456115504, 8081284853295452990, -4578228075087232601, -8621830047717932198, -8265786292373053929, -4133603064780122203, -6017108387214914181, -7735358294478186311, 2375725779078576262, -1663368099966936990, 7986509753056129358, -8313712312679867888, -6487678127230704532, -8882846490521989794, 3142503618858155338, -7849744158542127125, 3408968085000422370, -1805898236408734103, 8287689970143006771, 1511669139581976707, -5716251655619684321, 1556214809232607204, 6055242016833384255, 6036767287821599670, -7430353718587256910, 1162067258874742673, -9207111136238127078, 6666959839444144658, 6494070942568387350, -1310985076179012443, -6653671578404428355, 6111038147971564378, 2854449740090705366, -1858964676111985439, -4694243956221635917, -1534560497456172335, 1587848521394116338]
INFO [main] 2025-10-16 05:22:42,805 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_traces, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=2}}, tables=[org.apache.cassandra.config.CFMetaData@3c0a8f19[cfId=c5e99f16-8677-3914-b17e-960613512345,ksName=system_traces,cfName=sessions,flags=[COMPOUND],params=TableParams{comment=tracing sessions, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=0, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [client command coordinator duration request started_at parameters]],partitionKeyColumns=[session_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[client, command, session_id, coordinator, request, started_at, duration, parameters],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@4e756254[cfId=8826e8e9-e16a-3728-8753-3bc1fc713c25,ksName=system_traces,cfName=events,flags=[COMPOUND],params=TableParams{comment=tracing events, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=0, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | [activity source source_elapsed thread]],partitionKeyColumns=[session_id],clusteringColumns=[event_id],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[activity, event_id, session_id, source, thread, source_elapsed],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]}
INFO [GossipStage:1] 2025-10-16 05:22:43,003 Gossiper.java:1055 - Node /10.0.0.242 is now part of the cluster
INFO [RequestResponseStage-1] 2025-10-16 05:22:43,014 Gossiper.java:1019 - InetAddress /10.0.0.242 is now UP
INFO [GossipStage:1] 2025-10-16 05:22:43,064 Gossiper.java:1055 - Node /10.0.0.241 is now part of the cluster
INFO [RequestResponseStage-1] 2025-10-16 05:22:43,079 Gossiper.java:1019 - InetAddress /10.0.0.241 is now UP
+ cqlsh 10.0.0.38 9041 -e 'CREATE KEYSPACE IF NOT EXISTS reaper_db WITH replication = {'\''class'\'': '\''NetworkTopologyStrategy'\'', '\''datacenter1'\'': 3};'
INFO [MigrationStage:1] 2025-10-16 05:22:43,470 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_traces as storage service is not initialized
INFO [MigrationStage:1] 2025-10-16 05:22:43,473 ColumnFamilyStore.java:411 - Initializing system_traces.events
INFO [MigrationStage:1] 2025-10-16 05:22:43,489 ColumnFamilyStore.java:411 - Initializing system_traces.sessions
INFO [main] 2025-10-16 05:22:43,507 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_distributed, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@401ef4b8[cfId=759fffad-624b-3181-80ee-fa9a52d1f627,ksName=system_distributed,cfName=repair_history,flags=[COMPOUND],params=TableParams{comment=Repair history, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | [coordinator exception_message exception_stacktrace finished_at parent_id range_begin range_end started_at status participants]],partitionKeyColumns=[keyspace_name, columnfamily_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[status, id, coordinator, finished_at, participants, exception_stacktrace, parent_id, range_end, range_begin, exception_message, keyspace_name, started_at, columnfamily_name],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@4d6b7f72[cfId=deabd734-b99d-3b9c-92e5-fd92eb5abf14,ksName=system_distributed,cfName=parent_repair_history,flags=[COMPOUND],params=TableParams{comment=Repair history, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [exception_message exception_stacktrace finished_at keyspace_name started_at columnfamily_names options requested_ranges successful_ranges]],partitionKeyColumns=[parent_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[requested_ranges, exception_message, keyspace_name, successful_ranges, started_at, finished_at, options, exception_stacktrace, parent_id, columnfamily_names],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@8803c32[cfId=5582b59f-8e4e-35e1-b913-3acada51eb04,ksName=system_distributed,cfName=view_build_status,flags=[COMPOUND],params=TableParams{comment=Materialized View build status, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UUIDType),partitionColumns=[[] | [status]],partitionKeyColumns=[keyspace_name, view_name],clusteringColumns=[host_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[view_name, status, keyspace_name, host_id],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]}
INFO [HANDSHAKE-/10.0.0.242] 2025-10-16 05:22:43,516 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.242
INFO [HANDSHAKE-/10.0.0.241] 2025-10-16 05:22:43,518 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.241
Connection error: ('Unable to connect to any servers', {'10.0.0.38': error(111, "Tried connecting to [('10.0.0.38', 9041)]. Last error: Connection refused")})
+ sleep 10
Last error: Connection refused")}) + sleep 10 INFO [MigrationStage:1] 2025-10-16 05:22:44,113 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_distributed as storage service is not initialized INFO [MigrationStage:1] 2025-10-16 05:22:44,123 ColumnFamilyStore.java:411 - Initializing system_distributed.parent_repair_history INFO [MigrationStage:1] 2025-10-16 05:22:44,216 ColumnFamilyStore.java:411 - Initializing system_distributed.repair_history INFO [MigrationStage:1] 2025-10-16 05:22:44,310 ColumnFamilyStore.java:411 - Initializing system_distributed.view_build_status INFO [main] 2025-10-16 05:22:44,634 StorageService.java:1446 - JOINING: Finish joining ring INFO [main] 2025-10-16 05:22:44,797 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_auth, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=1}}, tables=[org.apache.cassandra.config.CFMetaData@7eb7f5d0[cfId=5bc52802-de25-35ed-aeab-188eecebb090,ksName=system_auth,cfName=roles,flags=[COMPOUND],params=TableParams{comment=role definitions, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [can_login is_superuser salted_hash member_of]],partitionKeyColumns=[role],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[salted_hash, member_of, role, can_login, is_superuser],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@10186675[cfId=0ecdaa87-f8fb-3e60-88d1-74fb36fe5c0d,ksName=system_auth,cfName=role_members,flags=[COMPOUND],params=TableParams{comment=role memberships lookup table, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | []],partitionKeyColumns=[role],clusteringColumns=[member],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[role, member],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@469f9aa7[cfId=3afbe79f-2194-31a7-add7-f5ab90d8ec9c,ksName=system_auth,cfName=role_permissions,flags=[COMPOUND],params=TableParams{comment=permissions granted to db roles, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, 
speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [permissions]],partitionKeyColumns=[role],clusteringColumns=[resource],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[role, resource, permissions],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@5477f884[cfId=5f2fbdad-91f1-3946-bd25-d5da3a5c35ec,ksName=system_auth,cfName=resource_role_permissons_index,flags=[COMPOUND],params=TableParams{comment=index of db roles with permissions granted on a resource, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | []],partitionKeyColumns=[resource],clusteringColumns=[role],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[resource, role],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [InternalResponseStage:1] 2025-10-16 05:22:44,894 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_auth as storage service is not initialized INFO [InternalResponseStage:1] 2025-10-16 05:22:44,902 ColumnFamilyStore.java:411 - Initializing system_auth.resource_role_permissons_index INFO [InternalResponseStage:1] 2025-10-16 05:22:44,920 ColumnFamilyStore.java:411 - Initializing system_auth.role_members INFO [InternalResponseStage:1] 2025-10-16 05:22:44,943 ColumnFamilyStore.java:411 - Initializing system_auth.role_permissions INFO [InternalResponseStage:1] 2025-10-16 05:22:44,980 ColumnFamilyStore.java:411 - Initializing system_auth.roles INFO [main] 2025-10-16 05:22:45,190 Gossiper.java:1692 - Waiting for gossip to settle... 
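
The first `cqlsh ... CREATE KEYSPACE` above failed with "Connection refused" because the node's CQL native transport was not listening yet, so the entrypoint slept 10 seconds and retried. A minimal sketch of that wait-and-retry pattern using the DataStax Python driver follows; the host and port (10.0.0.38:9041) are taken from this log, while the function name, delay, and attempt count are illustrative, not the entrypoint's actual code.

# Sketch: wait for the CQL native transport before running schema statements.
# Mirrors the entrypoint's "Connection refused -> sleep 10 -> retry" behavior.
import time
from cassandra.cluster import Cluster, NoHostAvailable

def wait_for_cql(host="10.0.0.38", port=9041, delay=10, attempts=30):
    for attempt in range(1, attempts + 1):
        try:
            cluster = Cluster([host], port=port)
            session = cluster.connect()  # raises NoHostAvailable while the port is closed
            return cluster, session
        except NoHostAvailable:
            print(f"attempt {attempt}: CQL not up yet, sleeping {delay}s")
            time.sleep(delay)
    raise RuntimeError("CQL transport never came up")
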
INFO [main] 2025-10-16 05:22:53,191 Gossiper.java:1723 - No gossip backlog; proceeding INFO [main] 2025-10-16 05:22:53,466 NativeTransportService.java:70 - Netty using native Epoll event loop INFO [main] 2025-10-16 05:22:53,671 Server.java:155 - Using Netty Version: [netty-buffer=netty-buffer-4.1.39.Final.88c2a4c (repository: dirty), netty-codec=netty-codec-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-dns=netty-codec-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-haproxy=netty-codec-haproxy-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http=netty-codec-http-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http2=netty-codec-http2-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-memcache=netty-codec-memcache-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-mqtt=netty-codec-mqtt-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-redis=netty-codec-redis-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-smtp=netty-codec-smtp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-socks=netty-codec-socks-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-stomp=netty-codec-stomp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-xml=netty-codec-xml-4.1.39.Final.88c2a4c (repository: dirty), netty-common=netty-common-4.1.39.Final.88c2a4c (repository: dirty), netty-handler=netty-handler-4.1.39.Final.88c2a4c (repository: dirty), netty-handler-proxy=netty-handler-proxy-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver=netty-resolver-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver-dns=netty-resolver-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-tcnative=netty-tcnative-2.0.25.Final.c46c351, netty-transport=netty-transport-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-epoll=netty-transport-native-epoll-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-kqueue=netty-transport-native-kqueue-4.1.39.Final.88c2a4cab5 (repository: dirty), netty-transport-native-unix-common=netty-transport-native-unix-common-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-rxtx=netty-transport-rxtx-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-sctp=netty-transport-sctp-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-udt=netty-transport-udt-4.1.39.Final.88c2a4c (repository: dirty)] INFO [main] 2025-10-16 05:22:53,675 Server.java:156 - Starting listening for CQL clients on /10.0.0.38:9041 (unencrypted)... INFO [main] 2025-10-16 05:22:53,800 ThriftServer.java:116 - Binding thrift service to /10.0.0.38:9161 INFO [Thread-2] 2025-10-16 05:22:53,805 ThriftServer.java:133 - Listening for thrift clients... 
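
With the native transport now listening on 10.0.0.38:9041, the retried cqlsh command succeeds. The equivalent keyspace bootstrap via the Python driver is sketched below; the CQL text matches the command in the log verbatim, and only the driver usage around it is illustrative.

# Sketch: the same keyspace bootstrap the entrypoint performs with cqlsh.
from cassandra.cluster import Cluster

cluster = Cluster(["10.0.0.38"], port=9041)
session = cluster.connect()
session.execute(
    "CREATE KEYSPACE IF NOT EXISTS reaper_db "
    "WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 3}"
)
cluster.shutdown()
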
+ cqlsh 10.0.0.38 9041 -e 'CREATE KEYSPACE IF NOT EXISTS reaper_db WITH replication = {'\''class'\'': '\''NetworkTopologyStrategy'\'', '\''datacenter1'\'': 3};' INFO [Native-Transport-Requests-1] 2025-10-16 05:22:54,506 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=reaper_db, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.NetworkTopologyStrategy, datacenter1=3}}, tables=[], views=[], functions=[], types=[]} + export CASSANDRA_REAPER_JMX_KEY + [[ 10.0.0.38 == \1\0\.\0\.\0\.\3\8 ]] + sleep 120 + run_service cassandra-reaper + [[ -n 1999 ]] + [[ -n 1999 ]] + local owner_opts=1999:1999 + mkdir -p /etc/contrail /var/lib/contrail + chown 1999:1999 /etc/contrail /var/lib/contrail + find /etc/contrail -uid 0 -exec chown 1999:1999 '{}' + + chmod 755 /etc/contrail + do_run_service cassandra-reaper + [[ -n 1999 ]] + [[ -n 1999 ]] + mkdir -p /var/crashes + chmod 777 /var/crashes ++ id -un 1999 + local user_name=contrail + export HOME=/home/contrail + HOME=/home/contrail + mkdir -p /home/contrail + chown -R 1999:1999 /home/contrail + exec setpriv --reuid 1999 --regid 1999 --clear-groups --no-new-privs cassandra-reaper Looking for reaper under /usr WARN [2025-10-16 05:22:59,659] [main] c.d.d.c.ReplicationStrategy$NetworkTopologyStrategy - Error while computing token map for keyspace reaper_db with datacenter datacenter1: could not achieve replication factor 3 (found 2 replicas only), check your keyspace replication settings. INFO [Native-Transport-Requests-2] 2025-10-16 05:22:59,900 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@5857aedb[cfId=2e470fc0-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, executed_at, script],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:00,080 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration INFO [MigrationStage:1] 2025-10-16 05:23:00,481 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration_leader WARN [2025-10-16 05:23:01,084] [main] i.c.s.CassandraStorage - Starting db migration from 0 to 31… WARN [2025-10-16 05:23:01,182] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:01,189] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:01,193] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [HANDSHAKE-/10.0.0.38] 2025-10-16 05:23:01,223 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.38 WARN [Native-Transport-Requests-1] 2025-10-16 05:23:01,230 TimeFcts.java:99 - The function 'dateof' is deprecated. Use the function 'toTimestamp' instead. INFO [MigrationStage:1] 2025-10-16 05:23:01,457 ColumnFamilyStore.java:411 - Initializing reaper_db.running_reapers INFO [Native-Transport-Requests-4] 2025-10-16 05:23:01,867 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@310368f8[cfId=2f7333b0-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:02,029 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_unit_v1 INFO [MigrationStage:1] 2025-10-16 05:23:02,454 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_by_cluster_and_keyspace INFO [Native-Transport-Requests-4] 2025-10-16 05:23:02,821 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@3bfb0388[cfId=3004c550-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=repair_run_by_cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:02,952 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster INFO [MigrationStage:1] 2025-10-16 05:23:03,328 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_v1 INFO [Native-Transport-Requests-5] 2025-10-16 05:23:03,801 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@3fe89f74[cfId=309a4e90-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[partitioner, seed_hosts, name],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:04,025 ColumnFamilyStore.java:411 - Initializing reaper_db.cluster INFO [MigrationStage:1] 2025-10-16 05:23:04,362 ColumnFamilyStore.java:411 - Initializing reaper_db.snapshot INFO [Native-Transport-Requests-5] 2025-10-16 05:23:05,057 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@70126cc2[cfId=3159f510-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:05,174 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v1 INFO [MigrationStage:1] 2025-10-16 05:23:05,950 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run INFO [Native-Transport-Requests-4] 2025-10-16 
05:23:06,196 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@5417691c[cfId=3207c140-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:06,345 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_unit INFO [MigrationStage:1] 2025-10-16 05:23:07,356 ColumnFamilyStore.java:411 - Initializing reaper_db.leader WARN [2025-10-16 05:23:08,055] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:08,060] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:08,064] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-16 05:23:09,224] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:09,229] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:09,233] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-16 05:23:09,331] [main] i.c.s.c.FixRepairRunTimestamps - Correcting timestamps in the repair_run table. 
This may take some minutes… WARN [2025-10-16 05:23:09,369] [main] i.c.s.c.FixRepairRunTimestamps - Correction of timestamps in the repair_run table completed. WARN [2025-10-16 05:23:09,420] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:09,426] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:09,429] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-16 05:23:09,495] [main] i.c.s.c.FixRepairRunTimestamps - Correcting timestamps in the repair_run table. This may take some minutes… WARN [2025-10-16 05:23:09,497] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO repair_run (id,start_time,pause_time,end_time) VALUES(?, ?, ?, ?)' WARN [2025-10-16 05:23:09,527] [main] i.c.s.c.FixRepairRunTimestamps - Correction of timestamps in the repair_run table completed. WARN [2025-10-16 05:23:09,573] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:09,578] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:09,581] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-16 05:23:10,746] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:10,752] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) 
IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:10,755] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-10-16 05:23:10,958 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v2 INFO [Native-Transport-Requests-5] 2025-10-16 05:23:11,752 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@732a9556[cfId=35578880-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:12,013 ColumnFamilyStore.java:411 - Initializing reaper_db.node_operations WARN [2025-10-16 05:23:12,326] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:12,329] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:12,332] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
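
The repeated "Re-preparing already prepared query" warnings above come from Reaper's migration workers each calling prepare() on the same statement text; the driver asks that a statement be prepared once and the handle reused. A minimal sketch of the recommended pattern follows (illustrative code, not Reaper's; the INSERT text and column list are taken from the warning itself, and the sample row values are made up).

# Sketch of the pattern the driver warning asks for: prepare a statement once,
# then bind and execute it many times, instead of preparing per worker/call.
import datetime
from cassandra.cluster import Cluster

cluster = Cluster(["10.0.0.38"], port=9041)
session = cluster.connect("reaper_db")

# Prepare once (the expensive, server-side step)...
insert_migration = session.prepare(
    "INSERT INTO schema_migration (applied_successful, version, script_name, script, executed_at) "
    "VALUES (?, ?, ?, ?, ?)"
)

# ...then reuse the prepared handle for every execution.
for version, name, script in [(1, "001_init.cql", "-- ddl"), (2, "002_more.cql", "-- ddl")]:
    session.execute(insert_migration, (True, version, name, script, datetime.datetime.utcnow()))
cluster.shutdown()
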
INFO [Native-Transport-Requests-3] 2025-10-16 05:23:12,358 MigrationManager.java:454 - Update table 'reaper_db/cluster' From org.apache.cassandra.config.CFMetaData@2617c8ad[cfId=309a4e90-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner properties seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, name, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@22cb38d7[cfId=309a4e90-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-16 05:23:13,309] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:13,313] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:13,315] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-10-16 05:23:13,472 ColumnFamilyStore.java:411 - Initializing reaper_db.diagnostic_event_subscription WARN [2025-10-16 05:23:14,006] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:14,010] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:14,014] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [STREAM-INIT-/10.0.0.242:44436] 2025-10-16 05:23:14,455 StreamResultFuture.java:116 - [Stream #36c437e0-aa50-11f0-b61b-4ff51e317e75 ID#0] Creating new streaming plan for Bootstrap INFO [STREAM-INIT-/10.0.0.242:44436] 2025-10-16 05:23:14,477 StreamResultFuture.java:123 - [Stream #36c437e0-aa50-11f0-b61b-4ff51e317e75, ID#0] Received streaming plan for Bootstrap INFO [STREAM-INIT-/10.0.0.242:44440] 2025-10-16 05:23:14,499 StreamResultFuture.java:123 - [Stream #36c437e0-aa50-11f0-b61b-4ff51e317e75, ID#0] Received streaming plan for Bootstrap INFO [STREAM-IN-/10.0.0.242:44440] 2025-10-16 05:23:14,533 StreamResultFuture.java:187 - [Stream #36c437e0-aa50-11f0-b61b-4ff51e317e75] Session with /10.0.0.242 is complete INFO [STREAM-IN-/10.0.0.242:44440] 2025-10-16 05:23:14,537 StreamResultFuture.java:219 - [Stream #36c437e0-aa50-11f0-b61b-4ff51e317e75] All sessions completed INFO [Native-Transport-Requests-1] 2025-10-16 05:23:14,833 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@48117272[cfId=372da810-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_metrics_v3,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket, host],clusteringColumns=[ts, metric_scope, metric_name, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:14,945 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v3 WARN 
[2025-10-16 05:23:15,290] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:15,296] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:15,300] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [Native-Transport-Requests-1] 2025-10-16 05:23:15,346 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@61c6c61e[cfId=377bc810-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=repair_run_by_cluster_v2,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimeUUIDType)),partitionColumns=[[] | [repair_run_state]],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, repair_run_state, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:15,487 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster_v2 WARN [2025-10-16 05:23:15,991] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:15,995] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:16,000] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
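
Earlier, Reaper warned that reaper_db requests replication factor 3 in datacenter1 but only 2 replicas were found, consistent with the third node still bootstrapping (the Bootstrap stream from /10.0.0.242 above). One way to check the configured replication against the hosts the driver can see is sketched below; system_schema.keyspaces and its replication column are standard in Cassandra 3.x, and the rest is a diagnostic illustration.

# Sketch: read reaper_db's configured replication from system_schema.keyspaces
# and compare with the nodes the driver currently knows about. Diagnostic only.
from cassandra.cluster import Cluster

cluster = Cluster(["10.0.0.38"], port=9041)
session = cluster.connect()

row = session.execute(
    "SELECT replication FROM system_schema.keyspaces WHERE keyspace_name = 'reaper_db'"
).one()
print("configured replication:", row.replication)  # e.g. {'class': ..., 'datacenter1': '3'}

print("hosts known to the driver:", [h.address for h in cluster.metadata.all_hosts()])
cluster.shutdown()
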
INFO [Native-Transport-Requests-2] 2025-10-16 05:23:16,036 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@150e2bb0[cfId=31cd9e70-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, last_event, id, segment_end_time, state, cluster_name, end_time, end_token, start_token, segment_start_time, segment_state, cause, creation_time, start_time, coordinator_host, token_ranges, owner, repair_parallelism, tables, segment_id, pause_time, repair_unit_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@729fcd68[cfId=31cd9e70-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-16 05:23:16,879] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:16,882] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:16,884] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [Native-Transport-Requests-4] 2025-10-16 05:23:16,915 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@325f4c3[cfId=386b5830-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=running_repairs,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [reaper_instance_host reaper_instance_id segment_id]],partitionKeyColumns=[repair_id],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, repair_id, node, segment_id, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:17,055 ColumnFamilyStore.java:411 - Initializing reaper_db.running_repairs WARN [2025-10-16 05:23:17,352] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:17,356] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:17,359] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
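
The "Update table" entries in this stretch are Reaper's migrations issuing additive ALTERs (for example, reaper_db/cluster gaining a state column above). Cassandra 3.11's ALTER TABLE ... ADD has no IF NOT EXISTS form, so an idempotent migration typically catches the "column already exists" error instead; a minimal sketch under that assumption follows (the column type text is assumed, since the log's metadata dump does not show it).

# Sketch of an idempotent additive migration: ALTER TABLE ... ADD raises
# InvalidRequest when the column already exists, so a re-runnable migration
# swallows exactly that error.
from cassandra import InvalidRequest
from cassandra.cluster import Cluster

cluster = Cluster(["10.0.0.38"], port=9041)
session = cluster.connect("reaper_db")
try:
    session.execute("ALTER TABLE cluster ADD state text")  # matches the update logged above
except InvalidRequest:
    pass  # column already present; the migration ran before
cluster.shutdown()
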
INFO [Native-Transport-Requests-2] 2025-10-16 05:23:17,385 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@65bb6d91[cfId=38b30f90-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=percent_repaired_by_schedule,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [keyspace_name percent_repaired table_name ts]],partitionKeyColumns=[cluster_name, repair_schedule_id, time_bucket],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[time_bucket, node, ts, keyspace_name, percent_repaired, repair_schedule_id, table_name, cluster_name],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:17,526 ColumnFamilyStore.java:411 - Initializing reaper_db.percent_repaired_by_schedule WARN [2025-10-16 05:23:18,010] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:18,013] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:18,015] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
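
Several of the tables created in this run (node_metrics_v1, node_metrics_v3, node_operations, running_repairs, percent_repaired_by_schedule) carry a table-level default_time_to_live so their rows expire automatically, which suits short-lived metrics. A minimal sketch of how that default behaves follows; the scratch table here is illustrative, not one of Reaper's, and the 3600-second TTL matches percent_repaired_by_schedule above.

# Sketch: table-level default_time_to_live, as used by reaper_db's metrics tables.
from cassandra.cluster import Cluster

cluster = Cluster(["10.0.0.38"], port=9041)
session = cluster.connect()
session.execute(
    "CREATE TABLE IF NOT EXISTS reaper_db.ttl_demo "
    "(k text PRIMARY KEY, v text) WITH default_time_to_live = 3600"
)
session.execute("INSERT INTO reaper_db.ttl_demo (k, v) VALUES ('a', 'expires in 1h')")
# TTL(v) reports the remaining lifetime inherited from the table default.
print(session.execute("SELECT TTL(v) AS ttl FROM reaper_db.ttl_demo WHERE k = 'a'").one().ttl)
cluster.shutdown()
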
INFO [Native-Transport-Requests-2] 2025-10-16 05:23:18,040 MigrationManager.java:454 - Update table 'reaper_db/repair_unit_v1' From org.apache.cassandra.config.CFMetaData@6b488a36[cfId=2f7333b0-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@1c7d13b2[cfId=2f7333b0-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-7] 2025-10-16 05:23:20,195 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@150e2bb0[cfId=31cd9e70-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | 
[coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@18123ab0[cfId=31cd9e70-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-16 05:23:21,014] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:21,018] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:21,020] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
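
The migration logged just below (Migration016) alters every reaper_db table to set dclocal_read_repair_chance to zero, disabling probabilistic datacenter-local read repair. A minimal sketch of that kind of table-parameter change follows; the two table names are examples from this log, whereas the real migration walks all tables in the keyspace.

# Sketch: the table-parameter change Migration016 applies across reaper_db.
from cassandra.cluster import Cluster

cluster = Cluster(["10.0.0.38"], port=9041)
session = cluster.connect()
for table in ("repair_unit_v1", "node_metrics_v3"):  # the real migration covers every table
    session.execute(f"ALTER TABLE reaper_db.{table} WITH dclocal_read_repair_chance = 0.0")
cluster.shutdown()
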
INFO [Native-Transport-Requests-3] 2025-10-16 05:23:21,056 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_v1' From org.apache.cassandra.config.CFMetaData@6b0bece0[cfId=303d1360-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity next_activation owner pause_time repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, id, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@109541a1[cfId=303d1360-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-16 05:23:23,239] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-10-16 05:23:23,245] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) 
IF NOT EXISTS USING TTL 300' WARN [2025-10-16 05:23:23,247] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-10-16 05:23:23,849] [main] i.c.s.c.Migration016 - altering every table to set `dclocal_read_repair_chance` to zero… INFO [Native-Transport-Requests-5] 2025-10-16 05:23:23,854 MigrationManager.java:454 - Update table 'reaper_db/repair_unit_v1' From org.apache.cassandra.config.CFMetaData@6b488a36[cfId=2f7333b0-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@49ea1907[cfId=2f7333b0-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-4] 2025-10-16 05:23:23,854 MigrationManager.java:454 - Update table 'reaper_db/node_metrics_v3' From org.apache.cassandra.config.CFMetaData@a434048[cfId=372da810-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_metrics_v3,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 
'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket, host],clusteringColumns=[ts, metric_scope, metric_name, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@36a85dcd[cfId=372da810-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_metrics_v3,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket, host],clusteringColumns=[ts, metric_scope, metric_name, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-3] 2025-10-16 05:23:23,855 MigrationManager.java:454 - Update table 'reaper_db/schema_migration_leader' From org.apache.cassandra.config.CFMetaData@53f4fb43[cfId=2e6ebc00-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=schema_migration_leader,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, 
options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [leader leader_hostname took_lead_at]],partitionKeyColumns=[keyspace_name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[leader, keyspace_name, took_lead_at, leader_hostname],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@2ed82ade[cfId=2e6ebc00-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=schema_migration_leader,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [leader leader_hostname took_lead_at]],partitionKeyColumns=[keyspace_name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[leader, keyspace_name, took_lead_at, leader_hostname],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-16 05:23:23,856 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_by_cluster_and_keyspace' From org.apache.cassandra.config.CFMetaData@c7f7c01[cfId=2fb03cb0-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_schedule_by_cluster_and_keyspace,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name, keyspace_name],clusteringColumns=[repair_schedule_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster_name, repair_schedule_id, keyspace_name],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3bbb4324[cfId=2fb03cb0-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_schedule_by_cluster_and_keyspace,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name, keyspace_name],clusteringColumns=[repair_schedule_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster_name, repair_schedule_id, keyspace_name],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-16 05:23:23,858] [main] i.c.s.c.Migration016 - alter every table to set `dclocal_read_repair_chance` to zero completed. INFO [Native-Transport-Requests-6] 2025-10-16 05:23:23,862 MigrationManager.java:454 - Update table 'reaper_db/running_reapers' From org.apache.cassandra.config.CFMetaData@353e5fdf[cfId=2f197af0-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=running_reapers,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=180, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host]],partitionKeyColumns=[reaper_instance_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@5f76d636[cfId=2f197af0-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=running_reapers,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=180, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host]],partitionKeyColumns=[reaper_instance_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-9] 2025-10-16 05:23:23,863 MigrationManager.java:454 - Update table 'reaper_db/node_operations' From org.apache.cassandra.config.CFMetaData@7bf0e3cf[cfId=35578880-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, 
unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@8c451df[cfId=35578880-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-7] 2025-10-16 05:23:23,863 MigrationManager.java:454 - Update table 'reaper_db/schema_migration' From org.apache.cassandra.config.CFMetaData@43932a32[cfId=2e470fc0-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, executed_at, script],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@12ea51c0[cfId=2e470fc0-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 
'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, executed_at, script],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-8] 2025-10-16 05:23:23,862 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_v1' From org.apache.cassandra.config.CFMetaData@6b0bece0[cfId=303d1360-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity last_run next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, last_run, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@31e23d1d[cfId=303d1360-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity last_run next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, last_run, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-10-16 05:23:23,853 
MigrationManager.java:454 - Update table 'reaper_db/leader' From org.apache.cassandra.config.CFMetaData@7ff21ca8[cfId=329f6d60-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=leader,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=600, default_time_to_live=600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host reaper_instance_id]],partitionKeyColumns=[leader_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host, leader_id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@25db11db[cfId=329f6d60-aa50-11f0-9b3c-b3a6ba283ee6,ksName=reaper_db,cfName=leader,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=600, default_time_to_live=600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host reaper_instance_id]],partitionKeyColumns=[leader_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host, leader_id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-10] 2025-10-16 05:23:23,873 MigrationManager.java:454 - Update table 'reaper_db/node_metrics_v1' From org.apache.cassandra.config.CFMetaData@7e43760c[cfId=3159f510-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, 
datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@77c2720[cfId=3159f510-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy, options={min_threshold=4, max_threshold=32, compaction_window_size=2, compaction_window_unit=MINUTES, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-10-16 05:23:26,316 MigrationManager.java:454 - Update table 'reaper_db/node_operations' From org.apache.cassandra.config.CFMetaData@7bf0e3cf[cfId=35578880-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@26a8a2c0[cfId=35578880-aa50-11f0-91c9-01e025d64bf5,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy, options={min_threshold=4, max_threshold=32, compaction_window_size=30, 
compaction_window_unit=MINUTES, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] WARN [2025-10-16 05:23:29,939] [main] i.c.ReaperApplication - Reaper is ready to get things done! INFO [Native-Transport-Requests-2] 2025-10-16 05:23:39,306 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=svc_monitor_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-16 05:23:42,267 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_uuid_table INFO [Native-Transport-Requests-2] 2025-10-16 05:23:43,120 MigrationManager.java:427 - Update Keyspace 'config_db_uuid' From KeyspaceMetadata{name=config_db_uuid, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@1509319a[cfId=47728b00-aa50-11f0-b61b-4ff51e317e75,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} To KeyspaceMetadata{name=config_db_uuid, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@1509319a[cfId=47728b00-aa50-11f0-b61b-4ff51e317e75,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-16 05:23:43,920 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.service_instance_table INFO [Native-Transport-Requests-3] 2025-10-16 05:23:44,941 MigrationManager.java:454 - Update table 'config_db_uuid/obj_uuid_table' From org.apache.cassandra.config.CFMetaData@1509319a[cfId=47728b00-aa50-11f0-b61b-4ff51e317e75,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@49a1f3f5[cfId=47728b00-aa50-11f0-b61b-4ff51e317e75,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-16 05:23:46,123 MigrationManager.java:454 - Update table 'svc_monitor_keyspace/service_instance_table' From org.apache.cassandra.config.CFMetaData@6e3ee8ed[cfId=4871f9a0-aa50-11f0-b61b-4ff51e317e75,ksName=svc_monitor_keyspace,cfName=service_instance_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@1942d1b3[cfId=4871f9a0-aa50-11f0-b61b-4ff51e317e75,ksName=svc_monitor_keyspace,cfName=service_instance_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:46,977 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_fq_name_table INFO [MigrationStage:1] 2025-10-16 05:23:48,300 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.pool_table INFO [MigrationStage:1] 2025-10-16 05:23:50,290 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_shared_table INFO [Native-Transport-Requests-1] 2025-10-16 05:23:53,044 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@5135fe29[cfId=4df43140-aa50-11f0-91c9-01e025d64bf5,ksName=svc_monitor_keyspace,cfName=loadbalancer_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:53,194 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.loadbalancer_table INFO [Native-Transport-Requests-1] 2025-10-16 05:23:55,169 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@27dc95cc[cfId=4f387110-aa50-11f0-91c9-01e025d64bf5,ksName=useragent,cfName=useragent_keyval_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 
'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:23:55,280 ColumnFamilyStore.java:411 - Initializing useragent.useragent_keyval_table INFO [MigrationStage:1] 2025-10-16 05:23:58,272 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.healthmonitor_table INFO [Native-Transport-Requests-2] 2025-10-16 05:23:59,137 MigrationManager.java:454 - Update table 'svc_monitor_keyspace/healthmonitor_table' From org.apache.cassandra.config.CFMetaData@1f575736[cfId=51034600-aa50-11f0-b61b-4ff51e317e75,ksName=svc_monitor_keyspace,cfName=healthmonitor_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@70b46ecb[cfId=51034600-aa50-11f0-b61b-4ff51e317e75,ksName=svc_monitor_keyspace,cfName=healthmonitor_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-16 05:24:03,404 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=to_bgp_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-16 05:24:05,433 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.route_target_table INFO [Native-Transport-Requests-2] 2025-10-16 
05:24:07,181 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@2bb1863f[cfId=566153d0-aa50-11f0-91c9-01e025d64bf5,ksName=to_bgp_keyspace,cfName=service_chain_ip_address_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:24:07,350 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_ip_address_table INFO [MigrationStage:1] 2025-10-16 05:24:09,349 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_table INFO [Native-Transport-Requests-1] 2025-10-16 05:24:10,155 MigrationManager.java:454 - Update table 'to_bgp_keyspace/service_chain_table' From org.apache.cassandra.config.CFMetaData@185a85f1[cfId=578e1400-aa50-11f0-b61b-4ff51e317e75,ksName=to_bgp_keyspace,cfName=service_chain_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@7d48d845[cfId=578e1400-aa50-11f0-b61b-4ff51e317e75,ksName=to_bgp_keyspace,cfName=service_chain_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, 
value],droppedColumns={},triggers=[],indexes=[]]
INFO [MigrationStage:1] 2025-10-16 05:24:11,313 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_uuid_table
+ curl http://10.0.0.38:8071/webui/login.html
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100  1940  100  1940    0     0   100k       0 --:--:-- --:--:-- --:--:--  105k
++ tr -d '\r'
++ curl -v -X POST -H 'Content-Type: application/x-www-form-urlencoded' -d 'username=reaperUser&password=reaperPass' http://10.0.0.38:8071/login
++ awk '-F: ' '/JSESSIONID/ { print $2 }'
+ jsessionid='JSESSIONID=node05nxp3ly6wnsbsv18ffncem1g0.node0; Path=/'
+ curl --cookie 'JSESSIONID=node05nxp3ly6wnsbsv18ffncem1g0.node0; Path=/' -H 'Content-Type: application/json' -X POST 'http://10.0.0.38:8071/cluster?seedHost=10.0.0.38&jmxPort=7201'
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:01 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:01 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:01 --:--:--     0
+ echo 'Reaper started successfully'
Reaper started successfully
INFO [Native-Transport-Requests-1] 2025-10-16 05:25:14,945 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=dm_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]}
INFO [MigrationStage:1] 2025-10-16 05:25:17,294 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_vn_ip_table
INFO [Native-Transport-Requests-6] 2025-10-16 05:25:19,257 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@37eae5b6[cfId=81571980-aa50-11f0-91c9-01e025d64bf5,ksName=dm_keyspace,cfName=dm_pr_asn_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]
INFO [MigrationStage:1] 2025-10-16 05:25:19,352 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_asn_table
INFO [MigrationStage:1] 2025-10-16 05:25:21,214 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_ni_ipv6_ll_table
INFO [Native-Transport-Requests-5] 2025-10-16 05:25:22,236 MigrationManager.java:454 - Update table 'dm_keyspace/dm_ni_ipv6_ll_table' From org.apache.cassandra.config.CFMetaData@2aa22345[cfId=826f8e60-aa50-11f0-9b3c-b3a6ba283ee6,ksName=dm_keyspace,cfName=dm_ni_ipv6_ll_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, 
memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@110857fd[cfId=826f8e60-aa50-11f0-9b3c-b3a6ba283ee6,ksName=dm_keyspace,cfName=dm_ni_ipv6_ll_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-16 05:25:23,248 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pnf_resource_table INFO [HANDSHAKE-/10.0.0.38] 2025-10-16 05:30:32,825 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,136 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,171 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,265 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,296 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,412 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,429 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,496 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,594 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO 
[AntiEntropyStage:1] 2025-10-16 05:30:33,616 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,675 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,740 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,799 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,862 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,936 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,950 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,971 Validator.java:281 - [repair #3c501700-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-10-16 05:30:33,978 ActiveRepairService.java:452 - [repair #3c44a550-aa51-11f0-b61b-4ff51e317e75] Not a global repair, will not do anticompaction
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,650 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.leader
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,670 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,694 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_cluster_v2
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,720 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.snapshot
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,845 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,869 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,881 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.cluster
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,903 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,933 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-10-16 05:30:37,973 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-10-16 05:30:38,047 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-10-16 05:30:38,077 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:38,093 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-10-16 05:30:38,114 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:38,140 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-10-16 05:30:38,156 Validator.java:281 - [repair #3f124440-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-10-16 05:30:38,162 ActiveRepairService.java:452 - [repair #3f0f8520-aa51-11f0-b61b-4ff51e317e75] Not a global repair, will not do anticompaction
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,066 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.leader
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,095 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,147 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_cluster_v2
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,188 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.snapshot
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,311 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,338 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,358 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.cluster
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,385 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,405 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,452 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,524 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,567 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,606 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,634 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,665 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,705 Validator.java:281 - [repair #424f94f0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-10-16 05:30:43,713 ActiveRepairService.java:452 - [repair #424d4b00-aa51-11f0-b61b-4ff51e317e75] Not a global repair, will not do anticompaction
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,763 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,789 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,816 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,836 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,850 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.leader
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,875 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,894 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,916 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-10-16 05:30:47,937 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:48,000 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-10-16 05:30:48,021 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.snapshot
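NOTE: The Validator.java:281 entries above are this node answering merkle-tree requests for repair sessions coordinated by its peers (10.0.0.241 and 10.0.0.242). A quick sanity check is to tally how many trees were sent per session; a full reaper_db session should show 16, one per table. A minimal sketch, assuming the one-entry-per-line layout of this console.log (the awk field position is an assumption of this layout, not part of the container's tooling):

  $ grep 'Sending completed merkle tree' /var/log/contrail/config-database-cassandra/console.log \
      | awk '{print $8}' | tr -d '#]' | sort | uniq -c | sort -rn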
INFO [AntiEntropyStage:1] 2025-10-16 05:30:48,048 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_cluster_v2
INFO [AntiEntropyStage:1] 2025-10-16 05:30:48,076 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-10-16 05:30:48,142 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.cluster
INFO [AntiEntropyStage:1] 2025-10-16 05:30:48,256 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run
INFO [AntiEntropyStage:1] 2025-10-16 05:30:48,298 Validator.java:281 - [repair #4519d880-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-10-16 05:30:48,320 ActiveRepairService.java:452 - [repair #45174070-aa51-11f0-9b3c-b3a6ba283ee6] Not a global repair, will not do anticompaction
INFO [Repair-Task-2] 2025-10-16 05:30:53,111 RepairRunnable.java:139 - Starting repair command #1 (48552d60-aa51-11f0-91c9-01e025d64bf5), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 5, pull repair: false)
INFO [Repair-Task-2] 2025-10-16 05:30:53,163 RepairSession.java:228 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] new session: will sync /10.0.0.38, /10.0.0.241, /10.0.0.242 on range [(-5893416644393625715,-5883526745566273527], (8081284853295452990,8087372412505995686], (-2981274944881219572,-2976423085650191513], (-1637494631953668305,-1587388840818287539], (-4208232586524590497,-4188011562449307061]] for reaper_db.[cluster, diagnostic_event_subscription, repair_run_by_cluster, repair_schedule_by_cluster_and_keyspace, percent_repaired_by_schedule, schema_migration, repair_run_by_unit, running_repairs, leader, repair_run, repair_schedule_v1, repair_run_by_cluster_v2, repair_unit_v1, schema_migration_leader, snapshot, running_reapers]
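NOTE: The repair options logged above (parallelism: dc_parallel, incremental: false, job threads: 1, explicit token ranges) are consistent with a subrange repair submitted by Cassandra Reaper over JMX rather than with a plain nodetool run. A roughly equivalent manual invocation for the first of the five logged subranges, assuming this Cassandra 3.x image and the JMX port 7201 configured earlier in this log, would be:

  $ nodetool -h 10.0.0.38 -p 7201 repair -full -dcpar -j 1 \
      -st -5893416644393625715 -et -5883526745566273527 reaper_db

Here -full matches "incremental: false", -dcpar matches "parallelism: dc_parallel", and -st/-et carry one (start, end] token bound from the "new session" line; this is an approximation for reproducing the behaviour, not the call Reaper actually makes.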
INFO [RepairJobTask:1] 2025-10-16 05:30:53,269 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for cluster (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-16 05:30:53,271 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,279 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,279 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,285 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,286 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,289 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-16 05:30:53,292 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for cluster
INFO [RepairJobTask:3] 2025-10-16 05:30:53,294 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for cluster
INFO [RepairJobTask:5] 2025-10-16 05:30:53,294 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for cluster
INFO [RepairJobTask:3] 2025-10-16 05:30:53,294 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] cluster is fully synced
INFO [RepairJobTask:4] 2025-10-16 05:30:53,294 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-16 05:30:53,295 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,300 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,301 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,305 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,305 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,308 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-16 05:30:53,313 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-16 05:30:53,313 RepairJob.java:257 - Validating /10.0.0.241
INFO [RepairJobTask:1] 2025-10-16 05:30:53,316 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:4] 2025-10-16 05:30:53,317 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:5] 2025-10-16 05:30:53,317 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:4] 2025-10-16 05:30:53,317 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] diagnostic_event_subscription is fully synced
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,320 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,320 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,322 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,323 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,327 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-16 05:30:53,327 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_cluster
INFO [RepairJobTask:1] 2025-10-16 05:30:53,327 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_cluster
INFO [RepairJobTask:3] 2025-10-16 05:30:53,328 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_cluster
INFO [RepairJobTask:1] 2025-10-16 05:30:53,331 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] repair_run_by_cluster is fully synced
INFO [RepairJobTask:1] 2025-10-16 05:30:53,332 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-16 05:30:53,332 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,339 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,340 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,346 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,346 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,348 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-16 05:30:53,349 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:1] 2025-10-16 05:30:53,349 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:5] 2025-10-16 05:30:53,349 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:1] 2025-10-16 05:30:53,350 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] repair_schedule_by_cluster_and_keyspace is fully synced
INFO [RepairJobTask:3] 2025-10-16 05:30:53,351 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-16 05:30:53,351 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,353 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,353 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,357 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,357 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,360 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38
INFO [RepairJobTask:5] 2025-10-16 05:30:53,361 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:2] 2025-10-16 05:30:53,360 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:2] 2025-10-16 05:30:53,362 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for schema_migration (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-16 05:30:53,363 RepairJob.java:257 - Validating /10.0.0.241
INFO [RepairJobTask:6] 2025-10-16 05:30:53,364 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:1] 2025-10-16 05:30:53,365 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] percent_repaired_by_schedule is fully synced
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,365 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,365 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,368 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,368 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,369 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:30:53,370 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for schema_migration
INFO [RepairJobTask:7] 2025-10-16 05:30:53,372 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:4] 2025-10-16 05:30:53,373 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:6] 2025-10-16 05:30:53,373 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] schema_migration is fully synced
INFO [RepairJobTask:7] 2025-10-16 05:30:53,374 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:7] 2025-10-16 05:30:53,374 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,376 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,377 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,380 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,380 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,382 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-16 05:30:53,382 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:7] 2025-10-16 05:30:53,383 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_unit
INFO [RepairJobTask:1] 2025-10-16 05:30:53,383 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:1] 2025-10-16 05:30:53,384 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] repair_run_by_unit is fully synced
INFO [RepairJobTask:1] 2025-10-16 05:30:53,433 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for running_repairs (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-16 05:30:53,433 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,436 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,436 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,439 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,439 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,441 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-16 05:30:53,442 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for running_repairs
INFO [RepairJobTask:6] 2025-10-16 05:30:53,442 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-10-16 05:30:53,442 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:6] 2025-10-16 05:30:53,443 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] running_repairs is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:30:53,444 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for leader (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:30:53,444 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,447 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,447 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,450 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,450 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,452 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-16 05:30:53,452 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for leader
INFO [RepairJobTask:5] 2025-10-16 05:30:53,452 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:3] 2025-10-16 05:30:53,452 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:5] 2025-10-16 05:30:53,452 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] leader is fully synced
INFO [RepairJobTask:7] 2025-10-16 05:30:53,514 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:7] 2025-10-16 05:30:53,514 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,520 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,520 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,523 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,523 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,539 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-16 05:30:53,541 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run
INFO [RepairJobTask:6] 2025-10-16 05:30:53,541 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run
INFO [RepairJobTask:2] 2025-10-16 05:30:53,541 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run
INFO [RepairJobTask:6] 2025-10-16 05:30:53,542 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] repair_run is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:30:53,550 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:30:53,550 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,555 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,555 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,567 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,568 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,577 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-16 05:30:53,581 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_schedule_v1
INFO [RepairJobTask:3] 2025-10-16 05:30:53,581 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_schedule_v1
INFO [RepairJobTask:6] 2025-10-16 05:30:53,581 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_schedule_v1
INFO [RepairJobTask:3] 2025-10-16 05:30:53,581 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] repair_schedule_v1 is fully synced
INFO [RepairJobTask:3] 2025-10-16 05:30:53,585 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-16 05:30:53,585 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,587 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,587 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,589 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,589 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,595 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:30:53,597 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:3] 2025-10-16 05:30:53,596 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:7] 2025-10-16 05:30:53,597 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:3] 2025-10-16 05:30:53,597 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] repair_run_by_cluster_v2 is fully synced
INFO [RepairJobTask:3] 2025-10-16 05:30:53,601 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-16 05:30:53,602 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,607 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,607 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,610 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,610 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,612 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:30:53,613 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_unit_v1
INFO [RepairJobTask:2] 2025-10-16 05:30:53,613 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_unit_v1
INFO [RepairJobTask:7] 2025-10-16 05:30:53,613 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_unit_v1
INFO [RepairJobTask:2] 2025-10-16 05:30:53,614 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] repair_unit_v1 is fully synced
INFO [RepairJobTask:2] 2025-10-16 05:30:53,617 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for schema_migration_leader (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-16 05:30:53,617 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,619 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,619 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,622 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,622 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,624 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-16 05:30:53,625 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for schema_migration_leader
INFO [RepairJobTask:6] 2025-10-16 05:30:53,625 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for schema_migration_leader
INFO [RepairJobTask:3] 2025-10-16 05:30:53,625 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for schema_migration_leader
INFO [RepairJobTask:6] 2025-10-16 05:30:53,626 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] schema_migration_leader is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:30:53,631 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for snapshot (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:30:53,631 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,633 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,633 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,638 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,638 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,641 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.38
INFO [RepairJobTask:5] 2025-10-16 05:30:53,642 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for snapshot
INFO [RepairJobTask:4] 2025-10-16 05:30:53,642 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for snapshot
INFO [RepairJobTask:1] 2025-10-16 05:30:53,642 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for snapshot
INFO [RepairJobTask:4] 2025-10-16 05:30:53,643 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] snapshot is fully synced
INFO [RepairJobTask:4] 2025-10-16 05:30:53,646 RepairJob.java:234 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for running_reapers (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-16 05:30:53,647 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,649 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,649 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,655 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,655 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:30:53,657 RepairSession.java:180 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:30:53,658 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for running_reapers
INFO [RepairJobTask:1] 2025-10-16 05:30:53,658 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for running_reapers
INFO [RepairJobTask:4] 2025-10-16 05:30:53,658 SyncTask.java:66 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for running_reapers
INFO [RepairJobTask:1] 2025-10-16 05:30:53,659 RepairJob.java:143 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] running_reapers is fully synced
INFO [RepairJobTask:1] 2025-10-16 05:30:53,660 RepairSession.java:270 - [repair #485cce80-aa51-11f0-91c9-01e025d64bf5] Session completed successfully
INFO [RepairJobTask:1] 2025-10-16 05:30:53,661 RepairRunnable.java:261 - Repair session 485cce80-aa51-11f0-91c9-01e025d64bf5 for range [(-5893416644393625715,-5883526745566273527], (8081284853295452990,8087372412505995686], (-2981274944881219572,-2976423085650191513], (-1637494631953668305,-1587388840818287539], (-4208232586524590497,-4188011562449307061]] finished
INFO [RepairJobTask:1] 2025-10-16 05:30:53,665 ActiveRepairService.java:452 - [repair #48552d60-aa51-11f0-91c9-01e025d64bf5] Not a global repair, will not do anticompaction
INFO [InternalResponseStage:8] 2025-10-16 05:30:53,681 RepairRunnable.java:343 - Repair command #1 finished in 0 seconds
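NOTE: "Repair command #1 finished in 0 seconds" closes the command opened at 05:30:53,111, so the five-range session over all 16 reaper_db tables took well under a second. To pull the start/finish pair for every repair command out of this console.log and eyeball durations from the timestamps (an illustrative grep, assuming the one-entry-per-line layout):

  $ grep -E 'Starting repair command #|Repair command #[0-9]+ finished' \
      /var/log/contrail/config-database-cassandra/console.log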
INFO [AntiEntropyStage:1] 2025-10-16 05:30:57,972 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-10-16 05:30:57,988 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,024 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,057 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,072 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.leader
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,090 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,116 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,143 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,164 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,244 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,283 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.snapshot
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,324 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_cluster_v2
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,349 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,361 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.cluster
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,428 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,455 Validator.java:281 - [repair #4b2bcd00-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-10-16 05:30:58,467 ActiveRepairService.java:452 - [repair #4b295c00-aa51-11f0-9b3c-b3a6ba283ee6] Not a global repair, will not do anticompaction
INFO [Repair-Task-3] 2025-10-16 05:31:03,160 RepairRunnable.java:139 - Starting repair command #2 (4e52af80-aa51-11f0-91c9-01e025d64bf5), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false)
INFO [Repair-Task-3] 2025-10-16 05:31:03,186 RepairSession.java:228 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] new session: will sync /10.0.0.38, /10.0.0.241, /10.0.0.242 on range [(1302080116720502536,1375453261952692054]] for reaper_db.[cluster, diagnostic_event_subscription, repair_run_by_cluster, repair_schedule_by_cluster_and_keyspace, percent_repaired_by_schedule, schema_migration, repair_run_by_unit, running_repairs, leader, repair_run, repair_schedule_v1, repair_run_by_cluster_v2, repair_unit_v1, schema_migration_leader, snapshot, running_reapers]
INFO [RepairJobTask:3] 2025-10-16 05:31:03,205 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for cluster (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-16 05:31:03,205 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,211 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,211 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,214 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,214 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,226 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-16 05:31:03,228 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for cluster
INFO [RepairJobTask:1] 2025-10-16 05:31:03,229 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for cluster
INFO [RepairJobTask:1] 2025-10-16 05:31:03,229 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for cluster
INFO [RepairJobTask:1] 2025-10-16 05:31:03,229 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] cluster is fully synced
INFO [RepairJobTask:4] 2025-10-16 05:31:03,236 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-16 05:31:03,236 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,240 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,240 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,244 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,244 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,251 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-16 05:31:03,252 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-10-16 05:31:03,252 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:3] 2025-10-16 05:31:03,253 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-10-16 05:31:03,253 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] diagnostic_event_subscription is fully synced
INFO [RepairJobTask:1] 2025-10-16 05:31:03,257 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-16 05:31:03,257 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,260 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,260 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,263 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,263 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,264 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-16 05:31:03,265 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_cluster
INFO [RepairJobTask:2] 2025-10-16 05:31:03,265 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_cluster
INFO [RepairJobTask:6] 2025-10-16 05:31:03,266 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_cluster
INFO [RepairJobTask:2] 2025-10-16 05:31:03,266 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] repair_run_by_cluster is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:31:03,269 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:31:03,270 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,272 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,272 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,277 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,277 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,279 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-16 05:31:03,279 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:1] 2025-10-16 05:31:03,280 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:1] 2025-10-16 05:31:03,280 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:1] 2025-10-16 05:31:03,281 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] repair_schedule_by_cluster_and_keyspace is fully synced
INFO [RepairJobTask:3] 2025-10-16 05:31:03,285 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-16 05:31:03,285 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,291 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,291 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,294 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,294 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,296 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:31:03,303 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for schema_migration (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:31:03,304 RepairJob.java:257 - Validating /10.0.0.241
INFO [RepairJobTask:3] 2025-10-16 05:31:03,304 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:1] 2025-10-16 05:31:03,305 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,306 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,306 RepairJob.java:270 - Validating /10.0.0.242
INFO [RepairJobTask:6] 2025-10-16 05:31:03,307 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:3] 2025-10-16 05:31:03,307 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] percent_repaired_by_schedule is fully synced
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,311 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,311 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,314 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-16 05:31:03,315 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for schema_migration
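NOTE: A session is only clean if all 16 reaper_db tables reach "is fully synced". A quick per-session check against this console.log, using the session id from repair command #2 above (a hypothetical one-liner, not part of the entrypoint):

  $ grep '4e56a720-aa51-11f0-91c9-01e025d64bf5' /var/log/contrail/config-database-cassandra/console.log \
      | grep -c 'is fully synced'
  # expect 16, one per reaper_db table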
INFO [RepairJobTask:5] 2025-10-16 05:31:03,315 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:2] 2025-10-16 05:31:03,315 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:5] 2025-10-16 05:31:03,316 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] schema_migration is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:31:03,319 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:31:03,320 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,321 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,322 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,342 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,342 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,344 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-16 05:31:03,345 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:1] 2025-10-16 05:31:03,345 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:2] 2025-10-16 05:31:03,345 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_unit
INFO [RepairJobTask:6] 2025-10-16 05:31:03,346 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] repair_run_by_unit is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:31:03,394 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for running_repairs (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:31:03,395 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,398 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,398 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,400 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,400 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,404 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-16 05:31:03,405 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for running_repairs
INFO [RepairJobTask:6] 2025-10-16 05:31:03,405 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:5] 2025-10-16 05:31:03,405 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:6] 2025-10-16 05:31:03,405 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] running_repairs is fully synced
INFO [RepairJobTask:5] 2025-10-16 05:31:03,408 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for leader (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:5] 2025-10-16 05:31:03,409 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,412 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,413 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,416 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,416 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,417 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.38
INFO [RepairJobTask:5] 2025-10-16 05:31:03,421 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for leader
INFO [RepairJobTask:2] 2025-10-16 05:31:03,422 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:6] 2025-10-16 05:31:03,422 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:2] 2025-10-16 05:31:03,424 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] leader is fully synced
INFO [RepairJobTask:2] 2025-10-16 05:31:03,483 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-16 05:31:03,484 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,500 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,500 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,515 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,515 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,516 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-16 05:31:03,517 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run
INFO [RepairJobTask:4] 2025-10-16 05:31:03,517 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run
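NOTE: Every SyncTask.java:66 line in these sessions reports endpoint pairs as consistent, so no streaming phase follows. When trees differ, the same code path logs a "have N range(s) out of sync" message instead (wording recalled from the Cassandra 3.x SyncTask, cited here from memory, so treat it as an assumption); scanning for divergence is then just:

  $ grep -c 'range(s) out of sync' /var/log/contrail/config-database-cassandra/console.log || true
  # 0 means every compared range matched and no data was streamed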
/10.0.0.38 are consistent for repair_run INFO [RepairJobTask:1] 2025-10-16 05:31:03,520 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] repair_run is fully synced INFO [RepairJobTask:1] 2025-10-16 05:31:03,535 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:1] 2025-10-16 05:31:03,535 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,542 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,542 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,547 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,548 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,550 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.38 INFO [RepairJobTask:6] 2025-10-16 05:31:03,550 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-10-16 05:31:03,550 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-10-16 05:31:03,551 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-10-16 05:31:03,551 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] repair_schedule_v1 is fully synced INFO [RepairJobTask:6] 2025-10-16 05:31:03,564 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:6] 2025-10-16 05:31:03,565 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,567 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,567 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,583 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,583 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,585 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:03,585 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-10-16 05:31:03,585 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:4] 2025-10-16 05:31:03,585 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for 
repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-10-16 05:31:03,585 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:7] 2025-10-16 05:31:03,601 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:7] 2025-10-16 05:31:03,602 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,604 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,605 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,609 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,609 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,610 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:03,611 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-10-16 05:31:03,611 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_unit_v1 INFO [RepairJobTask:6] 2025-10-16 05:31:03,611 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-10-16 05:31:03,611 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] repair_unit_v1 is fully synced INFO [RepairJobTask:7] 2025-10-16 05:31:03,618 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for schema_migration_leader (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:7] 2025-10-16 05:31:03,618 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,623 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,623 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,626 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,626 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,628 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.38 INFO [RepairJobTask:5] 2025-10-16 05:31:03,628 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for schema_migration_leader INFO [RepairJobTask:4] 2025-10-16 05:31:03,628 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-10-16 05:31:03,628 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for schema_migration_leader INFO [RepairJobTask:4] 
2025-10-16 05:31:03,628 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] schema_migration_leader is fully synced INFO [RepairJobTask:4] 2025-10-16 05:31:03,631 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for snapshot (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:4] 2025-10-16 05:31:03,631 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,633 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,633 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,638 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,638 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,640 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.38 INFO [RepairJobTask:6] 2025-10-16 05:31:03,642 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for snapshot INFO [RepairJobTask:5] 2025-10-16 05:31:03,642 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for snapshot INFO [RepairJobTask:1] 2025-10-16 05:31:03,642 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for snapshot INFO [RepairJobTask:4] 2025-10-16 05:31:03,642 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] snapshot is fully synced INFO [RepairJobTask:4] 2025-10-16 05:31:03,649 RepairJob.java:234 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for running_reapers (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:4] 2025-10-16 05:31:03,649 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,653 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,653 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,656 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,656 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:03,658 RepairSession.java:180 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.38 INFO [RepairJobTask:7] 2025-10-16 05:31:03,658 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for running_reapers INFO [RepairJobTask:5] 2025-10-16 05:31:03,658 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for running_reapers INFO [RepairJobTask:4] 2025-10-16 05:31:03,658 SyncTask.java:66 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for running_reapers INFO [RepairJobTask:5] 2025-10-16 05:31:03,659 RepairJob.java:143 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] running_reapers is fully synced INFO [RepairJobTask:5] 
2025-10-16 05:31:03,659 RepairSession.java:270 - [repair #4e56a720-aa51-11f0-91c9-01e025d64bf5] Session completed successfully INFO [RepairJobTask:5] 2025-10-16 05:31:03,660 RepairRunnable.java:261 - Repair session 4e56a720-aa51-11f0-91c9-01e025d64bf5 for range [(1302080116720502536,1375453261952692054]] finished INFO [RepairJobTask:5] 2025-10-16 05:31:03,660 ActiveRepairService.java:452 - [repair #4e52af80-aa51-11f0-91c9-01e025d64bf5] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-10-16 05:31:03,667 RepairRunnable.java:343 - Repair command #2 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,051 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,069 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,081 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,093 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,159 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,172 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,189 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,216 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,232 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,259 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,319 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,360 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,390 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,425 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,439 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.diagnostic_event_subscription INFO 
[AntiEntropyStage:1] 2025-10-16 05:31:08,454 Validator.java:281 - [repair #513201b0-aa51-11f0-b61b-4ff51e317e75] Sending completed merkle tree to /10.0.0.242 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-16 05:31:08,464 ActiveRepairService.java:452 - [repair #512cd190-aa51-11f0-b61b-4ff51e317e75] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,313 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,338 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,351 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,365 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,376 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,390 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,401 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,420 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,434 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,536 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,554 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,569 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,580 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,594 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,646 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-16 05:31:13,665 Validator.java:281 - [repair #544de7b0-aa51-11f0-9b3c-b3a6ba283ee6] Sending completed merkle tree to /10.0.0.241 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-16 
05:31:13,677 ActiveRepairService.java:452 - [repair #544c12f0-aa51-11f0-9b3c-b3a6ba283ee6] Not a global repair, will not do anticompaction INFO [Repair-Task-4] 2025-10-16 05:31:17,963 RepairRunnable.java:139 - Starting repair command #3 (572571b0-aa51-11f0-91c9-01e025d64bf5), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-4] 2025-10-16 05:31:17,974 RepairSession.java:228 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] new session: will sync /10.0.0.38, /10.0.0.241, /10.0.0.242 on range [(5110901750269140395,5223334753160547478]] for reaper_db.[cluster, diagnostic_event_subscription, repair_run_by_cluster, repair_schedule_by_cluster_and_keyspace, percent_repaired_by_schedule, schema_migration, repair_run_by_unit, running_repairs, leader, repair_run, repair_schedule_v1, repair_run_by_cluster_v2, repair_unit_v1, schema_migration_leader, snapshot, running_reapers] INFO [RepairJobTask:3] 2025-10-16 05:31:18,019 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for cluster (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:3] 2025-10-16 05:31:18,021 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,023 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,023 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,026 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,026 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,035 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:18,036 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for cluster INFO [RepairJobTask:4] 2025-10-16 05:31:18,036 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for cluster INFO [RepairJobTask:5] 2025-10-16 05:31:18,036 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for cluster INFO [RepairJobTask:4] 2025-10-16 05:31:18,036 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] cluster is fully synced INFO [RepairJobTask:5] 2025-10-16 05:31:18,042 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:5] 2025-10-16 05:31:18,043 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,047 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.241 INFO 
[AntiEntropyStage:1] 2025-10-16 05:31:18,047 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,052 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,052 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,054 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.38 INFO [RepairJobTask:4] 2025-10-16 05:31:18,054 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-10-16 05:31:18,054 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-10-16 05:31:18,054 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-10-16 05:31:18,054 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] diagnostic_event_subscription is fully synced INFO [RepairJobTask:5] 2025-10-16 05:31:18,059 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:5] 2025-10-16 05:31:18,059 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,061 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,061 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,064 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,065 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,066 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.38 INFO [RepairJobTask:4] 2025-10-16 05:31:18,066 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-10-16 05:31:18,067 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-10-16 05:31:18,067 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-10-16 05:31:18,067 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] repair_run_by_cluster is fully synced INFO [RepairJobTask:4] 2025-10-16 05:31:18,072 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:4] 2025-10-16 05:31:18,072 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,080 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle 
tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,080 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,084 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,084 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,086 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38 INFO [RepairJobTask:3] 2025-10-16 05:31:18,087 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-10-16 05:31:18,087 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-10-16 05:31:18,088 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-16 05:31:18,090 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:2] 2025-10-16 05:31:18,095 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:2] 2025-10-16 05:31:18,095 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,105 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,105 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,110 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,110 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,116 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38 INFO [RepairJobTask:3] 2025-10-16 05:31:18,116 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-10-16 05:31:18,116 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-10-16 05:31:18,119 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-10-16 05:31:18,119 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:5] 2025-10-16 05:31:18,127 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for schema_migration (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:5] 2025-10-16 05:31:18,137 
RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,154 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,154 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,158 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,159 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,172 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.38 INFO [RepairJobTask:4] 2025-10-16 05:31:18,172 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for schema_migration INFO [RepairJobTask:2] 2025-10-16 05:31:18,175 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:2] 2025-10-16 05:31:18,176 RepairJob.java:257 - Validating /10.0.0.241 INFO [RepairJobTask:5] 2025-10-16 05:31:18,176 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for schema_migration INFO [RepairJobTask:6] 2025-10-16 05:31:18,176 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for schema_migration INFO [RepairJobTask:5] 2025-10-16 05:31:18,177 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] schema_migration is fully synced INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,177 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,178 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,184 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,185 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,187 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:18,187 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-10-16 05:31:18,188 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_unit INFO [RepairJobTask:2] 2025-10-16 05:31:18,192 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-10-16 05:31:18,194 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] repair_run_by_unit is fully synced INFO [RepairJobTask:6] 2025-10-16 05:31:18,236 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for running_repairs (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:6] 2025-10-16 05:31:18,237 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 
2025-10-16 05:31:18,239 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,239 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,253 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,253 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,257 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.38 INFO [RepairJobTask:4] 2025-10-16 05:31:18,257 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for running_repairs INFO [RepairJobTask:1] 2025-10-16 05:31:18,258 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for running_repairs INFO [RepairJobTask:5] 2025-10-16 05:31:18,258 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for running_repairs INFO [RepairJobTask:1] 2025-10-16 05:31:18,258 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] running_repairs is fully synced INFO [RepairJobTask:2] 2025-10-16 05:31:18,263 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for leader (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:2] 2025-10-16 05:31:18,263 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,268 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,269 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,275 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,275 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,276 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.38 INFO [RepairJobTask:4] 2025-10-16 05:31:18,277 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for leader INFO [RepairJobTask:5] 2025-10-16 05:31:18,277 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for leader INFO [RepairJobTask:6] 2025-10-16 05:31:18,277 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for leader INFO [RepairJobTask:5] 2025-10-16 05:31:18,277 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] leader is fully synced INFO [RepairJobTask:5] 2025-10-16 05:31:18,342 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:5] 2025-10-16 05:31:18,342 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,345 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 
05:31:18,345 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,350 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,350 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,352 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.38 INFO [RepairJobTask:2] 2025-10-16 05:31:18,353 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run INFO [RepairJobTask:4] 2025-10-16 05:31:18,353 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run INFO [RepairJobTask:6] 2025-10-16 05:31:18,354 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run INFO [RepairJobTask:4] 2025-10-16 05:31:18,355 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] repair_run is fully synced INFO [RepairJobTask:4] 2025-10-16 05:31:18,360 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:4] 2025-10-16 05:31:18,361 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,365 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,365 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,369 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,369 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,377 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:18,378 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-10-16 05:31:18,378 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-10-16 05:31:18,378 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-10-16 05:31:18,378 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] repair_schedule_v1 is fully synced INFO [RepairJobTask:6] 2025-10-16 05:31:18,381 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:6] 2025-10-16 05:31:18,381 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,388 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,388 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 
05:31:18,390 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,390 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,394 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38 INFO [RepairJobTask:5] 2025-10-16 05:31:18,395 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-10-16 05:31:18,395 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-10-16 05:31:18,395 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-10-16 05:31:18,395 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:2] 2025-10-16 05:31:18,398 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:2] 2025-10-16 05:31:18,399 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,402 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,402 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,405 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,405 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,406 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.38 INFO [RepairJobTask:5] 2025-10-16 05:31:18,407 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-10-16 05:31:18,407 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-10-16 05:31:18,407 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-10-16 05:31:18,407 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] repair_unit_v1 is fully synced INFO [RepairJobTask:2] 2025-10-16 05:31:18,417 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for schema_migration_leader (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:2] 2025-10-16 05:31:18,417 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,419 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,420 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,421 RepairSession.java:180 - [repair 
#57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,421 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,422 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.38 INFO [RepairJobTask:3] 2025-10-16 05:31:18,423 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for schema_migration_leader INFO [RepairJobTask:4] 2025-10-16 05:31:18,423 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for schema_migration_leader INFO [RepairJobTask:7] 2025-10-16 05:31:18,423 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-10-16 05:31:18,424 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] schema_migration_leader is fully synced INFO [RepairJobTask:5] 2025-10-16 05:31:18,428 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for snapshot (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:5] 2025-10-16 05:31:18,428 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,429 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,429 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,431 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,431 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,432 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:18,432 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for snapshot INFO [RepairJobTask:1] 2025-10-16 05:31:18,432 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for snapshot INFO [RepairJobTask:1] 2025-10-16 05:31:18,433 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for snapshot INFO [RepairJobTask:1] 2025-10-16 05:31:18,433 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] snapshot is fully synced INFO [RepairJobTask:7] 2025-10-16 05:31:18,434 RepairJob.java:234 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for running_reapers (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:7] 2025-10-16 05:31:18,434 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,435 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,435 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,438 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.242 INFO 
[AntiEntropyStage:1] 2025-10-16 05:31:18,438 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:18,440 RepairSession.java:180 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:18,440 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for running_reapers INFO [RepairJobTask:3] 2025-10-16 05:31:18,440 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for running_reapers INFO [RepairJobTask:4] 2025-10-16 05:31:18,440 SyncTask.java:66 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for running_reapers INFO [RepairJobTask:4] 2025-10-16 05:31:18,440 RepairJob.java:143 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] running_reapers is fully synced INFO [RepairJobTask:4] 2025-10-16 05:31:18,441 RepairSession.java:270 - [repair #57271f60-aa51-11f0-91c9-01e025d64bf5] Session completed successfully INFO [RepairJobTask:4] 2025-10-16 05:31:18,441 RepairRunnable.java:261 - Repair session 57271f60-aa51-11f0-91c9-01e025d64bf5 for range [(5110901750269140395,5223334753160547478]] finished INFO [RepairJobTask:4] 2025-10-16 05:31:18,442 ActiveRepairService.java:452 - [repair #572571b0-aa51-11f0-91c9-01e025d64bf5] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-10-16 05:31:18,446 RepairRunnable.java:343 - Repair command #3 finished in 0 seconds INFO [Repair-Task-5] 2025-10-16 05:31:23,275 RepairRunnable.java:139 - Starting repair command #4 (5a4ffdb0-aa51-11f0-91c9-01e025d64bf5), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 4, pull repair: false) INFO [Repair-Task-5] 2025-10-16 05:31:23,311 RepairSession.java:228 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] new session: will sync /10.0.0.38, /10.0.0.241, /10.0.0.242 on range [(-7849744158542127125,-7827623543322959562], (591147859881439134,604780858942883370], (6322120371412670760,6348883303513305445], (3528462649820529262,3537908353339593684]] for reaper_db.[cluster, diagnostic_event_subscription, repair_run_by_cluster, repair_schedule_by_cluster_and_keyspace, percent_repaired_by_schedule, schema_migration, repair_run_by_unit, running_repairs, leader, repair_run, repair_schedule_v1, repair_run_by_cluster_v2, repair_unit_v1, schema_migration_leader, snapshot, running_reapers] INFO [RepairJobTask:1] 2025-10-16 05:31:23,438 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for cluster (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:1] 2025-10-16 05:31:23,439 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,449 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,449 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 
2025-10-16 05:31:23,451 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,452 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,457 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for cluster from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:23,459 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for cluster INFO [RepairJobTask:4] 2025-10-16 05:31:23,459 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for cluster INFO [RepairJobTask:5] 2025-10-16 05:31:23,459 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for cluster INFO [RepairJobTask:4] 2025-10-16 05:31:23,459 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] cluster is fully synced INFO [RepairJobTask:4] 2025-10-16 05:31:23,475 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:4] 2025-10-16 05:31:23,475 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,482 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,482 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,486 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,486 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,499 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for diagnostic_event_subscription from /10.0.0.38 INFO [RepairJobTask:2] 2025-10-16 05:31:23,502 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-10-16 05:31:23,503 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-10-16 05:31:23,503 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-10-16 05:31:23,503 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] diagnostic_event_subscription is fully synced INFO [RepairJobTask:2] 2025-10-16 05:31:23,504 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:2] 2025-10-16 05:31:23,505 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,506 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,506 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,509 
RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,509 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,511 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster from /10.0.0.38 INFO [RepairJobTask:3] 2025-10-16 05:31:23,511 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_cluster INFO [RepairJobTask:5] 2025-10-16 05:31:23,525 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:5] 2025-10-16 05:31:23,525 RepairJob.java:257 - Validating /10.0.0.241 INFO [RepairJobTask:6] 2025-10-16 05:31:23,525 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-10-16 05:31:23,525 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_cluster INFO [RepairJobTask:6] 2025-10-16 05:31:23,526 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] repair_run_by_cluster is fully synced INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,537 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,537 RepairJob.java:270 - Validating /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,543 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.242 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,543 RepairJob.java:270 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,546 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38 INFO [RepairJobTask:1] 2025-10-16 05:31:23,547 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-16 05:31:23,547 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:3] 2025-10-16 05:31:23,547 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:3] 2025-10-16 05:31:23,547 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:6] 2025-10-16 05:31:23,559 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.241, /10.0.0.242, /10.0.0.38]) INFO [RepairJobTask:6] 2025-10-16 05:31:23,560 RepairJob.java:257 - Validating /10.0.0.241 INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,564 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from 
/10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,564 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,566 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,567 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,570 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38
INFO [RepairJobTask:1] 2025-10-16 05:31:23,571 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:2] 2025-10-16 05:31:23,571 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:5] 2025-10-16 05:31:23,571 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:6] 2025-10-16 05:31:23,572 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] percent_repaired_by_schedule is fully synced
INFO [RepairJobTask:5] 2025-10-16 05:31:23,574 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for schema_migration (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:5] 2025-10-16 05:31:23,574 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,577 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,577 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,579 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,580 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,592 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration from /10.0.0.38
INFO [RepairJobTask:2] 2025-10-16 05:31:23,593 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for schema_migration
INFO [RepairJobTask:2] 2025-10-16 05:31:23,593 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:2] 2025-10-16 05:31:23,593 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for schema_migration
INFO [RepairJobTask:2] 2025-10-16 05:31:23,593 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] schema_migration is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:31:23,596 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:31:23,596 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,599 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,599 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,601 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,601 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,604 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_unit from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-16 05:31:23,604 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:3] 2025-10-16 05:31:23,604 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_unit
INFO [RepairJobTask:7] 2025-10-16 05:31:23,618 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_unit
INFO [RepairJobTask:1] 2025-10-16 05:31:23,630 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] repair_run_by_unit is fully synced
INFO [RepairJobTask:1] 2025-10-16 05:31:23,678 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for running_repairs (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:1] 2025-10-16 05:31:23,678 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,680 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,680 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,684 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,684 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,686 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_repairs from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-16 05:31:23,693 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:3] 2025-10-16 05:31:23,693 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-10-16 05:31:23,693 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for running_repairs
INFO [RepairJobTask:3] 2025-10-16 05:31:23,693 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] running_repairs is fully synced
INFO [RepairJobTask:3] 2025-10-16 05:31:23,697 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for leader (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-16 05:31:23,700 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,703 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,707 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,712 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,712 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,720 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for leader from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:31:23,728 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:2] 2025-10-16 05:31:23,728 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for leader
INFO [RepairJobTask:5] 2025-10-16 05:31:23,723 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for leader
INFO [RepairJobTask:7] 2025-10-16 05:31:23,730 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] leader is fully synced
INFO [RepairJobTask:7] 2025-10-16 05:31:23,844 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:7] 2025-10-16 05:31:23,844 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,848 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,849 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,850 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,851 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,852 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:31:23,852 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run
INFO [RepairJobTask:4] 2025-10-16 05:31:23,852 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run
INFO [RepairJobTask:1] 2025-10-16 05:31:23,852 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run
INFO [RepairJobTask:4] 2025-10-16 05:31:23,852 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] repair_run is fully synced
INFO [RepairJobTask:4] 2025-10-16 05:31:23,856 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-16 05:31:23,856 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,859 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,859 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,864 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,866 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,869 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_schedule_v1 from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-16 05:31:23,870 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_schedule_v1
INFO [RepairJobTask:1] 2025-10-16 05:31:23,870 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_schedule_v1
INFO [RepairJobTask:6] 2025-10-16 05:31:23,870 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_schedule_v1
INFO [RepairJobTask:1] 2025-10-16 05:31:23,870 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] repair_schedule_v1 is fully synced
INFO [RepairJobTask:4] 2025-10-16 05:31:23,876 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:4] 2025-10-16 05:31:23,876 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,881 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,881 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,887 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,887 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,892 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:31:23,893 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:3] 2025-10-16 05:31:23,893 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-10-16 05:31:23,893 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:3] 2025-10-16 05:31:23,893 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] repair_run_by_cluster_v2 is fully synced
INFO [RepairJobTask:3] 2025-10-16 05:31:23,897 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:3] 2025-10-16 05:31:23,897 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,899 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,899 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,901 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,902 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,903 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for repair_unit_v1 from /10.0.0.38
INFO [RepairJobTask:4] 2025-10-16 05:31:23,904 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for repair_unit_v1
INFO [RepairJobTask:2] 2025-10-16 05:31:23,905 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for repair_unit_v1
INFO [RepairJobTask:5] 2025-10-16 05:31:23,905 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for repair_unit_v1
INFO [RepairJobTask:2] 2025-10-16 05:31:23,905 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] repair_unit_v1 is fully synced
INFO [RepairJobTask:2] 2025-10-16 05:31:23,917 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for schema_migration_leader (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:2] 2025-10-16 05:31:23,918 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,923 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,923 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,927 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,927 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,929 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for schema_migration_leader from /10.0.0.38
INFO [RepairJobTask:3] 2025-10-16 05:31:23,930 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for schema_migration_leader
INFO [RepairJobTask:6] 2025-10-16 05:31:23,930 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for schema_migration_leader
INFO [RepairJobTask:4] 2025-10-16 05:31:23,930 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for schema_migration_leader
INFO [RepairJobTask:6] 2025-10-16 05:31:23,930 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] schema_migration_leader is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:31:23,940 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for snapshot (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:31:23,940 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,943 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,943 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,948 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,949 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,952 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for snapshot from /10.0.0.38
INFO [RepairJobTask:5] 2025-10-16 05:31:23,954 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for snapshot
INFO [RepairJobTask:1] 2025-10-16 05:31:23,954 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for snapshot
INFO [RepairJobTask:7] 2025-10-16 05:31:23,962 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for snapshot
INFO [RepairJobTask:6] 2025-10-16 05:31:23,962 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] snapshot is fully synced
INFO [RepairJobTask:6] 2025-10-16 05:31:23,972 RepairJob.java:234 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Requesting merkle trees for running_reapers (to [/10.0.0.241, /10.0.0.242, /10.0.0.38])
INFO [RepairJobTask:6] 2025-10-16 05:31:23,972 RepairJob.java:257 - Validating /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,976 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.241
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,976 RepairJob.java:270 - Validating /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,983 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.242
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,983 RepairJob.java:270 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-10-16 05:31:23,989 RepairSession.java:180 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Received merkle tree for running_reapers from /10.0.0.38
INFO [RepairJobTask:6] 2025-10-16 05:31:23,992 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.242 are consistent for running_reapers
INFO [RepairJobTask:4] 2025-10-16 05:31:23,992 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.241 and /10.0.0.38 are consistent for running_reapers
INFO [RepairJobTask:1] 2025-10-16 05:31:23,993 SyncTask.java:66 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Endpoints /10.0.0.242 and /10.0.0.38 are consistent for running_reapers
INFO [RepairJobTask:4] 2025-10-16 05:31:23,993 RepairJob.java:143 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] running_reapers is fully synced
INFO [RepairJobTask:4] 2025-10-16 05:31:23,994 RepairSession.java:270 - [repair #5a5554e0-aa51-11f0-91c9-01e025d64bf5] Session completed successfully
INFO [RepairJobTask:4] 2025-10-16 05:31:23,995 RepairRunnable.java:261 - Repair session 5a5554e0-aa51-11f0-91c9-01e025d64bf5 for range [(-7849744158542127125,-7827623543322959562], (591147859881439134,604780858942883370], (6322120371412670760,6348883303513305445], (3528462649820529262,3537908353339593684]] finished
INFO [RepairJobTask:4] 2025-10-16 05:31:23,996 ActiveRepairService.java:452 - [repair #5a4ffdb0-aa51-11f0-91c9-01e025d64bf5] Not a global repair, will not do anticompaction
INFO [InternalResponseStage:7] 2025-10-16 05:31:24,015 RepairRunnable.java:343 - Repair command #4 finished in 0 seconds
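
The session above is Cassandra's standard anti-entropy flow, repeated once per table: the coordinator requests a merkle tree from every replica (RepairJob "Requesting merkle trees" / "Validating"), each replica computes and returns its tree (AntiEntropyStage "Received merkle tree"), and SyncTask compares each pair of trees; because every pair matched, no streaming was needed and each table was reported "fully synced". The explicit token ranges in the "Repair session ... finished" line mark this as a subrange (non-global) repair, which is why ActiveRepairService skips anticompaction. A minimal sketch of a summarizer for this log format follows; it is illustrative, not part of the Contrail tooling, and assumes one log entry per line as written to console.log (the script name and report layout are invented for the example).

#!/usr/bin/env python3
# Minimal sketch: summarize Cassandra repair sessions from a console.log
# in the format shown above. Illustrative only; assumes one entry per line.
import re
import sys
from collections import defaultdict

# Matches e.g.:
#   ... RepairJob.java:143 - [repair #<uuid>] schema_migration is fully synced
SYNCED = re.compile(r"\[repair #(?P<sid>[0-9a-f-]+)\] (?P<table>\w+) is fully synced")
# Matches e.g.:
#   ... RepairRunnable.java:261 - Repair session <uuid> for range [...] finished
FINISHED = re.compile(r"Repair session (?P<sid>[0-9a-f-]+) for range .* finished")

def summarize(lines):
    synced = defaultdict(list)  # session id -> tables reported fully synced
    done = set()                # session ids whose "finished" line was seen
    for line in lines:
        m = SYNCED.search(line)
        if m:
            synced[m.group("sid")].append(m.group("table"))
            continue
        m = FINISHED.search(line)
        if m:
            done.add(m.group("sid"))
    for sid, tables in synced.items():
        state = "finished" if sid in done else "in progress"
        print(f"{sid}: {len(tables)} tables synced ({state}): {', '.join(tables)}")

if __name__ == "__main__":
    # Default path matches the log file this container writes to.
    path = sys.argv[1] if len(sys.argv) > 1 else "/var/log/contrail/config-database-cassandra/console.log"
    with open(path) as f:
        summarize(f)

Run against the excerpt above, it would report session 5a5554e0-aa51-11f0-91c9-01e025d64bf5 as finished, with the twelve tables visible here (percent_repaired_by_schedule through running_reapers); earlier entries of the same session outside this excerpt would add to that count.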