++ LOG_DIR=/var/log/contrail ++ export CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra ++ CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra ++ mkdir -p /var/log/contrail/config-database-cassandra ++ log_file=/var/log/contrail/config-database-cassandra/console.log ++ touch /var/log/contrail/config-database-cassandra/console.log ++ chmod 600 /var/log/contrail/config-database-cassandra/console.log ++ exec +++ tee -a /var/log/contrail/config-database-cassandra/console.log +++ date ++ echo 'INFO: =================== Thu Oct 30 05:15:26 UTC 2025 ===================' INFO: =================== Thu Oct 30 05:15:26 UTC 2025 =================== ++ LOG_LOCAL=1 ++ source /functions.sh ++ source /contrail-functions.sh +++ get_default_ip ++++ get_default_nic ++++ get_gateway_nic_for_ip 1 ++++ command -v ip ++++ local ip=1 +++++ ip route get 1 +++++ grep -o 'dev.*' +++++ awk '{print $2}' ++++ local iface=ens3 ++++ [[ ens3 == \l\o ]] ++++ echo ens3 +++ local nic=ens3 +++ get_ip_for_nic ens3 +++ local nic=ens3 +++ get_cidr_for_nic ens3 +++ command -v ip +++ cut -d / -f 1 +++ local nic=ens3 +++ ip addr show dev ens3 +++ grep 'inet ' +++ head -n 1 +++ awk '{print $2}' ++ DEFAULT_LOCAL_IP=10.0.0.48 ++ ENCAP_PRIORITY=MPLSoUDP,MPLSoGRE,VXLAN ++ VXLAN_VN_ID_MODE=automatic ++ DPDK_UIO_DRIVER=uio_pci_generic ++ CPU_CORE_MASK=0x01 ++ SERVICE_CORE_MASK= ++ DPDK_CTRL_THREAD_MASK= ++ HUGE_PAGES= ++ HUGE_PAGES_DIR=/dev/hugepages ++ HUGE_PAGES_1GB=0 ++ HUGE_PAGES_2MB=256 ++ HUGE_PAGES_1GB_DIR= ++ HUGE_PAGES_2MB_DIR= ++ [[ 0 != 0 ]] ++ [[ 0 != 256 ]] ++ [[ -z '' ]] +++ mount -t hugetlbfs +++ awk '/pagesize=2M/{print($3)}' +++ tail -n 1 ++ HUGE_PAGES_2MB_DIR= ++ DPDK_MEM_PER_SOCKET=1024 ++ DPDK_COMMAND_ADDITIONAL_ARGS= ++ NIC_OFFLOAD_ENABLE=False ++ DPDK_ENABLE_VLAN_FWRD=False ++ DIST_SNAT_PROTO_PORT_LIST= ++ CLOUD_ORCHESTRATOR=openstack ++ CLOUD_ADMIN_ROLE=admin ++ AAA_MODE=rbac ++ AUTH_MODE=keystone ++ AUTH_PARAMS= ++ SSL_ENABLE=false ++ SSL_INSECURE=True ++ SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ SERVER_CA_KEYFILE=/etc/contrail/ssl/private/ca-key.pem ++ SELFSIGNED_CERTS_WITH_IPS=True ++ CONTROLLER_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ ANALYTICS_ALARM_ENABLE=True ++ ANALYTICS_SNMP_ENABLE=True ++ ANALYTICSDB_ENABLE=True ++ ANALYTICS_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ ANALYTICSDB_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ ANALYTICS_SNMP_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ ANALYTICS_API_PORT=8081 ++ ANALYTICS_API_INTROSPECT_PORT=8090 ++ ANALYTICSDB_PORT=9160 ++ ANALYTICSDB_CQL_PORT=9042 ++ TOPOLOGY_INTROSPECT_PORT=5921 ++ QUERYENGINE_INTROSPECT_PORT=8091 +++ get_server_list ANALYTICS ':8081 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:8081 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8081 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:8081 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:8081 ' +++ '[' -n '10.0.0.254:8081 10.0.0.48:8081 10.0.0.49:8081 ' ']' +++ echo '10.0.0.254:8081 10.0.0.48:8081 10.0.0.49:8081' ++ ANALYTICS_SERVERS='10.0.0.254:8081 10.0.0.48:8081 10.0.0.49:8081' +++ 
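The repeated get_server_list expansions in this trace turn a comma-separated <ROLE>_NODES value into a "host:port" list with a caller-supplied delimiter (a space for most consumers, a comma for the ZooKeeper and RabbitMQ lists later in the trace). A minimal sketch of that helper, reconstructed from the trace alone; the real implementation lives in /contrail-functions.sh and may differ in detail:

get_server_list() {
    # e.g. get_server_list ANALYTICS ':8081 '  ->  "10.0.0.254:8081 10.0.0.48:8081 10.0.0.49:8081"
    local server_typ=${1}_NODES
    local port_with_delim=$2
    local server_list=
    IFS=',' read -ra server_list <<< "${!server_typ}"
    local extended_server_list=
    for server in "${server_list[@]}"; do
        local server_address
        server_address=$(echo "${server}")                  # the trace shows a plain echo per node
        extended_server_list+="${server_address}${port_with_delim}"
    done
    # drop the single trailing delimiter character before printing
    [ -n "$extended_server_list" ] && echo "${extended_server_list%[, ]}"
}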
get_server_list ANALYTICSDB ':9042 ' +++ local server_typ=ANALYTICSDB_NODES +++ local 'port_with_delim=:9042 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9042 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:9042 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:9042 ' +++ '[' -n '10.0.0.254:9042 10.0.0.48:9042 10.0.0.49:9042 ' ']' +++ echo '10.0.0.254:9042 10.0.0.48:9042 10.0.0.49:9042' ++ ANALYTICSDB_CQL_SERVERS='10.0.0.254:9042 10.0.0.48:9042 10.0.0.49:9042' ++ ANALYTICS_API_VIP= ++ ANALYTICS_ALARM_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ ALARMGEN_INTROSPECT_PORT=5995 ++ BGP_PORT=179 ++ BGP_AUTO_MESH=true ++ BGP_ASN=64512 ++ ENABLE_4BYTE_AS=false ++ APPLY_DEFAULTS=true ++ COLLECTOR_PORT=8086 ++ COLLECTOR_INTROSPECT_PORT=8089 ++ COLLECTOR_SYSLOG_PORT=514 ++ COLLECTOR_SFLOW_PORT=6343 ++ COLLECTOR_IPFIX_PORT=4739 ++ COLLECTOR_PROTOBUF_PORT=3333 ++ COLLECTOR_STRUCTURED_SYSLOG_PORT=3514 ++ SNMPCOLLECTOR_INTROSPECT_PORT=5920 +++ get_server_list ANALYTICS ':8086 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:8086 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8086 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:8086 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:8086 ' +++ '[' -n '10.0.0.254:8086 10.0.0.48:8086 10.0.0.49:8086 ' ']' +++ echo '10.0.0.254:8086 10.0.0.48:8086 10.0.0.49:8086' ++ COLLECTOR_SERVERS='10.0.0.254:8086 10.0.0.48:8086 10.0.0.49:8086' ++ CASSANDRA_PORT=9161 ++ CASSANDRA_CQL_PORT=9041 ++ CASSANDRA_SSL_STORAGE_PORT=7013 ++ CASSANDRA_STORAGE_PORT=7012 ++ CASSANDRA_JMX_LOCAL_PORT=7201 ++ CONFIGDB_CASSANDRA_DRIVER=cql ++ CONFIG_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ CONFIGDB_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ CONFIG_API_PORT=8082 ++ CONFIG_API_INTROSPECT_PORT=8084 ++ CONFIG_API_ADMIN_PORT=8095 ++ CONFIGDB_PORT=9161 ++ CONFIGDB_CQL_PORT=9041 +++ get_server_list CONFIG ':8082 ' +++ local server_typ=CONFIG_NODES +++ local 'port_with_delim=:8082 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8082 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:8082 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:8082 ' +++ '[' -n '10.0.0.254:8082 10.0.0.48:8082 10.0.0.49:8082 ' ']' +++ echo '10.0.0.254:8082 10.0.0.48:8082 10.0.0.49:8082' ++ CONFIG_SERVERS='10.0.0.254:8082 10.0.0.48:8082 10.0.0.49:8082' +++ get_server_list CONFIGDB ':9161 ' +++ local server_typ=CONFIGDB_NODES +++ local 'port_with_delim=:9161 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' 
++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9161 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:9161 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:9161 ' +++ '[' -n '10.0.0.254:9161 10.0.0.48:9161 10.0.0.49:9161 ' ']' +++ echo '10.0.0.254:9161 10.0.0.48:9161 10.0.0.49:9161' ++ CONFIGDB_SERVERS='10.0.0.254:9161 10.0.0.48:9161 10.0.0.49:9161' +++ get_server_list CONFIGDB ':9041 ' +++ local server_typ=CONFIGDB_NODES +++ local 'port_with_delim=:9041 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9041 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:9041 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:9041 ' +++ '[' -n '10.0.0.254:9041 10.0.0.48:9041 10.0.0.49:9041 ' ']' +++ echo '10.0.0.254:9041 10.0.0.48:9041 10.0.0.49:9041' ++ CONFIGDB_CQL_SERVERS='10.0.0.254:9041 10.0.0.48:9041 10.0.0.49:9041' ++ CONFIG_API_VIP= ++ CONFIG_API_SSL_ENABLE=false ++ CONFIG_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ CONFIG_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ CONFIG_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CONFIG_API_WORKER_COUNT=1 ++ CONFIG_API_MAX_REQUESTS=1024 ++ ANALYTICS_API_SSL_ENABLE=false ++ ANALYTICS_API_SSL_INSECURE=True ++ ANALYTICS_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ ANALYTICS_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ ANALYTICS_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CASSANDRA_SSL_ENABLE=false ++ CASSANDRA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ CASSANDRA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ CASSANDRA_SSL_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CASSANDRA_SSL_KEYSTORE_PASSWORD=astrophytum ++ CASSANDRA_SSL_TRUSTSTORE_PASSWORD=ornatum ++ CASSANDRA_SSL_PROTOCOL=TLS ++ CASSANDRA_SSL_ALGORITHM=SunX509 ++ CASSANDRA_SSL_CIPHER_SUITES='[TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]' ++ CASSANDRA_CONFIG_MEMTABLE_FLUSH_WRITER=4 ++ CASSANDRA_CONFIG_CONCURRECT_COMPACTORS=4 ++ CASSANDRA_CONFIG_COMPACTION_THROUGHPUT_MB_PER_SEC=256 ++ CASSANDRA_CONFIG_CONCURRECT_READS=64 ++ CASSANDRA_CONFIG_CONCURRECT_WRITES=64 ++ CASSANDRA_CONFIG_MEMTABLE_ALLOCATION_TYPE=offheap_objects ++ CASSANDRA_REAPER_ENABLED=true ++ CASSANDRA_REAPER_JMX_KEY=reaperJmxKey ++ CASSANDRA_REAPER_JMX_AUTH_USERNAME=reaperUser ++ CASSANDRA_REAPER_JMX_AUTH_PASSWORD=reaperPass ++ CASSANDRA_REAPER_APP_PORT=8071 ++ CASSANDRA_REAPER_ADM_PORT=8072 ++ CONTROL_NODES=10.20.0.254,10.20.0.14,10.20.0.5 ++ CONTROL_INTROSPECT_PORT=8083 ++ DNS_NODES=10.20.0.254,10.20.0.14,10.20.0.5 ++ DNS_SERVER_PORT=53 ++ DNS_INTROSPECT_PORT=8092 ++ RNDC_KEY=xvysmOR8lnUQRBcunkC6vg== ++ USE_EXTERNAL_TFTP=False ++ ZOOKEEPER_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ ZOOKEEPER_PORT=2181 ++ ZOOKEEPER_PORTS=2888:3888 +++ get_server_list ZOOKEEPER :2181, +++ local server_typ=ZOOKEEPER_NODES +++ local 
port_with_delim=:2181, +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+=10.0.0.254:2181, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+=10.0.0.48:2181, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+=10.0.0.49:2181, +++ '[' -n 10.0.0.254:2181,10.0.0.48:2181,10.0.0.49:2181, ']' +++ echo 10.0.0.254:2181,10.0.0.48:2181,10.0.0.49:2181 ++ ZOOKEEPER_SERVERS=10.0.0.254:2181,10.0.0.48:2181,10.0.0.49:2181 +++ get_server_list ZOOKEEPER ':2181 ' +++ local server_typ=ZOOKEEPER_NODES +++ local 'port_with_delim=:2181 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:2181 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:2181 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:2181 ' +++ '[' -n '10.0.0.254:2181 10.0.0.48:2181 10.0.0.49:2181 ' ']' +++ echo '10.0.0.254:2181 10.0.0.48:2181 10.0.0.49:2181' ++ ZOOKEEPER_SERVERS_SPACE_DELIM='10.0.0.254:2181 10.0.0.48:2181 10.0.0.49:2181' ++ RABBITMQ_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ RABBITMQ_NODE_PORT=5673 +++ get_server_list RABBITMQ :5673, +++ local server_typ=RABBITMQ_NODES +++ local port_with_delim=:5673, +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+=10.0.0.254:5673, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+=10.0.0.48:5673, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+=10.0.0.49:5673, +++ '[' -n 10.0.0.254:5673,10.0.0.48:5673,10.0.0.49:5673, ']' +++ echo 10.0.0.254:5673,10.0.0.48:5673,10.0.0.49:5673 ++ RABBITMQ_SERVERS=10.0.0.254:5673,10.0.0.48:5673,10.0.0.49:5673 ++ RABBITMQ_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ RABBITMQ_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ RABBITMQ_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT=true ++ RABBITMQ_VHOST=/ ++ RABBITMQ_USER=guest ++ RABBITMQ_PASSWORD=guest ++ RABBITMQ_USE_SSL=false ++ RABBITMQ_SSL_VER=tlsv1.2 ++ RABBITMQ_CLIENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ RABBITMQ_CLIENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ RABBITMQ_CLIENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ RABBITMQ_HEARTBEAT_INTERVAL=60 ++ RABBITMQ_CLUSTER_PARTITION_HANDLING=autoheal ++ RABBITMQ_MIRRORED_QUEUE_MODE=all ++ REDIS_SERVER_PORT=6379 ++ REDIS_SERVER_PASSWORD= +++ get_server_list ANALYTICS ':6379 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:6379 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:6379 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ 
local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:6379 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:6379 ' +++ '[' -n '10.0.0.254:6379 10.0.0.48:6379 10.0.0.49:6379 ' ']' +++ echo '10.0.0.254:6379 10.0.0.48:6379 10.0.0.49:6379' ++ REDIS_SERVERS='10.0.0.254:6379 10.0.0.48:6379 10.0.0.49:6379' ++ REDIS_LISTEN_ADDRESS= ++ REDIS_PROTECTED_MODE= ++ REDIS_SSL_ENABLE=false ++ REDIS_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ REDIS_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ REDIS_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ redis_ssl_config= ++ KAFKA_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ KAFKA_PORT=9092 +++ get_server_list KAFKA ':9092 ' +++ local server_typ=KAFKA_NODES +++ local 'port_with_delim=:9092 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9092 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.48 +++ local server_address=10.0.0.48 +++ extended_server_list+='10.0.0.48:9092 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.49 +++ local server_address=10.0.0.49 +++ extended_server_list+='10.0.0.49:9092 ' +++ '[' -n '10.0.0.254:9092 10.0.0.48:9092 10.0.0.49:9092 ' ']' +++ echo '10.0.0.254:9092 10.0.0.48:9092 10.0.0.49:9092' ++ KAFKA_SERVERS='10.0.0.254:9092 10.0.0.48:9092 10.0.0.49:9092' ++ KAFKA_SSL_ENABLE=false ++ KAFKA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ KAFKA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ KAFKA_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ KEYSTONE_AUTH_ADMIN_TENANT=admin ++ KEYSTONE_AUTH_ADMIN_USER=admin ++ KEYSTONE_AUTH_ADMIN_PASSWORD=contrail123 ++ KEYSTONE_AUTH_PROJECT_DOMAIN_NAME=Default ++ KEYSTONE_AUTH_USER_DOMAIN_NAME=Default ++ KEYSTONE_AUTH_REGION_NAME=RegionOne ++ KEYSTONE_AUTH_URL_VERSION=/v3 ++ KEYSTONE_AUTH_HOST=10.0.0.254 ++ KEYSTONE_AUTH_PROTO=http ++ KEYSTONE_AUTH_ADMIN_PORT=5000 ++ KEYSTONE_AUTH_PUBLIC_PORT=5000 ++ KEYSTONE_AUTH_URL_TOKENS=/v3/auth/tokens ++ KEYSTONE_AUTH_INSECURE=True ++ KEYSTONE_AUTH_CERTFILE= ++ KEYSTONE_AUTH_KEYFILE= ++ KEYSTONE_AUTH_CA_CERTFILE= ++ KEYSTONE_AUTH_ENDPOINT_TYPE= ++ KEYSTONE_AUTH_SYNC_ON_DEMAND= ++ KEYSTONE_AUTH_INTERFACE=public ++ KUBEMANAGER_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ KUBERNETES_CLUSTER_NAME=k8s ++ KUBERNETES_CNI_META_PLUGIN=multus ++ METADATA_PROXY_SECRET=contrail ++ BARBICAN_TENANT_NAME=service ++ BARBICAN_USER=barbican ++ BARBICAN_PASSWORD=contrail123 ++ AGENT_MODE=kernel ++ EXTERNAL_ROUTERS= ++ SUBCLUSTER= ++ VROUTER_COMPUTE_NODE_ADDRESS= ++ VROUTER_CRYPT_INTERFACE=crypt0 ++ VROUTER_DECRYPT_INTERFACE=decrypt0 ++ VROUTER_DECRYPT_KEY=15 ++ VROUTER_MODULE_OPTIONS= ++ FABRIC_SNAT_HASH_TABLE_SIZE=4096 ++ TSN_EVPN_MODE=False ++ TSN_NODES='[]' ++ PRIORITY_ID= ++ PRIORITY_BANDWIDTH= ++ PRIORITY_SCHEDULING= ++ QOS_QUEUE_ID= ++ QOS_LOGICAL_QUEUES= ++ QOS_DEF_HW_QUEUE=False ++ PRIORITY_TAGGING=True ++ SLO_DESTINATION=collector ++ '[' -n '' ']' ++ SAMPLE_DESTINATION=collector ++ FLOW_EXPORT_RATE=0 ++ WEBUI_NODES=10.0.0.254,10.0.0.48,10.0.0.49 ++ WEBUI_JOB_SERVER_PORT=3000 ++ KUE_UI_PORT=3002 ++ WEBUI_HTTP_LISTEN_PORT=8180 ++ WEBUI_HTTPS_LISTEN_PORT=8143 ++ WEBUI_SSL_KEY_FILE=/etc/contrail/webui_ssl/cs-key.pem ++ 
WEBUI_SSL_CERT_FILE=/etc/contrail/webui_ssl/cs-cert.pem ++ WEBUI_SSL_CIPHERS=ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES256-SHA ++ WEBUI_STATIC_AUTH_USER=admin ++ WEBUI_STATIC_AUTH_PASSWORD=contrail123 ++ WEBUI_STATIC_AUTH_ROLE=cloudAdmin ++ XMPP_SERVER_PORT=5269 ++ XMPP_SSL_ENABLE=false ++ XMPP_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ XMPP_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ XMPP_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ LINKLOCAL_SERVICE_PORT=80 ++ LINKLOCAL_SERVICE_NAME=metadata ++ LINKLOCAL_SERVICE_IP=169.254.169.254 ++ IPFABRIC_SERVICE_PORT=8775 ++ INTROSPECT_SSL_ENABLE=false ++ INTROSPECT_SSL_INSECURE=True ++ INTROSPECT_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ INTROSPECT_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ INTROSPECT_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ INTROSPECT_LISTEN_ALL=True ++ SANDESH_SSL_ENABLE=false ++ SANDESH_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SANDESH_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SANDESH_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SANDESH_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SANDESH_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ METADATA_SSL_ENABLE=false ++ METADATA_SSL_CERTFILE= ++ METADATA_SSL_KEYFILE= ++ METADATA_SSL_CA_CERTFILE= ++ METADATA_SSL_CERT_TYPE= ++ CONFIGURE_IPTABLES=false ++ FWAAS_ENABLE=False ++ CONTAINERD_NAMESPACE=k8s.io ++ TOR_AGENT_OVS_KA=10000 ++ TOR_TYPE=ovs ++ TOR_OVS_PROTOCOL=tcp ++ TORAGENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ TORAGENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ TORAGENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ [[ /v3 == \/\v\2\.\0 ]] ++ [[ openstack == \o\p\e\n\s\t\a\c\k ]] ++ AUTH_MODE=keystone ++ [[ keystone == \k\e\y\s\t\o\n\e ]] ++ AUTH_PARAMS='--admin_password contrail123' ++ AUTH_PARAMS+=' --admin_tenant_name admin' ++ AUTH_PARAMS+=' --admin_user admin' ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ read -r -d '' sandesh_client_config ++ true ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ xmpp_certs_config= ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ analytics_api_ssl_opts= ++ read -r -d '' rabbitmq_config ++ true ++ read -r -d '' rabbit_config ++ true ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ kafka_ssl_config= ++ [[ -n '' ]] ++ collector_stats_config= ++ [[ -z '' ]] ++ is_enabled False ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ export TSN_AGENT_MODE= ++ TSN_AGENT_MODE= ++ [[ -n '' ]] ++ collector_stats_config= ++ [[ -z x ]] ++ RSYSLOGD_XFLOW_LISTEN_PORT=9898 + CONFIG=/etc/cassandra/cassandra.yaml + JVM_OPTIONS_CONFIG=/etc/cassandra/jvm.options + cp /etc/cassandra/cassandra.origin /etc/cassandra/cassandra.yaml + cp /etc/cassandra/jvm.options.origin /etc/cassandra/jvm.options + 
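Near the end of the common environment block above, the trace checks the orchestrator and auth mode before composing AUTH_PARAMS from the KEYSTONE_AUTH_ADMIN_* values. A hedged reconstruction of that branch, using only names visible in this log (the actual conditionals in the entrypoint may be arranged differently):

if [[ "$CLOUD_ORCHESTRATOR" == 'openstack' ]]; then
    AUTH_MODE=keystone                        # mirrors the AUTH_MODE=keystone assignment in the trace
    if [[ "$AUTH_MODE" == 'keystone' ]]; then
        # matches the values logged above: contrail123 / admin / admin
        AUTH_PARAMS="--admin_password ${KEYSTONE_AUTH_ADMIN_PASSWORD}"
        AUTH_PARAMS+=" --admin_tenant_name ${KEYSTONE_AUTH_ADMIN_TENANT}"
        AUTH_PARAMS+=" --admin_user ${KEYSTONE_AUTH_ADMIN_USER}"
    fi
fi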
for i in '{1..10}' ++ find_my_ip_and_order_for_node_list 10.0.0.254,10.0.0.48,10.0.0.49 ++ cut -d ' ' -f 1 ++ local servers=10.0.0.254,10.0.0.48,10.0.0.49 ++ local server_list= ++ IFS=, ++ read -ra server_list +++ get_local_ips +++ tr '\n' , +++ awk '/32 host/ { print f } {f=$2}' +++ cat /proc/net/fib_trie +++ uniq +++ sort +++ grep -vi host ++ local local_ips=,10.0.0.48,10.20.0.14,127.0.0.1,172.17.0.1,, ++ local ord=1 ++ for server in '"${server_list[@]}"' ++ local ret=0 +++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.254'\''))' ++ local server_ip=10.0.0.254 ++ [[ 0 == 0 ]] ++ [[ -n 10.0.0.254 ]] ++ [[ ,10.0.0.48,10.20.0.14,127.0.0.1,172.17.0.1,, =~ ,10\.0\.0\.254, ]] ++ (( ord+=1 )) ++ for server in '"${server_list[@]}"' ++ local ret=0 +++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.48'\''))' ++ local server_ip=10.0.0.48 ++ [[ 0 == 0 ]] ++ [[ -n 10.0.0.48 ]] ++ [[ ,10.0.0.48,10.20.0.14,127.0.0.1,172.17.0.1,, =~ ,10\.0\.0\.48, ]] ++ echo 10.0.0.48 2 ++ return + my_ip=10.0.0.48 + '[' -n 10.0.0.48 ']' + break + '[' -z 10.0.0.48 ']' ++ echo 10.0.0.254,10.0.0.48,10.0.0.49 ++ tr , ' ' ++ wc -w + export CASSANDRA_COUNT=3 + CASSANDRA_COUNT=3 ++ echo 10.0.0.254,10.0.0.48,10.0.0.49 ++ sed 's/,/", "/g' + export 'CASSANDRA_CONNECT_POINTS=10.0.0.254", "10.0.0.48", "10.0.0.49' + CASSANDRA_CONNECT_POINTS='10.0.0.254", "10.0.0.48", "10.0.0.49' ++ echo 10.0.0.254,10.0.0.48,10.0.0.49 ++ cut -d , -f 1,2 + export CASSANDRA_SEEDS=10.0.0.254,10.0.0.48 + CASSANDRA_SEEDS=10.0.0.254,10.0.0.48 + export CASSANDRA_LISTEN_ADDRESS=10.0.0.48 + CASSANDRA_LISTEN_ADDRESS=10.0.0.48 + export CASSANDRA_RPC_ADDRESS=10.0.0.48 + CASSANDRA_RPC_ADDRESS=10.0.0.48 + echo 'INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g' INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g + for yaml in Xmx Xms ++ sed -n 's/.*\(-Xmx[0-9]*[mMgG]\).*/\1/p' ++ echo -Xms1g -Xmx2g + opt=-Xmx2g + [[ -n -Xmx2g ]] ++ echo -Xms1g -Xmx2g ++ sed 's/-Xmx[0-9]*[mMgG]//g' + JVM_EXTRA_OPTS='-Xms1g ' + sed -i 's/^[#]*-Xmx.*/-Xmx2g/g' /etc/cassandra/jvm.options + for yaml in Xmx Xms ++ echo -Xms1g ++ sed -n 's/.*\(-Xms[0-9]*[mMgG]\).*/\1/p' + opt=-Xms1g + [[ -n -Xms1g ]] ++ echo -Xms1g ++ sed 's/-Xms[0-9]*[mMgG]//g' + JVM_EXTRA_OPTS= + sed -i 's/^[#]*-Xms.*/-Xms1g/g' /etc/cassandra/jvm.options + export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201' + JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201' + is_enabled true + local val=true + [[ true == \t\r\u\e ]] + export LOCAL_JMX=no + LOCAL_JMX=no + export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201' + JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201' + is_enabled false + local val=false + [[ false == \t\r\u\e ]] + [[ false == \y\e\s ]] + [[ false == \e\n\a\b\l\e\d ]] + cat + change_variable 
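At this point the entrypoint has worked out this node's place in the cluster: my_ip is the CONFIGDB node found among the local addresses, CASSANDRA_COUNT is the node count, the first two nodes become CASSANDRA_SEEDS, and the listen/rpc addresses are pinned to my_ip. The change_variable calls that follow then patch cassandra.yaml in place. A condensed sketch of these steps using the same commands the trace records (the retry loop, for i in {1..10}, around the IP lookup is omitted):

nodes=$CONFIGDB_NODES                                                      # 10.0.0.254,10.0.0.48,10.0.0.49
my_ip=$(find_my_ip_and_order_for_node_list "$nodes" | cut -d ' ' -f 1)     # -> 10.0.0.48 on this host

export CASSANDRA_COUNT=$(echo "$nodes" | tr ',' ' ' | wc -w)               # -> 3
export CASSANDRA_CONNECT_POINTS=$(echo "$nodes" | sed 's/,/", "/g')        # quoted list for config templates
export CASSANDRA_SEEDS=$(echo "$nodes" | cut -d ',' -f 1,2)                # first two nodes act as seeds
export CASSANDRA_LISTEN_ADDRESS=$my_ip
export CASSANDRA_RPC_ADDRESS=$my_ip

# change_variable, as the sed calls below show it: rewrite "<name>: <value>" lines in cassandra.yaml
change_variable() {
    local VARIABLE_NAME=$1
    local VARIABLE_VALUE=$2
    sed -i "s/.*\(${VARIABLE_NAME}\):.*\([0-9a-z]\)/\1: ${VARIABLE_VALUE}/g" "$CONFIG"
}
change_variable memtable_flush_writers "$CASSANDRA_CONFIG_MEMTABLE_FLUSH_WRITER"   # 4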
memtable_flush_writers 4 + local VARIABLE_NAME=memtable_flush_writers + local VARIABLE_VALUE=4 + sed -i 's/.*\(memtable_flush_writers\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml + change_variable concurrent_compactors 4 + local VARIABLE_NAME=concurrent_compactors + local VARIABLE_VALUE=4 + sed -i 's/.*\(concurrent_compactors\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml + change_variable compaction_throughput_mb_per_sec 256 + local VARIABLE_NAME=compaction_throughput_mb_per_sec + local VARIABLE_VALUE=256 + sed -i 's/.*\(compaction_throughput_mb_per_sec\):.*\([0-9a-z]\)/\1: 256/g' /etc/cassandra/cassandra.yaml + change_variable concurrent_reads 64 + local VARIABLE_NAME=concurrent_reads + local VARIABLE_VALUE=64 + sed -i 's/.*\(concurrent_reads\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml + change_variable concurrent_writes 64 + local VARIABLE_NAME=concurrent_writes + local VARIABLE_VALUE=64 + sed -i 's/.*\(concurrent_writes\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml + change_variable memtable_allocation_type offheap_objects + local VARIABLE_NAME=memtable_allocation_type + local VARIABLE_VALUE=offheap_objects + sed -i 's/.*\(memtable_allocation_type\):.*\([0-9a-z]\)/\1: offheap_objects/g' /etc/cassandra/cassandra.yaml + log_levels_map=([SYS_DEBUG]='DEBUG' [SYS_INFO]='INFO' [SYS_NOTICE]='INFO' [SYS_ERROR]="ERROR") + declare -A log_levels_map + log_level=DEBUG + '[' -n DEBUG ']' + sed -i 's/\(; cluster_name=contrail_database; column_index_cache_size_in_kb=2; column_index_size_in_kb=64; commit_failure_policy=stop; commitlog_compression=null; commitlog_directory=/var/lib/cassandra/commitlog; commitlog_max_compression_buffers_in_pool=3; commitlog_periodic_queue_size=-1; commitlog_segment_size_in_mb=32; commitlog_sync=periodic; commitlog_sync_batch_window_in_ms=NaN; commitlog_sync_period_in_ms=10000; commitlog_total_space_in_mb=null; compaction_large_partition_warning_threshold_mb=100; compaction_throughput_mb_per_sec=256; concurrent_compactors=4; concurrent_counter_writes=32; concurrent_materialized_view_writes=32; concurrent_reads=64; concurrent_replicates=null; concurrent_writes=64; counter_cache_keys_to_save=2147483647; counter_cache_save_period=7200; counter_cache_size_in_mb=null; counter_write_request_timeout_in_ms=5000; credentials_cache_max_entries=1000; credentials_update_interval_in_ms=-1; credentials_validity_in_ms=2000; cross_node_timeout=false; data_file_directories=[Ljava.lang.String;@6b19b79; disk_access_mode=auto; disk_failure_policy=stop; disk_optimization_estimate_percentile=0.95; disk_optimization_page_cross_chance=0.1; disk_optimization_strategy=ssd; dynamic_snitch=true; dynamic_snitch_badness_threshold=0.1; dynamic_snitch_reset_interval_in_ms=600000; dynamic_snitch_update_interval_in_ms=100; enable_materialized_views=true; enable_scripted_user_defined_functions=false; enable_user_defined_functions=false; enable_user_defined_functions_threads=true; encryption_options=null; endpoint_snitch=SimpleSnitch; file_cache_round_up=null; file_cache_size_in_mb=null; gc_log_threshold_in_ms=200; gc_warn_threshold_in_ms=1000; hinted_handoff_disabled_datacenters=[]; hinted_handoff_enabled=true; hinted_handoff_throttle_in_kb=1024; hints_compression=null; hints_directory=null; hints_flush_period_in_ms=10000; incremental_backups=false; index_interval=null; index_summary_capacity_in_mb=null; index_summary_resize_interval_in_minutes=60; initial_token=null; inter_dc_stream_throughput_outbound_megabits_per_sec=200; inter_dc_tcp_nodelay=false; 
internode_authenticator=null; internode_compression=dc; internode_recv_buff_size_in_bytes=0; internode_send_buff_size_in_bytes=0; key_cache_keys_to_save=2147483647; key_cache_save_period=14400; key_cache_size_in_mb=null; listen_address=10.0.0.48; listen_interface=null; listen_interface_prefer_ipv6=false; listen_on_broadcast_address=false; max_hint_window_in_ms=10800000; max_hints_delivery_threads=2; max_hints_file_size_in_mb=128; max_mutation_size_in_kb=null; max_streaming_retries=3; max_value_size_in_mb=256; memtable_allocation_type=offheap_objects; memtable_cleanup_threshold=null; memtable_flush_writers=4; memtable_heap_space_in_mb=null; memtable_offheap_space_in_mb=null; min_free_space_per_drive_in_mb=50; native_transport_max_concurrent_connections=-1; native_transport_max_concurrent_connections_per_ip=-1; native_transport_max_frame_size_in_mb=256; native_transport_max_threads=128; native_transport_port=9042; native_transport_port_ssl=null; num_tokens=256; otc_backlog_expiration_interval_ms=200; otc_coalescing_enough_coalesced_messages=8; otc_coalescing_strategy=DISABLED; otc_coalescing_window_us=200; partitioner=org.apache.cassandra.dht.Murmur3Partitioner; permissions_cache_max_entries=1000; permissions_update_interval_in_ms=-1; permissions_validity_in_ms=2000; phi_convict_threshold=8.0; prepared_statements_cache_size_mb=null; range_request_timeout_in_ms=10000; read_request_timeout_in_ms=5000; request_scheduler=org.apache.cassandra.scheduler.NoScheduler; request_scheduler_id=null; request_scheduler_options=null; request_timeout_in_ms=10000; role_manager=CassandraRoleManager; roles_cache_max_entries=1000; roles_update_interval_in_ms=-1; roles_validity_in_ms=2000; row_cache_class_name=org.apache.cassandra.cache.OHCProvider; row_cache_keys_to_save=2147483647; row_cache_save_period=0; row_cache_size_in_mb=0; rpc_address=10.0.0.48; rpc_interface=null; rpc_interface_prefer_ipv6=false; rpc_keepalive=true; rpc_listen_backlog=50; rpc_max_threads=2147483647; rpc_min_threads=16; rpc_port=9160; rpc_recv_buff_size_in_bytes=null; rpc_send_buff_size_in_bytes=null; rpc_server_type=sync; saved_caches_directory=/var/lib/cassandra/saved_caches; seed_provider=org.apache.cassandra.locator.SimpleSeedProvider{seeds=10.0.0.254,10.0.0.48}; server_encryption_options=; slow_query_log_timeout_in_ms=500; snapshot_before_compaction=false; ssl_storage_port=7001; sstable_preemptive_open_interval_in_mb=50; start_native_transport=true; start_rpc=true; storage_port=7000; stream_throughput_outbound_megabits_per_sec=200; streaming_keep_alive_period_in_secs=300; streaming_socket_timeout_in_ms=86400000; thrift_framed_transport_size_in_mb=15; thrift_max_message_length_in_mb=16; thrift_prepared_statements_cache_size_mb=null; tombstone_failure_threshold=100000; tombstone_warn_threshold=1000; tracetype_query_ttl=86400; tracetype_repair_ttl=604800; transparent_data_encryption_options=org.apache.cassandra.config.TransparentDataEncryptionOptions@2a32de6c; trickle_fsync=false; trickle_fsync_interval_in_kb=10240; truncate_request_timeout_in_ms=60000; unlogged_batch_across_partitions_warn_threshold=10; user_defined_function_fail_timeout=1500; user_defined_function_warn_timeout=500; user_function_timeout_policy=die; windows_timer_interval=1; write_request_timeout_in_ms=2000] INFO [main] 2025-10-30 05:15:28,767 DatabaseDescriptor.java:367 - DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap INFO [main] 2025-10-30 05:15:28,768 DatabaseDescriptor.java:425 - Global memtable on-heap threshold is enabled at 502MB INFO 
[main] 2025-10-30 05:15:28,768 DatabaseDescriptor.java:429 - Global memtable off-heap threshold is enabled at 502MB INFO [main] 2025-10-30 05:15:28,794 RateBasedBackPressure.java:123 - Initialized back-pressure with high ratio: 0.9, factor: 5, flow: FAST, window size: 2000. INFO [main] 2025-10-30 05:15:28,795 DatabaseDescriptor.java:729 - Back-pressure is disabled with strategy org.apache.cassandra.net.RateBasedBackPressure{high_ratio=0.9, factor=5, flow=FAST}. INFO [main] 2025-10-30 05:15:29,159 JMXServerUtils.java:246 - Configured JMX server at: service:jmx:rmi://0.0.0.0/jndi/rmi://0.0.0.0:7201/jmxrmi INFO [main] 2025-10-30 05:15:29,164 CassandraDaemon.java:473 - Hostname: cn-jenkins-deploy-platform-ansible-os-4536-2. INFO [main] 2025-10-30 05:15:29,165 CassandraDaemon.java:480 - JVM vendor/version: OpenJDK 64-Bit Server VM/1.8.0_322 INFO [main] 2025-10-30 05:15:29,167 CassandraDaemon.java:481 - Heap size: 984.000MiB/1.961GiB INFO [main] 2025-10-30 05:15:29,177 CassandraDaemon.java:486 - Code Cache Non-heap memory: init = 2555904(2496K) used = 4319616(4218K) committed = 4390912(4288K) max = 251658240(245760K) INFO [main] 2025-10-30 05:15:29,178 CassandraDaemon.java:486 - Metaspace Non-heap memory: init = 0(0K) used = 20353624(19876K) committed = 20971520(20480K) max = -1(-1K) INFO [main] 2025-10-30 05:15:29,178 CassandraDaemon.java:486 - Compressed Class Space Non-heap memory: init = 0(0K) used = 2398392(2342K) committed = 2621440(2560K) max = 1073741824(1048576K) INFO [main] 2025-10-30 05:15:29,178 CassandraDaemon.java:486 - Par Eden Space Heap memory: init = 335544320(327680K) used = 100705544(98345K) committed = 335544320(327680K) max = 335544320(327680K) INFO [main] 2025-10-30 05:15:29,178 CassandraDaemon.java:486 - Par Survivor Space Heap memory: init = 41943040(40960K) used = 0(0K) committed = 41943040(40960K) max = 41943040(40960K) INFO [main] 2025-10-30 05:15:29,178 CassandraDaemon.java:486 - CMS Old Gen Heap memory: init = 654311424(638976K) used = 0(0K) committed = 654311424(638976K) max = 1728053248(1687552K) INFO [main] 2025-10-30 05:15:29,179 CassandraDaemon.java:488 - Classpath: 
/opt/cassandra/conf:/opt/cassandra/build/classes/main:/opt/cassandra/build/classes/thrift:/opt/cassandra/lib/airline-0.6.jar:/opt/cassandra/lib/antlr-runtime-3.5.2.jar:/opt/cassandra/lib/apache-cassandra-3.11.3.jar:/opt/cassandra/lib/apache-cassandra-thrift-3.11.3.jar:/opt/cassandra/lib/asm-5.0.4.jar:/opt/cassandra/lib/caffeine-2.2.6.jar:/opt/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar:/opt/cassandra/lib/commons-cli-1.1.jar:/opt/cassandra/lib/commons-codec-1.9.jar:/opt/cassandra/lib/commons-lang3-3.1.jar:/opt/cassandra/lib/commons-math3-3.2.jar:/opt/cassandra/lib/compress-lzf-0.8.4.jar:/opt/cassandra/lib/concurrentlinkedhashmap-lru-1.4.jar:/opt/cassandra/lib/concurrent-trees-2.4.0.jar:/opt/cassandra/lib/disruptor-3.0.1.jar:/opt/cassandra/lib/ecj-4.4.2.jar:/opt/cassandra/lib/guava-18.0.jar:/opt/cassandra/lib/HdrHistogram-2.1.9.jar:/opt/cassandra/lib/high-scale-lib-1.0.6.jar:/opt/cassandra/lib/hppc-0.5.4.jar:/opt/cassandra/lib/jackson-core-asl-1.9.13.jar:/opt/cassandra/lib/jackson-mapper-asl-1.9.13.jar:/opt/cassandra/lib/jamm-0.3.0.jar:/opt/cassandra/lib/javax.inject.jar:/opt/cassandra/lib/jbcrypt-0.3m.jar:/opt/cassandra/lib/jcl-over-slf4j-1.7.7.jar:/opt/cassandra/lib/jctools-core-1.2.1.jar:/opt/cassandra/lib/jflex-1.6.0.jar:/opt/cassandra/lib/jna-4.2.2.jar:/opt/cassandra/lib/joda-time-2.4.jar:/opt/cassandra/lib/json-simple-1.1.jar:/opt/cassandra/lib/jstackjunit-0.0.1.jar:/opt/cassandra/lib/libthrift-0.13.0.jar:/opt/cassandra/lib/log4j-over-slf4j-1.7.7.jar:/opt/cassandra/lib/logback-classic-1.2.9.jar:/opt/cassandra/lib/logback-core-1.2.9.jar:/opt/cassandra/lib/lz4-1.3.0.jar:/opt/cassandra/lib/metrics-core-3.1.5.jar:/opt/cassandra/lib/metrics-jvm-3.1.5.jar:/opt/cassandra/lib/metrics-logback-3.1.5.jar:/opt/cassandra/lib/netty-all-4.1.39.Final.jar:/opt/cassandra/lib/ohc-core-0.4.4.jar:/opt/cassandra/lib/ohc-core-j8-0.4.4.jar:/opt/cassandra/lib/reporter-config3-3.0.3.jar:/opt/cassandra/lib/reporter-config-base-3.0.3.jar:/opt/cassandra/lib/sigar-1.6.4.jar:/opt/cassandra/lib/slf4j-api-1.7.7.jar:/opt/cassandra/lib/snakeyaml-1.11.jar:/opt/cassandra/lib/snappy-java-1.1.1.7.jar:/opt/cassandra/lib/snowball-stemmer-1.3.0.581.1.jar:/opt/cassandra/lib/ST4-4.0.8.jar:/opt/cassandra/lib/stream-2.5.2.jar:/opt/cassandra/lib/thrift-server-0.3.7.jar:/opt/cassandra/lib/jsr223/*/*.jar:/opt/cassandra/lib/jamm-0.3.0.jar INFO [main] 2025-10-30 05:15:29,179 CassandraDaemon.java:490 - JVM Arguments: [-Xloggc:/opt/cassandra/logs/gc.log, -ea, -XX:+UseThreadPriorities, -XX:ThreadPriorityPolicy=42, -XX:+HeapDumpOnOutOfMemoryError, -Xss256k, -XX:StringTableSize=1000003, -XX:+AlwaysPreTouch, -XX:-UseBiasedLocking, -XX:+UseTLAB, -XX:+ResizeTLAB, -XX:+UseNUMA, -XX:+PerfDisableSharedMem, -Djava.net.preferIPv4Stack=true, -Xms1g, -Xmx2g, -XX:+UseParNewGC, -XX:+UseConcMarkSweepGC, -XX:+CMSParallelRemarkEnabled, -XX:SurvivorRatio=8, -XX:MaxTenuringThreshold=1, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:CMSWaitDuration=10000, -XX:+CMSParallelInitialMarkEnabled, -XX:+CMSEdenChunksRecordAlways, -XX:+CMSClassUnloadingEnabled, -XX:+PrintGCDetails, -XX:+PrintGCDateStamps, -XX:+PrintHeapAtGC, -XX:+PrintTenuringDistribution, -XX:+PrintGCApplicationStoppedTime, -XX:+PrintPromotionFailure, -XX:+UseGCLogFileRotation, -XX:NumberOfGCLogFiles=10, -XX:GCLogFileSize=10M, -Xmn400M, -XX:+UseCondCardMark, -XX:CompileCommandFile=/opt/cassandra/conf/hotspot_compiler, -javaagent:/opt/cassandra/lib/jamm-0.3.0.jar, -Dcassandra.jmx.remote.port=7199, -Dcom.sun.management.jmxremote.rmi.port=7199, 
-Dcom.sun.management.jmxremote.authenticate=true, -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password, -Djava.library.path=/opt/cassandra/lib/sigar-bin, -Dcassandra.rpc_port=9161, -Dcassandra.native_transport_port=9041, -Dcassandra.ssl_storage_port=7013, -Dcassandra.storage_port=7012, -Dcassandra.jmx.local.port=7201, -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access, -Dcassandra.jmx.remote.port=7201, -Dcom.sun.management.jmxremote.rmi.port=7201, -Dcassandra.libjemalloc=/usr/lib64/libjemalloc.so.1, -XX:OnOutOfMemoryError=kill -9 %p, -Dlogback.configurationFile=logback.xml, -Dcassandra.logdir=/opt/cassandra/logs, -Dcassandra.storagedir=/opt/cassandra/data, -Dcassandra-foreground=yes] WARN [main] 2025-10-30 05:15:29,234 NativeLibrary.java:187 - Unable to lock JVM memory (ENOMEM). This can result in part of the JVM being swapped out, especially with mmapped I/O enabled. Increase RLIMIT_MEMLOCK or run Cassandra as root. INFO [main] 2025-10-30 05:15:29,236 StartupChecks.java:140 - jemalloc seems to be preloaded from /usr/lib64/libjemalloc.so.1 INFO [main] 2025-10-30 05:15:29,237 StartupChecks.java:176 - JMX is enabled to receive remote connections on port: 7201 INFO [main] 2025-10-30 05:15:29,240 SigarLibrary.java:44 - Initializing SIGAR library INFO [main] 2025-10-30 05:15:29,250 SigarLibrary.java:180 - Checked OS settings and found them configured for optimal performance. WARN [main] 2025-10-30 05:15:29,251 StartupChecks.java:311 - Maximum number of memory map areas per process (vm.max_map_count) 128960 is too low, recommended value: 1048575, you can change it with sysctl. WARN [main] 2025-10-30 05:15:29,276 StartupChecks.java:332 - Directory /var/lib/cassandra/commitlog doesn't exist WARN [main] 2025-10-30 05:15:29,281 StartupChecks.java:332 - Directory /var/lib/cassandra/saved_caches doesn't exist WARN [main] 2025-10-30 05:15:29,282 StartupChecks.java:332 - Directory /opt/cassandra/data/hints doesn't exist INFO [main] 2025-10-30 05:15:29,330 QueryProcessor.java:116 - Initialized prepared statement caches with 10 MB (native) and 10 MB (Thrift) INFO [main] 2025-10-30 05:15:29,982 ColumnFamilyStore.java:411 - Initializing system.IndexInfo INFO [main] 2025-10-30 05:15:31,224 ColumnFamilyStore.java:411 - Initializing system.batches INFO [main] 2025-10-30 05:15:31,249 ColumnFamilyStore.java:411 - Initializing system.paxos INFO [main] 2025-10-30 05:15:31,269 ColumnFamilyStore.java:411 - Initializing system.local INFO [main] 2025-10-30 05:15:31,291 ColumnFamilyStore.java:411 - Initializing system.peers INFO [main] 2025-10-30 05:15:31,299 ColumnFamilyStore.java:411 - Initializing system.peer_events INFO [main] 2025-10-30 05:15:31,307 ColumnFamilyStore.java:411 - Initializing system.range_xfers INFO [main] 2025-10-30 05:15:31,322 ColumnFamilyStore.java:411 - Initializing system.compaction_history INFO [main] 2025-10-30 05:15:31,330 ColumnFamilyStore.java:411 - Initializing system.sstable_activity INFO [main] 2025-10-30 05:15:31,357 ColumnFamilyStore.java:411 - Initializing system.size_estimates INFO [main] 2025-10-30 05:15:31,377 ColumnFamilyStore.java:411 - Initializing system.available_ranges INFO [main] 2025-10-30 05:15:31,394 ColumnFamilyStore.java:411 - Initializing system.transferred_ranges INFO [main] 2025-10-30 05:15:31,409 ColumnFamilyStore.java:411 - Initializing system.views_builds_in_progress INFO [main] 2025-10-30 05:15:31,417 ColumnFamilyStore.java:411 - Initializing system.built_views INFO [main] 2025-10-30 05:15:31,424 
ColumnFamilyStore.java:411 - Initializing system.hints INFO [main] 2025-10-30 05:15:31,445 ColumnFamilyStore.java:411 - Initializing system.batchlog INFO [main] 2025-10-30 05:15:31,451 ColumnFamilyStore.java:411 - Initializing system.prepared_statements INFO [main] 2025-10-30 05:15:31,461 ColumnFamilyStore.java:411 - Initializing system.schema_keyspaces INFO [main] 2025-10-30 05:15:31,472 ColumnFamilyStore.java:411 - Initializing system.schema_columnfamilies INFO [main] 2025-10-30 05:15:31,483 ColumnFamilyStore.java:411 - Initializing system.schema_columns INFO [main] 2025-10-30 05:15:31,494 ColumnFamilyStore.java:411 - Initializing system.schema_triggers INFO [main] 2025-10-30 05:15:31,510 ColumnFamilyStore.java:411 - Initializing system.schema_usertypes INFO [main] 2025-10-30 05:15:31,521 ColumnFamilyStore.java:411 - Initializing system.schema_functions INFO [main] 2025-10-30 05:15:31,537 ColumnFamilyStore.java:411 - Initializing system.schema_aggregates INFO [main] 2025-10-30 05:15:31,542 ViewManager.java:137 - Not submitting build tasks for views in keyspace system as storage service is not initialized INFO [main] 2025-10-30 05:15:31,760 ApproximateTime.java:44 - Scheduling approximate time-check task with a precision of 10 milliseconds INFO [main] 2025-10-30 05:15:31,832 ColumnFamilyStore.java:411 - Initializing system_schema.keyspaces INFO [main] 2025-10-30 05:15:31,881 ColumnFamilyStore.java:411 - Initializing system_schema.tables INFO [main] 2025-10-30 05:15:31,895 ColumnFamilyStore.java:411 - Initializing system_schema.columns INFO [main] 2025-10-30 05:15:31,938 ColumnFamilyStore.java:411 - Initializing system_schema.triggers INFO [main] 2025-10-30 05:15:31,949 ColumnFamilyStore.java:411 - Initializing system_schema.dropped_columns INFO [main] 2025-10-30 05:15:32,020 ColumnFamilyStore.java:411 - Initializing system_schema.views INFO [main] 2025-10-30 05:15:32,024 ColumnFamilyStore.java:411 - Initializing system_schema.types INFO [main] 2025-10-30 05:15:32,042 ColumnFamilyStore.java:411 - Initializing system_schema.functions INFO [main] 2025-10-30 05:15:32,052 ColumnFamilyStore.java:411 - Initializing system_schema.aggregates INFO [main] 2025-10-30 05:15:32,063 ColumnFamilyStore.java:411 - Initializing system_schema.indexes INFO [main] 2025-10-30 05:15:32,070 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_schema as storage service is not initialized INFO [MemtableFlushWriter:1] 2025-10-30 05:15:33,456 CacheService.java:112 - Initializing key cache with capacity of 49 MBs. INFO [MemtableFlushWriter:1] 2025-10-30 05:15:33,610 CacheService.java:134 - Initializing row cache with capacity of 0 MBs INFO [MemtableFlushWriter:1] 2025-10-30 05:15:33,612 CacheService.java:163 - Initializing counter cache with capacity of 24 MBs INFO [MemtableFlushWriter:1] 2025-10-30 05:15:33,626 CacheService.java:174 - Scheduling counter cache save to every 7200 seconds (going to save all keys). 
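Because the Contrail config database shifts every Cassandra port (storage 7012, SSL storage 7013, CQL/native transport 9041, Thrift RPC 9161, JMX 7201, per the -D overrides set earlier), the stock tools need those ports passed explicitly when inspecting this node. For example, using standard nodetool/cqlsh options with the values from this log (add -u/-pw to nodetool if the jmxremote authentication configured above is enforced):

nodetool -h 10.0.0.48 -p 7201 status             # JMX moved from the default 7199 to 7201
cqlsh 10.0.0.48 9041 -e 'DESCRIBE KEYSPACES'     # native transport moved from the default 9042 to 9041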
INFO [CompactionExecutor:4] 2025-10-30 05:15:33,976 BufferPool.java:230 - Global buffer pool is enabled, when pool is exhausted (max is 502.000MiB) it will allocate on heap INFO [main] 2025-10-30 05:15:34,062 StorageService.java:600 - Populating token metadata from system tables INFO [main] 2025-10-30 05:15:34,100 StorageService.java:607 - Token metadata: INFO [pool-4-thread-1] 2025-10-30 05:15:34,165 AutoSavingCache.java:174 - Completed loading (1 ms; 8 keys) KeyCache cache INFO [main] 2025-10-30 05:15:34,179 CommitLog.java:152 - No commitlog files found; skipping replay INFO [main] 2025-10-30 05:15:34,180 StorageService.java:600 - Populating token metadata from system tables INFO [main] 2025-10-30 05:15:34,195 StorageService.java:607 - Token metadata: INFO [main] 2025-10-30 05:15:34,341 QueryProcessor.java:163 - Preloaded 0 prepared statements INFO [main] 2025-10-30 05:15:34,342 StorageService.java:618 - Cassandra version: 3.11.3 INFO [main] 2025-10-30 05:15:34,342 StorageService.java:619 - Thrift API version: 20.1.0 INFO [main] 2025-10-30 05:15:34,342 StorageService.java:620 - CQL supported versions: 3.4.4 (default: 3.4.4) INFO [main] 2025-10-30 05:15:34,342 StorageService.java:622 - Native protocol supported versions: 3/v3, 4/v4, 5/v5-beta (default: 4/v4) INFO [main] 2025-10-30 05:15:34,386 IndexSummaryManager.java:85 - Initializing index summary manager with a memory pool size of 49 MB and a resize interval of 60 minutes INFO [main] 2025-10-30 05:15:34,407 MessagingService.java:761 - Starting Messaging Service on /10.0.0.48:7012 (ens3) WARN [main] 2025-10-30 05:15:34,412 SystemKeyspace.java:1087 - No host ID found, created 4b056332-7cf4-445f-a8a6-c91cacb6a1c7 (Note: This should happen exactly once per node). INFO [main] 2025-10-30 05:15:34,451 OutboundTcpConnection.java:108 - OutboundTcpConnection using coalescing strategy DISABLED INFO [HANDSHAKE-/10.0.0.49] 2025-10-30 05:15:34,524 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.49 INFO [HANDSHAKE-/10.0.0.254] 2025-10-30 05:15:35,177 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [main] 2025-10-30 05:15:35,467 StorageService.java:550 - Unable to gossip with any peers but continuing anyway since node is in its own seed list INFO [main] 2025-10-30 05:15:35,490 StorageService.java:704 - Loading persisted ring state INFO [main] 2025-10-30 05:15:35,495 StorageService.java:822 - Starting up server gossip INFO [main] 2025-10-30 05:15:35,614 StorageService.java:883 - This node will not auto bootstrap because it is configured to be a seed node. INFO [main] 2025-10-30 05:15:35,635 BootStrapper.java:228 - Generated random tokens. 
tokens are [-463204804678985499, 587618003226762405, 2991860336784255239, 639554254015017205, 2680244624312885805, 3480896430855248790, 4590619175805916524, -6173159874534440648, 2777436686166820470, -3429667182702427495, -1926382290171162513, -1431824098557796552, -8035019746340583320, -3752465171281841011, -7201696571648263732, -8629262361111849988, 4280971537932076633, -4747428343030432733, -1065024708608366896, 4929458424837058403, 5887556364178169646, -5259202091033846954, 144184123083797046, 3216440202837586100, 5773955012701634628, 7863067410736253673, 258304206355078176, -1672158157936872931, 6862765843330659917, 6896474707284756633, 7941570712081814583, -2475359916012669429, -3248145846232390926, -4367080470498912143, -3082463087219873248, -6251373059437020702, -5189872748071290820, -4991491798978243848, -201481978985908320, -7166152740405034177, -6233602061778920894, -5265617854384764765, 1049504637851029263, 4900102734734364650, 951783258413818459, -2571148661064320157, -2755707449440863725, 2523136083656166907, -1857724796506634615, 2370571544997794043, -6765759259480411287, 5492959149865408532, -8104028102805435150, -4248368854968735065, 8458479041266680850, -6860158054522006763, 8436651894244339521, 7616632153166141701, 2965091317075666808, 6949982206360572047, 6149675338121747630, -5124103380778050090, 7315284805721557226, 258657458467201538, -5557964694142923942, -8431442422707239635, 5276560457534304183, -7997392427589403477, 5076989869607188900, -4610029104210844354, 2822050558052699969, -5412666877010021368, 5765700130263898879, -350869051798362818, 5289701606944575950, -3405269340059569000, -2311986215237687674, 6243520130722239459, -6488010124545449701, -8178594981807820207, -7059788074975691460, -1800760809078877849, 8519118581776645546, 5967322420771423267, -2063854862611278531, 6462206416944173179, 6875792997308466999, -872631756207821622, -7348854591859399376, -3575240757548197275, -2209170682326311234, 7271108558789276419, -6563887421863618214, 5663531567912224833, 8503627381975514709, 3119975504414049357, -197447825799164838, -8711624614160327600, 1055170877057031781, 5742815448253299850, -7918978881679992866, -6699493526867885073, -2564476753535807074, -3048279950713959606, -4038525623742564369, -7834752276916369162, 414381088678713386, 2257519325928230360, -4024979327632657804, 3055375756625601967, 7979717760415405071, 2712662674374345893, 1521828497905840055, 6425020130049135709, 4827405207832854736, 2041112744397152734, -827968308444111177, -1091598613440751515, 7093064431755358136, -1293382232552385886, -7276921140486535181, -6386770463196717434, 8463788667016589552, -7912418535351815257, 6324529983229935627, -4217511581106855415, -2716532082476042799, -5537152412410719764, 7524842404210454246, -1960622969925667041, 8206281430878394362, 8267281926942945744, 6279108490032650563, 3120257039895098801, 1740993081051891788, 5846078244755635453, 3121537795714563297, 6112851482677179977, 3961013525087107674, 5785999240900030019, -6647822634160379633, -7857462546554434048, -7384042611888002847, -1079784908113921155, -7661854186140913528, 5944690967253507875, 3235935169016164184, 1733198679004278019, -7487753843699048849, 8658206395128997478, -2054398561573699201, -3707205507271686096, 8142980887876596576, -3659554304762911004, 533779584463614891, 1880843649758636955, -1120911503463125812, 7343463795674533343, -6294998097579771766, -6354956780904323636, -4111739584836109834, 3926399269915072966, -5599039357030607717, 5983577477885795631, 4056878117617293700, 
-5200047333814578981, 6032423592836079133, -5718488104278986602, -8633763139295859651, 5340536603567763795, 4149570280465902939, 8323274267001521604, 1866538248003742186, 3917333220373124067, 4711695953066814094, 6116877479717968699, 3774515308811323497, 1469111606355663423, -281746871425200671, -3518685734766816959, 8882108062813786814, 4815194463248174143, -7546720390674201789, -8531687378166954795, 8754660861440872078, -8477211810367734082, -4466656156424759055, -2481749340235792556, 8758164848733491570, -9131465279725711940, 8457397071986792399, -916736372814794075, 4465287569907322283, -6794094132931781720, 3849548248006342028, 3866324842043042581, 7900681199592750138, -412289987331506707, 1732237346601980196, -5581958103364156793, 3457412895226099968, 1894321356117520167, 4744414079544796037, 4075924886073735322, 6587550873674215258, 6720683168937857283, 5788897219181071372, -4648503405327762937, 6566358172334424226, -5158567282874973742, -5585860530730571662, -951361438436193385, 7442718946106951270, 764282094850149944, -4787499395566089640, 5220224668472295288, -7131342554624055531, -7446093629092617237, 1444225402154864113, -3714219370153291059, -8678145454453791612, -4723118079575023524, 2830350370781267456, -1244263070847895896, -3029177844449423646, 7856170517776411968, 7169495725180925646, 3474089669895729191, -4907749365381543918, -9127790424307977585, -3857795752282317914, 955657465906397624, 3841283836759738451, 6339726885152851627, -6978297710775658109, -1538616951082151891, 7908477335243936083, 4190071395029709830, 2913413605914133500, -3746534098123065719, 4280584822134402953, 1209395328704530423, 3064111850176583623, -3687099308035188880, 4895054178459339913, 1387723860445655010, 5182990281741526449, 8081994164102354689, -193263854696002082, -5150755181213423934, 7743438347477783341, -1795610872258678340, 6912315066889652860, 440923715688702603, 3501115751694557200, -6668809521966550038] INFO [main] 2025-10-30 05:15:35,641 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_traces, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=2}}, tables=[org.apache.cassandra.config.CFMetaData@25528f49[cfId=c5e99f16-8677-3914-b17e-960613512345,ksName=system_traces,cfName=sessions,flags=[COMPOUND],params=TableParams{comment=tracing sessions, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=0, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [client command coordinator duration request started_at parameters]],partitionKeyColumns=[session_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[client, command, session_id, coordinator, request, started_at, duration, parameters],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@15368f81[cfId=8826e8e9-e16a-3728-8753-3bc1fc713c25,ksName=system_traces,cfName=events,flags=[COMPOUND],params=TableParams{comment=tracing events, read_repair_chance=0.0, 
dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=0, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | [activity source source_elapsed thread]],partitionKeyColumns=[session_id],clusteringColumns=[event_id],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[activity, event_id, session_id, source, thread, source_elapsed],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-30 05:15:36,106 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_traces as storage service is not initialized INFO [MigrationStage:1] 2025-10-30 05:15:36,113 ColumnFamilyStore.java:411 - Initializing system_traces.events INFO [MigrationStage:1] 2025-10-30 05:15:36,119 ColumnFamilyStore.java:411 - Initializing system_traces.sessions INFO [main] 2025-10-30 05:15:36,163 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_distributed, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@66fd4e23[cfId=759fffad-624b-3181-80ee-fa9a52d1f627,ksName=system_distributed,cfName=repair_history,flags=[COMPOUND],params=TableParams{comment=Repair history, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | [coordinator exception_message exception_stacktrace finished_at parent_id range_begin range_end started_at status participants]],partitionKeyColumns=[keyspace_name, columnfamily_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[status, id, coordinator, finished_at, participants, exception_stacktrace, parent_id, range_end, range_begin, exception_message, keyspace_name, started_at, columnfamily_name],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@3f8cb44f[cfId=deabd734-b99d-3b9c-92e5-fd92eb5abf14,ksName=system_distributed,cfName=parent_repair_history,flags=[COMPOUND],params=TableParams{comment=Repair history, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, 
caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [exception_message exception_stacktrace finished_at keyspace_name started_at columnfamily_names options requested_ranges successful_ranges]],partitionKeyColumns=[parent_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[requested_ranges, exception_message, keyspace_name, successful_ranges, started_at, finished_at, options, exception_stacktrace, parent_id, columnfamily_names],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@480f757[cfId=5582b59f-8e4e-35e1-b913-3acada51eb04,ksName=system_distributed,cfName=view_build_status,flags=[COMPOUND],params=TableParams{comment=Materialized View build status, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UUIDType),partitionColumns=[[] | [status]],partitionKeyColumns=[keyspace_name, view_name],clusteringColumns=[host_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[view_name, status, keyspace_name, host_id],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-30 05:15:36,367 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_distributed as storage service is not initialized INFO [MigrationStage:1] 2025-10-30 05:15:36,371 ColumnFamilyStore.java:411 - Initializing system_distributed.parent_repair_history INFO [MigrationStage:1] 2025-10-30 05:15:36,378 ColumnFamilyStore.java:411 - Initializing system_distributed.repair_history INFO [MigrationStage:1] 2025-10-30 05:15:36,388 ColumnFamilyStore.java:411 - Initializing system_distributed.view_build_status INFO [main] 2025-10-30 05:15:36,423 StorageService.java:1446 - JOINING: Finish joining ring INFO [main] 2025-10-30 05:15:36,611 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_auth, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=1}}, tables=[org.apache.cassandra.config.CFMetaData@581e852d[cfId=5bc52802-de25-35ed-aeab-188eecebb090,ksName=system_auth,cfName=roles,flags=[COMPOUND],params=TableParams{comment=role definitions, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, 
compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [can_login is_superuser salted_hash member_of]],partitionKeyColumns=[role],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[salted_hash, member_of, role, can_login, is_superuser],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@7f042033[cfId=0ecdaa87-f8fb-3e60-88d1-74fb36fe5c0d,ksName=system_auth,cfName=role_members,flags=[COMPOUND],params=TableParams{comment=role memberships lookup table, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | []],partitionKeyColumns=[role],clusteringColumns=[member],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[role, member],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@3c1cc22f[cfId=3afbe79f-2194-31a7-add7-f5ab90d8ec9c,ksName=system_auth,cfName=role_permissions,flags=[COMPOUND],params=TableParams{comment=permissions granted to db roles, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [permissions]],partitionKeyColumns=[role],clusteringColumns=[resource],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[role, resource, permissions],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@114320d3[cfId=5f2fbdad-91f1-3946-bd25-d5da3a5c35ec,ksName=system_auth,cfName=resource_role_permissons_index,flags=[COMPOUND],params=TableParams{comment=index of db roles with permissions granted on a resource, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | []],partitionKeyColumns=[resource],clusteringColumns=[role],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[resource, role],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [HANDSHAKE-/10.0.0.254] 2025-10-30 05:15:36,648 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [MigrationStage:1] 2025-10-30 05:15:36,805 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_auth as storage service is not initialized INFO [MigrationStage:1] 2025-10-30 05:15:36,828 ColumnFamilyStore.java:411 - Initializing system_auth.resource_role_permissons_index INFO [MigrationStage:1] 2025-10-30 05:15:36,849 ColumnFamilyStore.java:411 - Initializing system_auth.role_members INFO [MigrationStage:1] 2025-10-30 05:15:36,857 ColumnFamilyStore.java:411 - Initializing system_auth.role_permissions INFO [MigrationStage:1] 2025-10-30 05:15:36,985 ColumnFamilyStore.java:411 - Initializing system_auth.roles INFO [main] 2025-10-30 05:15:37,040 Gossiper.java:1692 - Waiting for gossip to settle... INFO [GossipStage:1] 2025-10-30 05:15:37,417 Gossiper.java:1055 - Node /10.0.0.49 is now part of the cluster INFO [GossipStage:1] 2025-10-30 05:15:37,419 Gossiper.java:1055 - Node /10.0.0.254 is now part of the cluster INFO [RequestResponseStage-1] 2025-10-30 05:15:37,426 Gossiper.java:1019 - InetAddress /10.0.0.254 is now UP INFO [RequestResponseStage-1] 2025-10-30 05:15:37,444 Gossiper.java:1019 - InetAddress /10.0.0.49 is now UP INFO [HANDSHAKE-/10.0.0.49] 2025-10-30 05:15:37,671 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.49 INFO [main] 2025-10-30 05:15:45,041 Gossiper.java:1723 - No gossip backlog; proceeding INFO [main] 2025-10-30 05:15:45,371 NativeTransportService.java:70 - Netty using native Epoll event loop INFO [main] 2025-10-30 05:15:45,478 Server.java:155 - Using Netty Version: [netty-buffer=netty-buffer-4.1.39.Final.88c2a4c (repository: dirty), netty-codec=netty-codec-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-dns=netty-codec-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-haproxy=netty-codec-haproxy-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http=netty-codec-http-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http2=netty-codec-http2-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-memcache=netty-codec-memcache-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-mqtt=netty-codec-mqtt-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-redis=netty-codec-redis-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-smtp=netty-codec-smtp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-socks=netty-codec-socks-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-stomp=netty-codec-stomp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-xml=netty-codec-xml-4.1.39.Final.88c2a4c (repository: dirty), netty-common=netty-common-4.1.39.Final.88c2a4c (repository: dirty), netty-handler=netty-handler-4.1.39.Final.88c2a4c (repository: dirty), netty-handler-proxy=netty-handler-proxy-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver=netty-resolver-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver-dns=netty-resolver-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-tcnative=netty-tcnative-2.0.25.Final.c46c351, netty-transport=netty-transport-4.1.39.Final.88c2a4c (repository: dirty), 
netty-transport-native-epoll=netty-transport-native-epoll-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-kqueue=netty-transport-native-kqueue-4.1.39.Final.88c2a4cab5 (repository: dirty), netty-transport-native-unix-common=netty-transport-native-unix-common-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-rxtx=netty-transport-rxtx-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-sctp=netty-transport-sctp-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-udt=netty-transport-udt-4.1.39.Final.88c2a4c (repository: dirty)] INFO [main] 2025-10-30 05:15:45,489 Server.java:156 - Starting listening for CQL clients on /10.0.0.48:9041 (unencrypted)... INFO [main] 2025-10-30 05:15:45,577 ThriftServer.java:116 - Binding thrift service to /10.0.0.48:9161 INFO [Thread-2] 2025-10-30 05:15:45,591 ThriftServer.java:133 - Listening for thrift clients... INFO [OptionalTasks:1] 2025-10-30 05:15:47,218 CassandraRoleManager.java:356 - Created default superuser role 'cassandra' INFO [MigrationStage:1] 2025-10-30 05:15:52,588 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration INFO [Native-Transport-Requests-1] 2025-10-30 05:15:52,670 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@5fee7564[cfId=816948d0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=schema_migration_leader,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [leader leader_hostname took_lead_at]],partitionKeyColumns=[keyspace_name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[leader, keyspace_name, took_lead_at, leader_hostname],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:15:52,943 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration_leader INFO [HANDSHAKE-/10.0.0.48] 2025-10-30 05:15:53,622 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.48 WARN [Native-Transport-Requests-2] 2025-10-30 05:15:53,632 TimeFcts.java:99 - The function 'dateof' is deprecated. Use the function 'toTimestamp' instead. 
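At this point the node is accepting plain CQL connections on 10.0.0.48:9041 (unencrypted) and Thrift on 10.0.0.48:9161, and the default 'cassandra' superuser role has just been created, so the instance can be reached with an ordinary CQL client while the reaper_db schema migrations in the surrounding messages run. A minimal sketch of such a check, assuming cqlsh is available on a controller node and the superuser still has its stock password (neither of which is recorded in this log):
# Connect on the non-default CQL port reported above; the credentials are an assumption.
cqlsh 10.0.0.48 9041 -u cassandra -p cassandra -e "DESCRIBE KEYSPACES"
# Once the migrations complete, reaper_db and its tables (schema_migration,
# repair_run, repair_schedule_v1, ...) should be listed alongside the system keyspaces.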
INFO [Native-Transport-Requests-1] 2025-10-30 05:15:53,765 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@1d6d0b0a[cfId=82108550-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=running_reapers,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=180, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host]],partitionKeyColumns=[reaper_instance_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:15:54,475 ColumnFamilyStore.java:411 - Initializing reaper_db.running_reapers INFO [MigrationStage:1] 2025-10-30 05:15:54,630 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_unit_v1 INFO [Native-Transport-Requests-1] 2025-10-30 05:15:55,490 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@616fccf4[cfId=8317bc20-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_schedule_by_cluster_and_keyspace,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name, keyspace_name],clusteringColumns=[repair_schedule_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster_name, repair_schedule_id, keyspace_name],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:15:55,694 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_by_cluster_and_keyspace INFO [MigrationStage:1] 2025-10-30 05:15:56,469 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster INFO [Native-Transport-Requests-1] 2025-10-30 05:15:56,686 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@42382de6[cfId=83ce3ae0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, 
compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [creation_time days_between intensity next_activation owner pause_time repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, id, state, run_history, creation_time, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:15:56,842 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_v1 INFO [MigrationStage:1] 2025-10-30 05:15:57,642 ColumnFamilyStore.java:411 - Initializing reaper_db.cluster INFO [Native-Transport-Requests-1] 2025-10-30 05:15:58,523 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@7669e128[cfId=84e661a0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=snapshot,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cause creation_time owner]],partitionKeyColumns=[cluster, snapshot_name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, owner, cause, creation_time, snapshot_name],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:15:58,627 ColumnFamilyStore.java:411 - Initializing reaper_db.snapshot INFO [MigrationStage:1] 2025-10-30 05:15:59,586 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v1 INFO [Native-Transport-Requests-1] 2025-10-30 05:16:00,050 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@2da29a3[cfId=85cf6210-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state] | [coordinator_host end_token fail_count segment_end_time segment_start_time segment_state start_token 
token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, last_event, id, segment_end_time, state, cluster_name, end_time, end_token, start_token, segment_start_time, segment_state, cause, creation_time, start_time, coordinator_host, token_ranges, owner, repair_parallelism, segment_id, pause_time, repair_unit_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:00,204 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run INFO [MigrationStage:1] 2025-10-30 05:16:00,741 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_unit INFO [Native-Transport-Requests-1] 2025-10-30 05:16:01,464 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@340385ee[cfId=86a74b80-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=leader,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=600, default_time_to_live=600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host reaper_instance_id]],partitionKeyColumns=[leader_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host, leader_id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:01,560 ColumnFamilyStore.java:411 - Initializing reaper_db.leader INFO [Native-Transport-Requests-3] 2025-10-30 05:16:03,014 MigrationManager.java:454 - Update table 'reaper_db/cluster' From org.apache.cassandra.config.CFMetaData@13e5ab92[cfId=84493740-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[partitioner, seed_hosts, name],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6e3782e5[cfId=84493740-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, 
compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner properties seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, name, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-30 05:16:03,739 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@1c012db1[cfId=85cf6210-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state] | [coordinator_host end_token fail_count segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, last_event, id, segment_end_time, state, cluster_name, end_time, end_token, start_token, segment_start_time, segment_state, cause, creation_time, start_time, coordinator_host, token_ranges, owner, repair_parallelism, segment_id, pause_time, repair_unit_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6a6c2486[cfId=85cf6210-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, last_event, id, segment_end_time, state, cluster_name, end_time, end_token, start_token, segment_start_time, segment_state, cause, creation_time, start_time, coordinator_host, token_ranges, owner, 
repair_parallelism, tables, segment_id, pause_time, repair_unit_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-10-30 05:16:04,608 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@545d9d4b[cfId=8886e0f0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=node_metrics_v2,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=864000, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket],clusteringColumns=[host, metric_scope, metric_name, ts, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:04,788 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v2 INFO [MigrationStage:1] 2025-10-30 05:16:05,551 ColumnFamilyStore.java:411 - Initializing reaper_db.node_operations INFO [Native-Transport-Requests-1] 2025-10-30 05:16:06,410 MigrationManager.java:454 - Update table 'reaper_db/cluster' From org.apache.cassandra.config.CFMetaData@13e5ab92[cfId=84493740-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@67461327[cfId=84493740-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, 
memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_contact partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, last_contact, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-30 05:16:07,572 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@14f7e70e[cfId=8a4b4d40-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=diagnostic_event_subscription,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster description export_file_logger export_http_endpoint export_sse events nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[cluster, export_http_endpoint, events, id, export_sse, nodes, export_file_logger, description],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:07,699 ColumnFamilyStore.java:411 - Initializing reaper_db.diagnostic_event_subscription INFO [Native-Transport-Requests-2] 2025-10-30 05:16:08,632 MigrationManager.java:519 - Drop table 'reaper_db/node_metrics_v2' INFO [STREAM-INIT-/10.0.0.49:41878] 2025-10-30 05:16:08,785 StreamResultFuture.java:116 - [Stream #8ae913e0-b54f-11f0-a0a4-19460568179d ID#0] Creating new streaming plan for Bootstrap INFO [STREAM-INIT-/10.0.0.49:41878] 2025-10-30 05:16:08,794 StreamResultFuture.java:123 - [Stream #8ae913e0-b54f-11f0-a0a4-19460568179d, ID#0] Received streaming plan for Bootstrap INFO [STREAM-INIT-/10.0.0.49:41866] 2025-10-30 05:16:08,795 StreamResultFuture.java:123 - [Stream #8ae913e0-b54f-11f0-a0a4-19460568179d, ID#0] Received streaming plan for Bootstrap INFO [STREAM-IN-/10.0.0.49:41878] 2025-10-30 05:16:08,971 StreamResultFuture.java:173 - [Stream #8ae913e0-b54f-11f0-a0a4-19460568179d ID#0] Prepare completed. 
Receiving 0 files(0.000KiB), sending 2 files(2.449KiB) INFO [STREAM-IN-/10.0.0.49:41878] 2025-10-30 05:16:09,073 StreamResultFuture.java:187 - [Stream #8ae913e0-b54f-11f0-a0a4-19460568179d] Session with /10.0.0.49 is complete INFO [STREAM-IN-/10.0.0.49:41878] 2025-10-30 05:16:09,075 StreamResultFuture.java:219 - [Stream #8ae913e0-b54f-11f0-a0a4-19460568179d] All sessions completed INFO [MigrationStage:1] 2025-10-30 05:16:09,496 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v3 INFO [MigrationStage:1] 2025-10-30 05:16:09,987 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster_v2 INFO [MigrationStage:1] 2025-10-30 05:16:11,687 ColumnFamilyStore.java:411 - Initializing reaper_db.running_repairs INFO [MigrationStage:1] 2025-10-30 05:16:12,572 ColumnFamilyStore.java:411 - Initializing reaper_db.percent_repaired_by_schedule INFO [Native-Transport-Requests-1] 2025-10-30 05:16:13,485 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_v1' From org.apache.cassandra.config.CFMetaData@7e6be1ff[cfId=83ce3ae0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [creation_time days_between intensity next_activation owner pause_time repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, id, state, run_history, creation_time, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@393553fb[cfId=83ce3ae0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity next_activation owner pause_time repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, id, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] INFO 
[Native-Transport-Requests-1] 2025-10-30 05:16:15,690 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_v1' From org.apache.cassandra.config.CFMetaData@7e6be1ff[cfId=83ce3ae0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6fa6bec8[cfId=83ce3ae0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity last_run next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, last_run, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-30 05:16:16,503 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@1c012db1[cfId=85cf6210-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, 
compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6fdb43f3[cfId=85cf6210-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-4] 2025-10-30 05:16:17,693 MigrationManager.java:454 - Update table 'reaper_db/node_metrics_v1' From org.apache.cassandra.config.CFMetaData@e946d19[cfId=857708e0-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@28603aa3[cfId=857708e0-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-5] 2025-10-30 05:16:17,693 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@1c012db1[cfId=85cf6210-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, 
creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@573f899b[cfId=85cf6210-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-6] 2025-10-30 05:16:17,693 MigrationManager.java:454 - Update table 'reaper_db/repair_run_by_cluster_v2' From org.apache.cassandra.config.CFMetaData@66a64455[cfId=8ba5fb40-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=repair_run_by_cluster_v2,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimeUUIDType)),partitionColumns=[[] | [repair_run_state]],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, repair_run_state, id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@331f9bc1[cfId=8ba5fb40-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=repair_run_by_cluster_v2,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, 
compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimeUUIDType)),partitionColumns=[[] | [repair_run_state]],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, repair_run_state, id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-10-30 05:16:17,694 MigrationManager.java:454 - Update table 'reaper_db/running_repairs' From org.apache.cassandra.config.CFMetaData@6b17ea60[cfId=8cabf990-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=running_repairs,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [reaper_instance_host reaper_instance_id segment_id]],partitionKeyColumns=[repair_id],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, repair_id, node, segment_id, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6c63c1b3[cfId=8cabf990-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=running_repairs,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [reaper_instance_host reaper_instance_id segment_id]],partitionKeyColumns=[repair_id],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, repair_id, node, segment_id, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-30 05:16:17,695 MigrationManager.java:454 - Update table 'reaper_db/cluster' From org.apache.cassandra.config.CFMetaData@13e5ab92[cfId=84493740-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, 
compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_contact partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, last_contact, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6b01e9e2[cfId=84493740-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_contact partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, last_contact, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-3] 2025-10-30 05:16:17,693 MigrationManager.java:454 - Update table 'reaper_db/diagnostic_event_subscription' From org.apache.cassandra.config.CFMetaData@44c57f55[cfId=8a4b4d40-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=diagnostic_event_subscription,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster description export_file_logger export_http_endpoint export_sse events nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[cluster, export_http_endpoint, events, id, export_sse, nodes, export_file_logger, description],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@4193e070[cfId=8a4b4d40-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=diagnostic_event_subscription,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, 
cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster description export_file_logger export_http_endpoint export_sse events nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[cluster, export_http_endpoint, events, id, export_sse, nodes, export_file_logger, description],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-7] 2025-10-30 05:16:17,697 MigrationManager.java:454 - Update table 'reaper_db/repair_run_by_cluster' From org.apache.cassandra.config.CFMetaData@542fd525[cfId=839bb930-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=repair_run_by_cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@79e5aeaa[cfId=839bb930-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=repair_run_by_cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-8] 2025-10-30 05:16:17,697 MigrationManager.java:454 - Update table 'reaper_db/repair_run_by_unit' From org.apache.cassandra.config.CFMetaData@573a3b5[cfId=862a0530-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | 
[]],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6dac6c23[cfId=862a0530-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-9] 2025-10-30 05:16:17,697 MigrationManager.java:454 - Update table 'reaper_db/percent_repaired_by_schedule' From org.apache.cassandra.config.CFMetaData@6497d699[cfId=8d380cf0-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=percent_repaired_by_schedule,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [keyspace_name percent_repaired table_name ts]],partitionKeyColumns=[cluster_name, repair_schedule_id, time_bucket],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[time_bucket, node, ts, keyspace_name, percent_repaired, repair_schedule_id, table_name, cluster_name],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6374d705[cfId=8d380cf0-b54f-11f0-975e-934a4446dfb5,ksName=reaper_db,cfName=percent_repaired_by_schedule,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [keyspace_name percent_repaired table_name ts]],partitionKeyColumns=[cluster_name, repair_schedule_id, 
time_bucket],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[time_bucket, node, ts, keyspace_name, percent_repaired, repair_schedule_id, table_name, cluster_name],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-10] 2025-10-30 05:16:17,697 MigrationManager.java:454 - Update table 'reaper_db/snapshot' From org.apache.cassandra.config.CFMetaData@7232893[cfId=84e661a0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=snapshot,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cause creation_time owner]],partitionKeyColumns=[cluster, snapshot_name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, owner, cause, creation_time, snapshot_name],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6447502a[cfId=84e661a0-b54f-11f0-a915-75fc21c97809,ksName=reaper_db,cfName=snapshot,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cause creation_time owner]],partitionKeyColumns=[cluster, snapshot_name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, owner, cause, creation_time, snapshot_name],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-10-30 05:16:31,413 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=config_db_uuid, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-30 05:16:33,616 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_uuid_table INFO [Native-Transport-Requests-2] 2025-10-30 05:16:35,123 MigrationManager.java:454 - Update table 'config_db_uuid/obj_uuid_table' From org.apache.cassandra.config.CFMetaData@11b3b730[cfId=99bf72b0-b54f-11f0-975e-934a4446dfb5,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, 
crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@580cb694[cfId=99bf72b0-b54f-11f0-975e-934a4446dfb5,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-4] 2025-10-30 05:16:36,470 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@31adf42a[cfId=9b84c960-b54f-11f0-a915-75fc21c97809,ksName=svc_monitor_keyspace,cfName=service_instance_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:36,619 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.service_instance_table INFO [MigrationStage:1] 2025-10-30 05:16:38,654 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_fq_name_table INFO [Native-Transport-Requests-2] 2025-10-30 05:16:40,023 MigrationManager.java:427 - Update Keyspace 'useragent' From KeyspaceMetadata{name=useragent, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} To 
KeyspaceMetadata{name=useragent, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-30 05:16:42,511 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.pool_table INFO [Native-Transport-Requests-2] 2025-10-30 05:16:43,383 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@1cb9975[cfId=9fa3a070-b54f-11f0-a915-75fc21c97809,ksName=config_db_uuid,cfName=obj_shared_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:43,481 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_shared_table INFO [Native-Transport-Requests-2] 2025-10-30 05:16:44,135 MigrationManager.java:454 - Update table 'svc_monitor_keyspace/pool_table' From org.apache.cassandra.config.CFMetaData@648db58f[cfId=9f0f9dd0-b54f-11f0-975e-934a4446dfb5,ksName=svc_monitor_keyspace,cfName=pool_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@4b364260[cfId=9f0f9dd0-b54f-11f0-975e-934a4446dfb5,ksName=svc_monitor_keyspace,cfName=pool_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:46,930 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.loadbalancer_table INFO [MigrationStage:1] 2025-10-30 05:16:47,909 ColumnFamilyStore.java:411 - Initializing useragent.useragent_keyval_table INFO [Native-Transport-Requests-4] 2025-10-30 05:16:49,458 MigrationManager.java:454 - Update table 'useragent/useragent_keyval_table' From org.apache.cassandra.config.CFMetaData@59b444b[cfId=a2446300-b54f-11f0-975e-934a4446dfb5,ksName=useragent,cfName=useragent_keyval_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@151c8177[cfId=a2446300-b54f-11f0-975e-934a4446dfb5,ksName=useragent,cfName=useragent_keyval_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-3] 2025-10-30 05:16:49,782 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@2b3c16f[cfId=a3740960-b54f-11f0-a915-75fc21c97809,ksName=svc_monitor_keyspace,cfName=healthmonitor_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, 
compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:49,897 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.healthmonitor_table INFO [Native-Transport-Requests-1] 2025-10-30 05:16:52,049 MigrationManager.java:454 - Update table 'config_db_uuid/obj_fq_name_table' From org.apache.cassandra.config.CFMetaData@38b7f4c8[cfId=9cc27980-b54f-11f0-a0a4-19460568179d,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@aaf0533[cfId=9cc27980-b54f-11f0-a0a4-19460568179d,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-3] 2025-10-30 05:16:55,439 MigrationManager.java:427 - Update Keyspace 'to_bgp_keyspace' From KeyspaceMetadata{name=to_bgp_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} To KeyspaceMetadata{name=to_bgp_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-30 05:16:55,855 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.route_target_table INFO [Native-Transport-Requests-1] 2025-10-30 05:16:56,437 MigrationManager.java:454 - Update table 
'to_bgp_keyspace/route_target_table' From org.apache.cassandra.config.CFMetaData@232f4700[cfId=a7054670-b54f-11f0-975e-934a4446dfb5,ksName=to_bgp_keyspace,cfName=route_target_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@7d90d90a[cfId=a7054670-b54f-11f0-975e-934a4446dfb5,ksName=to_bgp_keyspace,cfName=route_target_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:57,649 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_ip_address_table INFO [Native-Transport-Requests-1] 2025-10-30 05:16:59,462 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@7a2c3bf8[cfId=a9391660-b54f-11f0-a915-75fc21c97809,ksName=to_bgp_keyspace,cfName=service_chain_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:16:59,594 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_table INFO [MigrationStage:1] 2025-10-30 05:17:01,554 
ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_uuid_table INFO [Native-Transport-Requests-3] 2025-10-30 05:17:02,099 MigrationManager.java:454 - Update table 'to_bgp_keyspace/service_chain_uuid_table' From org.apache.cassandra.config.CFMetaData@37d0bc3a[cfId=aa656160-b54f-11f0-975e-934a4446dfb5,ksName=to_bgp_keyspace,cfName=service_chain_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@4e60e14b[cfId=aa656160-b54f-11f0-975e-934a4446dfb5,ksName=to_bgp_keyspace,cfName=service_chain_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] + curl http://10.0.0.254:8071/webui/login.html % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 1940 100 1940 0 0 247k 0 --:--:-- --:--:-- --:--:-- 270k + export CASSANDRA_REAPER_JMX_KEY + [[ 10.0.0.254 == \1\0\.\0\.\0\.\4\8 ]] + run_service cassandra-reaper + echo 'Reaper started successfully' Reaper started successfully + [[ -n 1999 ]] + [[ -n 1999 ]] + local owner_opts=1999:1999 + mkdir -p /etc/contrail /var/lib/contrail + chown 1999:1999 /etc/contrail /var/lib/contrail + find /etc/contrail -uid 0 -exec chown 1999:1999 '{}' + + chmod 755 /etc/contrail + do_run_service cassandra-reaper + [[ -n 1999 ]] + [[ -n 1999 ]] + mkdir -p /var/crashes + chmod 777 /var/crashes ++ id -un 1999 + local user_name=contrail + export HOME=/home/contrail + HOME=/home/contrail + mkdir -p /home/contrail + chown -R 1999:1999 /home/contrail + exec setpriv --reuid 1999 --regid 1999 --clear-groups --no-new-privs cassandra-reaper Looking for reaper under /usr WARN [2025-10-30 05:17:31,614] [main] i.c.ReaperApplication - Reaper is ready to get things done! 
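At this point the entrypoint has dropped privileges (setpriv --reuid 1999 --regid 1999) and launched cassandra-reaper, and Reaper reports it is ready; the anti-entropy entries that follow are the full, dc-parallel repairs Reaper drives over its own reaper_db keyspace across 10.0.0.48, 10.0.0.49 and 10.0.0.254. A minimal sketch of how this could be spot-checked from another shell, assuming stock curl and nodetool are available on the node; the URL is the one the entrypoint probes above, while the nodetool line is only an assumed manual equivalent of the logged repair options (parallelism: dc_parallel, incremental: false), not a command this container runs:

    # hypothetical spot-checks, not part of the captured entrypoint
    curl -fsS -o /dev/null -w '%{http_code}\n' http://10.0.0.254:8071/webui/login.html
    # full (non-incremental), datacenter-parallel repair of the Reaper keyspace
    nodetool repair --full -dcpar reaper_db

--full and -dcpar mirror the "incremental: false" and "parallelism: dc_parallel" options visible in the RepairRunnable entry further below.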
INFO [Native-Transport-Requests-1] 2025-10-30 05:18:14,745 MigrationManager.java:427 - Update Keyspace 'dm_keyspace' From KeyspaceMetadata{name=dm_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} To KeyspaceMetadata{name=dm_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-10-30 05:18:15,692 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_vn_ip_table INFO [Native-Transport-Requests-5] 2025-10-30 05:18:16,734 MigrationManager.java:454 - Update table 'dm_keyspace/dm_pr_vn_ip_table' From org.apache.cassandra.config.CFMetaData@851917f[cfId=d6924780-b54f-11f0-975e-934a4446dfb5,ksName=dm_keyspace,cfName=dm_pr_vn_ip_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@4190887c[cfId=d6924780-b54f-11f0-975e-934a4446dfb5,ksName=dm_keyspace,cfName=dm_pr_vn_ip_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:18:17,963 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_asn_table INFO [Native-Transport-Requests-1] 2025-10-30 05:18:19,503 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@34006eba[cfId=d8ee38e0-b54f-11f0-a915-75fc21c97809,ksName=dm_keyspace,cfName=dm_ni_ipv6_ll_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, 
speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-10-30 05:18:19,589 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_ni_ipv6_ll_table INFO [MigrationStage:1] 2025-10-30 05:18:21,669 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pnf_resource_table INFO [Native-Transport-Requests-1] 2025-10-30 05:18:22,543 MigrationManager.java:454 - Update table 'dm_keyspace/dm_pnf_resource_table' From org.apache.cassandra.config.CFMetaData@10466d54[cfId=da255950-b54f-11f0-975e-934a4446dfb5,ksName=dm_keyspace,cfName=dm_pnf_resource_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@1a988e4[cfId=da255950-b54f-11f0-975e-934a4446dfb5,ksName=dm_keyspace,cfName=dm_pnf_resource_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@2c987619, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [HANDSHAKE-/10.0.0.48] 2025-10-30 05:23:33,436 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:33,850 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:33,945 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for 
reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,014 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,035 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,102 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,177 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,192 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,245 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,298 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,316 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,333 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,346 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,361 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,415 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,464 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,519 Validator.java:281 - [repair #94368ee0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:23:34,532 ActiveRepairService.java:452 - [repair #94279ac0-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [HANDSHAKE-/10.0.0.254] 2025-10-30 05:23:36,611 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,069 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,085 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,097 Validator.java:281 - [repair 
#962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,107 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,120 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,141 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,159 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,190 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,201 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,217 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,226 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,234 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,244 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,256 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,307 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,363 Validator.java:281 - [repair #962e8e50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:23:37,374 ActiveRepairService.java:452 - [repair #962c1d50-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,321 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,337 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,355 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,364 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed 
merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,381 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,412 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,427 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,441 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,456 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,471 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,494 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,507 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,523 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,541 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,596 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,651 Validator.java:281 - [repair #99534df0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:23:42,663 ActiveRepairService.java:452 - [repair #994ff290-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,816 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,828 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,836 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,850 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,865 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for 
reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,896 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,926 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,939 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,956 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,967 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:23:43,991 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:23:44,007 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:44,021 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:44,034 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:23:44,091 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:23:44,147 Validator.java:281 - [repair #9a30dcb0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:23:44,156 ActiveRepairService.java:452 - [repair #9a2c96f0-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,080 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,099 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,117 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,133 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,146 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,161 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO 
[AntiEntropyStage:1] 2025-10-30 05:23:47,182 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,195 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,204 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,214 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,228 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,240 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,253 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,262 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,329 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,386 Validator.java:281 - [repair #9c2c5e90-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:23:47,413 ActiveRepairService.java:452 - [repair #9c2b7430-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,367 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,384 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,410 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,429 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,453 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,499 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,523 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,540 
Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,573 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,586 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,596 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,612 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,625 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,640 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,880 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,944 Validator.java:281 - [repair #9f516c50-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:23:52,950 ActiveRepairService.java:452 - [repair #9f505ae0-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,820 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,835 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,846 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,858 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,870 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,890 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,907 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,918 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,929 Validator.java:281 - [repair 
#a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,945 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,964 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:23:53,991 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:23:54,005 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:23:54,025 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:23:54,498 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:23:54,588 Validator.java:281 - [repair #a0300c80-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:23:54,602 ActiveRepairService.java:452 - [repair #a02e5ed0-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [Repair-Task-2] 2025-10-30 05:23:57,134 RepairRunnable.java:139 - Starting repair command #1 (a22cc6e0-b550-11f0-a915-75fc21c97809), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-2] 2025-10-30 05:23:57,173 RepairSession.java:228 - [repair #a2329340-b550-11f0-a915-75fc21c97809] new session: will sync /10.0.0.48, /10.0.0.49, /10.0.0.254 on range [(7524842404210454246,7590266134587615582]] for reaper_db.[repair_run, repair_run_by_unit, repair_run_by_cluster, repair_schedule_v1, percent_repaired_by_schedule, schema_migration_leader, repair_run_by_cluster_v2, repair_unit_v1, cluster, leader, running_reapers, running_repairs, snapshot, diagnostic_event_subscription, repair_schedule_by_cluster_and_keyspace, schema_migration] INFO [RepairJobTask:3] 2025-10-30 05:23:57,264 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:23:57,266 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,277 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,283 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,311 RepairSession.java:180 - [repair 
#a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,312 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,314 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:23:57,319 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:1] 2025-10-30 05:23:57,319 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:1] 2025-10-30 05:23:57,324 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-10-30 05:23:57,324 RepairJob.java:257 - Validating /10.0.0.49 INFO [RepairJobTask:4] 2025-10-30 05:23:57,324 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:2] 2025-10-30 05:23:57,325 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] repair_run is fully synced INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,327 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,327 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,334 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,335 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,338 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:23:57,338 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-10-30 05:23:57,338 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:1] 2025-10-30 05:23:57,338 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-10-30 05:23:57,340 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] repair_run_by_unit is fully synced INFO [RepairJobTask:3] 2025-10-30 05:23:57,346 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:23:57,346 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,348 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,348 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,356 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO 
[AntiEntropyStage:1] 2025-10-30 05:23:57,356 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,357 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:23:57,358 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-10-30 05:23:57,358 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:3] 2025-10-30 05:23:57,358 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-10-30 05:23:57,358 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] repair_run_by_cluster is fully synced INFO [RepairJobTask:4] 2025-10-30 05:23:57,365 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-10-30 05:23:57,365 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,368 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,368 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,376 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,376 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,379 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:23:57,379 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-10-30 05:23:57,379 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-10-30 05:23:57,380 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-10-30 05:23:57,380 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] repair_schedule_v1 is fully synced INFO [RepairJobTask:6] 2025-10-30 05:23:57,388 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:23:57,388 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,391 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,391 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,398 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 
2025-10-30 05:23:57,399 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,400 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:23:57,401 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-10-30 05:23:57,401 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-10-30 05:23:57,401 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-10-30 05:23:57,401 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:5] 2025-10-30 05:23:57,406 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for schema_migration_leader (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:23:57,407 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,409 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,409 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,416 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,416 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,418 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:23:57,419 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-10-30 05:23:57,419 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:4] 2025-10-30 05:23:57,419 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-10-30 05:23:57,421 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] schema_migration_leader is fully synced INFO [RepairJobTask:3] 2025-10-30 05:23:57,426 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:23:57,426 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,428 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,429 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,453 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from 
/10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,453 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,455 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.48 INFO [RepairJobTask:5] 2025-10-30 05:23:57,456 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-10-30 05:23:57,456 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-10-30 05:23:57,456 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-10-30 05:23:57,456 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:1] 2025-10-30 05:23:57,466 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-10-30 05:23:57,466 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,469 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,469 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,471 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,472 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,473 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:23:57,474 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-10-30 05:23:57,474 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-10-30 05:23:57,474 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-10-30 05:23:57,474 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] repair_unit_v1 is fully synced INFO [RepairJobTask:4] 2025-10-30 05:23:57,477 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for cluster (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-10-30 05:23:57,477 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,479 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,479 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,482 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,482 RepairJob.java:270 - Validating /10.0.0.48 
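[editor's note] The repair session above (#a2329340-b550-11f0-a915-75fc21c97809, started by repair command #1 with options parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1 against keyspace reaper_db) follows the normal anti-entropy flow for each table: request merkle trees from /10.0.0.49, /10.0.0.254 and /10.0.0.48, validate each endpoint, run pairwise SyncTask consistency checks, then report the table as fully synced. Judging by the reaper_db keyspace these sessions appear to be scheduled externally (e.g. by Cassandra Reaper over JMX); as a hedged illustration only, a roughly equivalent manual invocation from one of the nodes would be something like:

    # full (non-incremental), datacenter-parallel repair of reaper_db with one job thread
    # (flags shown are an assumption about the equivalent nodetool options, not taken from this log)
    nodetool repair -full -dcpar -j 1 reaper_db

When every table reports "is fully synced" and the session ends with "Session completed successfully", the merkle trees matched and no data needed to be streamed between replicas for that token range; "Not a global repair, will not do anticompaction" is likewise expected for these non-incremental, subrange repairs.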
INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,486 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.48 INFO [RepairJobTask:5] 2025-10-30 05:23:57,486 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:1] 2025-10-30 05:23:57,486 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:4] 2025-10-30 05:23:57,487 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:1] 2025-10-30 05:23:57,487 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] cluster is fully synced INFO [RepairJobTask:1] 2025-10-30 05:23:57,492 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for leader (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-10-30 05:23:57,492 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,499 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,499 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,509 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,509 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,512 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.48 INFO [RepairJobTask:6] 2025-10-30 05:23:57,513 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:2] 2025-10-30 05:23:57,513 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:4] 2025-10-30 05:23:57,513 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:2] 2025-10-30 05:23:57,513 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] leader is fully synced INFO [RepairJobTask:2] 2025-10-30 05:23:57,520 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for running_reapers (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-10-30 05:23:57,520 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,524 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,524 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,528 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,528 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,529 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.48 INFO [RepairJobTask:6] 2025-10-30 05:23:57,530 
SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:5] 2025-10-30 05:23:57,530 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:3] 2025-10-30 05:23:57,530 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:3] 2025-10-30 05:23:57,530 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] running_reapers is fully synced INFO [RepairJobTask:3] 2025-10-30 05:23:57,583 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for running_repairs (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:23:57,583 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,586 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,586 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,589 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,590 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,591 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.48 INFO [RepairJobTask:6] 2025-10-30 05:23:57,591 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:7] 2025-10-30 05:23:57,592 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:1] 2025-10-30 05:23:57,591 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:7] 2025-10-30 05:23:57,592 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] running_repairs is fully synced INFO [RepairJobTask:7] 2025-10-30 05:23:57,595 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for snapshot (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:23:57,596 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,597 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,597 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,599 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,599 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,600 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.48 INFO [RepairJobTask:6] 2025-10-30 05:23:57,601 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for snapshot INFO 
[RepairJobTask:3] 2025-10-30 05:23:57,601 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:5] 2025-10-30 05:23:57,601 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:3] 2025-10-30 05:23:57,602 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] snapshot is fully synced INFO [RepairJobTask:3] 2025-10-30 05:23:57,604 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:23:57,604 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,606 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,608 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,612 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,612 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,614 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.48 INFO [RepairJobTask:5] 2025-10-30 05:23:57,615 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:6] 2025-10-30 05:23:57,615 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:2] 2025-10-30 05:23:57,615 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:6] 2025-10-30 05:23:57,615 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] diagnostic_event_subscription is fully synced INFO [RepairJobTask:2] 2025-10-30 05:23:57,618 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-10-30 05:23:57,618 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,621 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,622 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,628 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,628 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,630 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.48 INFO [RepairJobTask:3] 2025-10-30 05:23:57,631 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] 
Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-30 05:23:57,631 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:6] 2025-10-30 05:23:57,631 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-30 05:23:57,633 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:5] 2025-10-30 05:23:57,644 RepairJob.java:234 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Requesting merkle trees for schema_migration (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:23:57,644 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,646 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,646 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,656 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,656 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:23:57,658 RepairSession.java:180 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:23:57,658 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:3] 2025-10-30 05:23:57,658 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:5] 2025-10-30 05:23:57,658 SyncTask.java:66 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:3] 2025-10-30 05:23:57,658 RepairJob.java:143 - [repair #a2329340-b550-11f0-a915-75fc21c97809] schema_migration is fully synced INFO [RepairJobTask:3] 2025-10-30 05:23:57,659 RepairSession.java:270 - [repair #a2329340-b550-11f0-a915-75fc21c97809] Session completed successfully INFO [RepairJobTask:3] 2025-10-30 05:23:57,660 RepairRunnable.java:261 - Repair session a2329340-b550-11f0-a915-75fc21c97809 for range [(7524842404210454246,7590266134587615582]] finished INFO [RepairJobTask:3] 2025-10-30 05:23:57,663 ActiveRepairService.java:452 - [repair #a22cc6e0-b550-11f0-a915-75fc21c97809] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-10-30 05:23:57,670 RepairRunnable.java:343 - Repair command #1 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,538 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,572 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,589 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed 
merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,627 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,666 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,685 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,705 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,723 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,733 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,741 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,749 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,767 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,782 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,794 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,848 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,915 Validator.java:281 - [repair #a55cf830-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:24:02,928 ActiveRepairService.java:452 - [repair #a55b2370-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,217 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,256 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,314 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,341 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to 
/10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,382 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,448 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,530 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,689 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,712 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,740 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,763 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,835 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,853 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,935 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:24:04,984 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:24:05,050 Validator.java:281 - [repair #a645aa80-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:24:05,100 ActiveRepairService.java:452 - [repair #a63a86f0-b550-11f0-975e-934a4446dfb5] Not a global repair, will not do anticompaction INFO [Repair-Task-3] 2025-10-30 05:24:07,412 RepairRunnable.java:139 - Starting repair command #2 (a84d1340-b550-11f0-a915-75fc21c97809), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 8, pull repair: false) INFO [Repair-Task-3] 2025-10-30 05:24:07,463 RepairSession.java:228 - [repair #a854db70-b550-11f0-a915-75fc21c97809] new session: will sync /10.0.0.48, /10.0.0.49, /10.0.0.254 on range [(-1960622969925667041,-1926382290171162513], (8655462948647425769,8658206395128997478], (7342028298012374571,7343463795674533343], 
(1180352787631447267,1197268924741077652], (-5980233499411136938,-5966310622453449782], (144184123083797046,147528442615955803], (1640040642172877880,1644184474757638770], (-6120054493619284793,-6108629916421489043]] for reaper_db.[repair_run, repair_run_by_unit, repair_run_by_cluster, repair_schedule_v1, percent_repaired_by_schedule, schema_migration_leader, repair_run_by_cluster_v2, repair_unit_v1, cluster, leader, running_reapers, running_repairs, snapshot, diagnostic_event_subscription, repair_schedule_by_cluster_and_keyspace, schema_migration] INFO [RepairJobTask:3] 2025-10-30 05:24:07,642 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:24:07,642 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,647 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,649 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,652 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,653 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,655 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:24:07,658 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:5] 2025-10-30 05:24:07,658 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:4] 2025-10-30 05:24:07,658 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:4] 2025-10-30 05:24:07,661 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] repair_run is fully synced INFO [RepairJobTask:4] 2025-10-30 05:24:07,662 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-10-30 05:24:07,663 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,665 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,665 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,669 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,669 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,670 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:07,672 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-10-30 05:24:07,672 SyncTask.java:66 - [repair 
#a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-10-30 05:24:07,672 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-10-30 05:24:07,672 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] repair_run_by_unit is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:07,679 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:07,679 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,682 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,682 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,685 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,686 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,688 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:07,691 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-10-30 05:24:07,691 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:5] 2025-10-30 05:24:07,691 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-10-30 05:24:07,692 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] repair_run_by_cluster is fully synced INFO [RepairJobTask:2] 2025-10-30 05:24:07,697 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-10-30 05:24:07,698 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,701 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,702 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,708 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,709 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,712 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:07,715 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-10-30 05:24:07,716 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] 
Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:2] 2025-10-30 05:24:07,716 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:2] 2025-10-30 05:24:07,717 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] repair_schedule_v1 is fully synced INFO [RepairJobTask:2] 2025-10-30 05:24:07,727 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-10-30 05:24:07,727 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,731 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,732 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,750 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,750 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,752 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.48 INFO [RepairJobTask:4] 2025-10-30 05:24:07,754 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-10-30 05:24:07,755 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-10-30 05:24:07,755 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-10-30 05:24:07,755 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:07,760 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for schema_migration_leader (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:07,761 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,765 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,765 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,789 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,789 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,791 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.48 INFO [RepairJobTask:3] 2025-10-30 05:24:07,792 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-10-30 05:24:07,792 SyncTask.java:66 - [repair 
#a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-10-30 05:24:07,793 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-10-30 05:24:07,793 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] schema_migration_leader is fully synced INFO [RepairJobTask:6] 2025-10-30 05:24:07,800 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:24:07,801 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,805 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,805 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,809 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,809 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,812 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.48 INFO [RepairJobTask:4] 2025-10-30 05:24:07,813 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-10-30 05:24:07,813 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-10-30 05:24:07,813 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-10-30 05:24:07,813 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:6] 2025-10-30 05:24:07,828 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:24:07,828 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,834 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,834 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,841 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,841 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,846 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.48 INFO [RepairJobTask:3] 2025-10-30 05:24:07,847 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-10-30 05:24:07,847 SyncTask.java:66 - [repair 
#a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:7] 2025-10-30 05:24:07,847 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-10-30 05:24:07,848 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] repair_unit_v1 is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:07,852 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for cluster (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:07,852 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,857 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,858 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,863 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,863 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,869 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.48 INFO [RepairJobTask:4] 2025-10-30 05:24:07,870 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:3] 2025-10-30 05:24:07,870 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:6] 2025-10-30 05:24:07,870 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:3] 2025-10-30 05:24:07,871 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] cluster is fully synced INFO [RepairJobTask:3] 2025-10-30 05:24:07,874 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for leader (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:24:07,874 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,876 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,876 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,879 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,880 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,881 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.48 INFO [RepairJobTask:4] 2025-10-30 05:24:07,884 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:7] 2025-10-30 05:24:07,885 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:4] 2025-10-30 05:24:07,885 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints 
/10.0.0.49 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:7] 2025-10-30 05:24:07,885 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] leader is fully synced INFO [RepairJobTask:7] 2025-10-30 05:24:07,894 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for running_reapers (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:07,894 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,896 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,896 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,899 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,899 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,904 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.48 INFO [RepairJobTask:6] 2025-10-30 05:24:07,904 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:4] 2025-10-30 05:24:07,905 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:4] 2025-10-30 05:24:07,905 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:4] 2025-10-30 05:24:07,905 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] running_reapers is fully synced INFO [RepairJobTask:4] 2025-10-30 05:24:07,960 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for running_repairs (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-10-30 05:24:07,960 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,963 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,963 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,965 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,966 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,967 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.48 INFO [RepairJobTask:3] 2025-10-30 05:24:07,968 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:6] 2025-10-30 05:24:07,968 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:4] 2025-10-30 05:24:07,968 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:6] 2025-10-30 05:24:07,968 RepairJob.java:143 - [repair 
#a854db70-b550-11f0-a915-75fc21c97809] running_repairs is fully synced INFO [RepairJobTask:6] 2025-10-30 05:24:07,972 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for snapshot (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:24:07,972 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,974 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,975 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,978 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,978 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,981 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.48 INFO [RepairJobTask:7] 2025-10-30 05:24:07,981 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:4] 2025-10-30 05:24:07,982 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:6] 2025-10-30 05:24:07,982 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:4] 2025-10-30 05:24:07,982 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] snapshot is fully synced INFO [RepairJobTask:4] 2025-10-30 05:24:07,986 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-10-30 05:24:07,986 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,989 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,990 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,992 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,993 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:07,995 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.48 INFO [RepairJobTask:7] 2025-10-30 05:24:07,995 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-10-30 05:24:07,995 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-10-30 05:24:07,995 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-10-30 05:24:07,995 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] diagnostic_event_subscription is 
fully synced INFO [RepairJobTask:3] 2025-10-30 05:24:08,001 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:24:08,002 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,006 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,007 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,010 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,010 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,017 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:24:08,017 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-30 05:24:08,018 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:1] 2025-10-30 05:24:08,019 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-30 05:24:08,020 RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:08,023 RepairJob.java:234 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Requesting merkle trees for schema_migration (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:08,023 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,025 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,025 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,028 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,028 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:08,030 RepairSession.java:180 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.48 INFO [RepairJobTask:6] 2025-10-30 05:24:08,030 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:2] 2025-10-30 05:24:08,030 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:5] 2025-10-30 05:24:08,031 SyncTask.java:66 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:2] 2025-10-30 05:24:08,031 
RepairJob.java:143 - [repair #a854db70-b550-11f0-a915-75fc21c97809] schema_migration is fully synced INFO [RepairJobTask:2] 2025-10-30 05:24:08,032 RepairSession.java:270 - [repair #a854db70-b550-11f0-a915-75fc21c97809] Session completed successfully INFO [RepairJobTask:2] 2025-10-30 05:24:08,032 RepairRunnable.java:261 - Repair session a854db70-b550-11f0-a915-75fc21c97809 for range [(-1960622969925667041,-1926382290171162513], (8655462948647425769,8658206395128997478], (7342028298012374571,7343463795674533343], (1180352787631447267,1197268924741077652], (-5980233499411136938,-5966310622453449782], (144184123083797046,147528442615955803], (1640040642172877880,1644184474757638770], (-6120054493619284793,-6108629916421489043]] finished INFO [RepairJobTask:2] 2025-10-30 05:24:08,033 ActiveRepairService.java:452 - [repair #a84d1340-b550-11f0-a915-75fc21c97809] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-10-30 05:24:08,039 RepairRunnable.java:343 - Repair command #2 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,611 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,625 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,661 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,693 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,720 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,732 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,761 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,791 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,804 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,814 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,826 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,891 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,914 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle 
tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,972 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:24:12,989 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:24:13,025 Validator.java:281 - [repair #ab5d6080-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:24:13,069 ActiveRepairService.java:452 - [repair #ab5b64b0-b550-11f0-975e-934a4446dfb5] Not a global repair, will not do anticompaction INFO [Repair-Task-4] 2025-10-30 05:24:13,959 RepairRunnable.java:139 - Starting repair command #3 (ac33ea60-b550-11f0-a915-75fc21c97809), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-4] 2025-10-30 05:24:13,969 RepairSession.java:228 - [repair #ac359810-b550-11f0-a915-75fc21c97809] new session: will sync /10.0.0.48, /10.0.0.49, /10.0.0.254 on range [(-2706566189360539456,-2605251413358996802]] for reaper_db.[repair_run, repair_run_by_unit, repair_run_by_cluster, repair_schedule_v1, percent_repaired_by_schedule, schema_migration_leader, repair_run_by_cluster_v2, repair_unit_v1, cluster, leader, running_reapers, running_repairs, snapshot, diagnostic_event_subscription, repair_schedule_by_cluster_and_keyspace, schema_migration] INFO [RepairJobTask:2] 2025-10-30 05:24:14,060 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-10-30 05:24:14,061 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,072 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,072 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,075 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,075 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,076 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:24:14,079 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:2] 2025-10-30 05:24:14,079 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:5] 2025-10-30 05:24:14,080 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 
are consistent for repair_run INFO [RepairJobTask:3] 2025-10-30 05:24:14,082 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] repair_run is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:14,083 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:14,083 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,090 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,091 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,094 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,094 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,096 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:24:14,096 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-10-30 05:24:14,096 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-10-30 05:24:14,096 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-10-30 05:24:14,096 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] repair_run_by_unit is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:14,103 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:14,103 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,107 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,108 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,111 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,111 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,117 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:14,117 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:6] 2025-10-30 05:24:14,118 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-10-30 05:24:14,118 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:6] 
2025-10-30 05:24:14,118 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] repair_run_by_cluster is fully synced INFO [RepairJobTask:6] 2025-10-30 05:24:14,125 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:24:14,125 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,129 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,129 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,141 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,141 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,147 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:14,148 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:1] 2025-10-30 05:24:14,149 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:1] 2025-10-30 05:24:14,149 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:1] 2025-10-30 05:24:14,149 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] repair_schedule_v1 is fully synced INFO [RepairJobTask:1] 2025-10-30 05:24:14,151 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-10-30 05:24:14,152 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,153 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,154 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,155 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,155 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,156 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:14,157 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:3] 2025-10-30 05:24:14,157 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:3] 2025-10-30 05:24:14,159 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO 
[RepairJobTask:3] 2025-10-30 05:24:14,159 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:1] 2025-10-30 05:24:14,160 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for schema_migration_leader (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-10-30 05:24:14,160 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,162 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,162 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,163 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,163 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,165 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.48 INFO [RepairJobTask:4] 2025-10-30 05:24:14,166 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-10-30 05:24:14,166 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-10-30 05:24:14,169 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-10-30 05:24:14,169 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:24:14,170 RepairJob.java:257 - Validating /10.0.0.49 INFO [RepairJobTask:1] 2025-10-30 05:24:14,170 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] schema_migration_leader is fully synced INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,174 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,174 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,177 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,178 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,180 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.48 INFO [RepairJobTask:5] 2025-10-30 05:24:14,182 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-10-30 05:24:14,183 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-10-30 05:24:14,184 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for 
repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-10-30 05:24:14,184 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:6] 2025-10-30 05:24:14,184 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:24:14,184 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,186 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,186 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,188 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,188 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,189 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.48 INFO [RepairJobTask:6] 2025-10-30 05:24:14,189 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:6] 2025-10-30 05:24:14,189 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-10-30 05:24:14,190 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-10-30 05:24:14,192 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] repair_unit_v1 is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:14,194 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for cluster (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:14,194 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,196 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,196 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,198 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,198 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,200 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.48 INFO [RepairJobTask:3] 2025-10-30 05:24:14,200 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:2] 2025-10-30 05:24:14,200 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:7] 2025-10-30 05:24:14,200 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:1] 2025-10-30 05:24:14,200 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] cluster is fully synced INFO 
[RepairJobTask:7] 2025-10-30 05:24:14,202 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for leader (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:14,202 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,203 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,203 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,205 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,205 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,209 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:24:14,214 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:6] 2025-10-30 05:24:14,214 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:5] 2025-10-30 05:24:14,214 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:6] 2025-10-30 05:24:14,214 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] leader is fully synced INFO [RepairJobTask:7] 2025-10-30 05:24:14,223 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for running_reapers (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:14,224 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,225 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,225 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,227 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,227 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,228 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:24:14,228 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:5] 2025-10-30 05:24:14,228 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:7] 2025-10-30 05:24:14,229 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:5] 2025-10-30 05:24:14,229 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] running_reapers is fully synced INFO [RepairJobTask:7] 2025-10-30 05:24:14,268 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for running_repairs (to [/10.0.0.49, /10.0.0.254, 
/10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:14,269 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,271 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,271 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,273 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,273 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,274 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:14,275 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:2] 2025-10-30 05:24:14,275 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:4] 2025-10-30 05:24:14,275 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:6] 2025-10-30 05:24:14,275 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] running_repairs is fully synced INFO [RepairJobTask:7] 2025-10-30 05:24:14,339 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for snapshot (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:14,340 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,342 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,343 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,345 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,345 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,349 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.48 INFO [RepairJobTask:4] 2025-10-30 05:24:14,350 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:2] 2025-10-30 05:24:14,350 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:6] 2025-10-30 05:24:14,350 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:2] 2025-10-30 05:24:14,351 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] snapshot is fully synced INFO [RepairJobTask:6] 2025-10-30 05:24:14,352 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:24:14,352 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,353 
RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,353 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,355 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,355 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,357 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.48 INFO [RepairJobTask:5] 2025-10-30 05:24:14,357 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-10-30 05:24:14,357 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-10-30 05:24:14,357 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-10-30 05:24:14,358 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] diagnostic_event_subscription is fully synced INFO [RepairJobTask:7] 2025-10-30 05:24:14,364 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:14,364 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,366 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,366 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,368 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,369 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,371 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.48 INFO [RepairJobTask:6] 2025-10-30 05:24:14,372 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-10-30 05:24:14,372 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-30 05:24:14,372 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:3] 2025-10-30 05:24:14,373 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:3] 2025-10-30 05:24:14,374 RepairJob.java:234 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Requesting merkle trees for schema_migration (to 
[/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:24:14,375 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,376 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,376 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,377 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,378 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:14,387 RepairSession.java:180 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:24:14,388 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:3] 2025-10-30 05:24:14,389 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:7] 2025-10-30 05:24:14,388 SyncTask.java:66 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:3] 2025-10-30 05:24:14,389 RepairJob.java:143 - [repair #ac359810-b550-11f0-a915-75fc21c97809] schema_migration is fully synced INFO [RepairJobTask:3] 2025-10-30 05:24:14,390 RepairSession.java:270 - [repair #ac359810-b550-11f0-a915-75fc21c97809] Session completed successfully INFO [RepairJobTask:3] 2025-10-30 05:24:14,390 RepairRunnable.java:261 - Repair session ac359810-b550-11f0-a915-75fc21c97809 for range [(-2706566189360539456,-2605251413358996802]] finished INFO [RepairJobTask:3] 2025-10-30 05:24:14,393 ActiveRepairService.java:452 - [repair #ac33ea60-b550-11f0-a915-75fc21c97809] Not a global repair, will not do anticompaction INFO [InternalResponseStage:7] 2025-10-30 05:24:14,396 RepairRunnable.java:343 - Repair command #3 finished in 0 seconds INFO [Repair-Task-5] 2025-10-30 05:24:17,476 RepairRunnable.java:139 - Starting repair command #4 (ae4c9130-b550-11f0-a915-75fc21c97809), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 3, pull repair: false) INFO [Repair-Task-5] 2025-10-30 05:24:17,489 RepairSession.java:228 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] new session: will sync /10.0.0.48, /10.0.0.49, /10.0.0.254 on range [(-2755707449440863725,-2743162652405887868], (-7836426658345234388,-7834752276916369162], (4853782863197770282,4892674996159160029]] for reaper_db.[repair_run, repair_run_by_unit, repair_run_by_cluster, repair_schedule_v1, percent_repaired_by_schedule, schema_migration_leader, repair_run_by_cluster_v2, repair_unit_v1, cluster, leader, running_reapers, running_repairs, snapshot, diagnostic_event_subscription, repair_schedule_by_cluster_and_keyspace, schema_migration] INFO [RepairJobTask:3] 
2025-10-30 05:24:17,607 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:24:17,607 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,610 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,610 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,619 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,619 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,621 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:17,622 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:4] 2025-10-30 05:24:17,622 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:5] 2025-10-30 05:24:17,622 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:4] 2025-10-30 05:24:17,623 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] repair_run is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:17,624 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:17,624 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,627 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,627 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,635 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,635 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,639 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_unit from /10.0.0.48 INFO [RepairJobTask:2] 2025-10-30 05:24:17,641 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-10-30 05:24:17,640 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-10-30 05:24:17,641 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-10-30 05:24:17,642 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] repair_run_by_unit is fully synced INFO [RepairJobTask:2] 2025-10-30 05:24:17,643 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for 
repair_run_by_cluster (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-10-30 05:24:17,644 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,646 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,647 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,649 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,649 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,650 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster from /10.0.0.48 INFO [RepairJobTask:5] 2025-10-30 05:24:17,651 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-10-30 05:24:17,651 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:7] 2025-10-30 05:24:17,651 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-10-30 05:24:17,651 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] repair_run_by_cluster is fully synced INFO [RepairJobTask:4] 2025-10-30 05:24:17,657 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-10-30 05:24:17,658 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,660 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,660 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,671 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,671 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,674 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_v1 from /10.0.0.48 INFO [RepairJobTask:5] 2025-10-30 05:24:17,675 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-10-30 05:24:17,675 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:7] 2025-10-30 05:24:17,675 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:7] 2025-10-30 05:24:17,676 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] repair_schedule_v1 is fully synced INFO [RepairJobTask:7] 2025-10-30 05:24:17,678 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.49, 
/10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:17,679 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,680 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,680 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,683 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,683 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,685 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for percent_repaired_by_schedule from /10.0.0.48 INFO [RepairJobTask:7] 2025-10-30 05:24:17,685 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-10-30 05:24:17,685 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-10-30 05:24:17,685 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-10-30 05:24:17,685 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:7] 2025-10-30 05:24:17,688 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for schema_migration_leader (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:17,689 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,690 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,690 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,692 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,692 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,694 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration_leader from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:17,694 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-10-30 05:24:17,694 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-10-30 05:24:17,695 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-10-30 05:24:17,696 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] schema_migration_leader is fully synced INFO [RepairJobTask:2] 2025-10-30 05:24:17,698 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees 
for repair_run_by_cluster_v2 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-10-30 05:24:17,698 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,702 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,702 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,706 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,706 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,707 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.48 INFO [RepairJobTask:4] 2025-10-30 05:24:17,708 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-10-30 05:24:17,708 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:4] 2025-10-30 05:24:17,708 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:4] 2025-10-30 05:24:17,708 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:6] 2025-10-30 05:24:17,712 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-10-30 05:24:17,712 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,713 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,714 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,719 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,719 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,721 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_unit_v1 from /10.0.0.48 INFO [RepairJobTask:4] 2025-10-30 05:24:17,721 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-10-30 05:24:17,721 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-10-30 05:24:17,721 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-10-30 05:24:17,721 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] repair_unit_v1 is fully synced INFO [RepairJobTask:6] 2025-10-30 05:24:17,724 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for cluster (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) 
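The entries above repeat the same anti-entropy pattern for every table in reaper_db: the coordinator requests Merkle trees from /10.0.0.49, /10.0.0.254 and /10.0.0.48, validates each replica as its tree arrives, SyncTask then compares every endpoint pair, and RepairJob reports the table "fully synced" once all pairs agree, ending with "Session completed successfully" and "Repair command #N finished". For reading such captures offline, the following is a minimal, illustrative Python sketch (not part of this deployment or its tooling) that counts fully-synced tables per repair session; the file name console.log, the function name and the regexes are assumptions based on the stock one-entry-per-line layout Cassandra writes, matching the format visible in this log.

#!/usr/bin/env python3
# Minimal sketch: summarise Cassandra repair sessions from a console.log capture.
# Assumes one log entry per line (as Cassandra emits them); path and names are illustrative.
import re
from collections import defaultdict

# LEVEL [Thread] yyyy-mm-dd hh:mm:ss,ms Class.java:line - message
LINE = re.compile(
    r'(?P<level>INFO|WARN|ERROR)\s+\[(?P<thread>[^\]]+)\]\s+'
    r'(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+\S+\s+-\s+(?P<msg>.*)')
SESSION = re.compile(r'\[repair #(?P<sid>[0-9a-f-]+)\]\s+(?P<rest>.*)')
SYNCED = re.compile(r'(?P<table>\w+) is fully synced')

def summarise(path="console.log"):
    synced = defaultdict(set)   # session id -> tables reported fully synced
    finished = set()            # session ids that logged "Session completed successfully"
    with open(path) as fh:
        for raw in fh:
            m = LINE.search(raw)
            if not m:
                continue
            s = SESSION.search(m.group("msg"))
            if not s:
                continue
            sid, rest = s.group("sid"), s.group("rest")
            t = SYNCED.match(rest)
            if t:
                synced[sid].add(t.group("table"))
            elif rest.startswith("Session completed successfully"):
                finished.add(sid)
    for sid in sorted(synced):
        state = "completed" if sid in finished else "in progress"
        print(f"{sid}: {len(synced[sid])} tables fully synced, {state}")

if __name__ == "__main__":
    summarise()

Run against a copy of the log, it prints one summary line per repair session; for a session such as ac359810-b550-11f0-a915-75fc21c97809, which repairs the 16 reaper_db column families listed in the repair options, a completed run would show all 16 tables fully synced.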
INFO [RepairJobTask:6] 2025-10-30 05:24:17,724 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,727 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,729 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,732 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,732 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,734 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for cluster from /10.0.0.48 INFO [RepairJobTask:7] 2025-10-30 05:24:17,734 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:1] 2025-10-30 05:24:17,734 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:6] 2025-10-30 05:24:17,734 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:4] 2025-10-30 05:24:17,735 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] cluster is fully synced INFO [RepairJobTask:4] 2025-10-30 05:24:17,736 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for leader (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-10-30 05:24:17,736 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,738 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,739 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,743 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,748 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,750 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for leader from /10.0.0.48 INFO [RepairJobTask:7] 2025-10-30 05:24:17,750 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:7] 2025-10-30 05:24:17,751 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:7] 2025-10-30 05:24:17,751 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:7] 2025-10-30 05:24:17,752 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] leader is fully synced INFO [RepairJobTask:7] 2025-10-30 05:24:17,754 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for running_reapers (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-10-30 05:24:17,754 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,757 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for 
running_reapers from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,757 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,759 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,760 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,761 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for running_reapers from /10.0.0.48 INFO [RepairJobTask:5] 2025-10-30 05:24:17,761 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:2] 2025-10-30 05:24:17,761 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:3] 2025-10-30 05:24:17,761 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:4] 2025-10-30 05:24:17,762 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] running_reapers is fully synced INFO [RepairJobTask:4] 2025-10-30 05:24:17,804 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for running_repairs (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-10-30 05:24:17,804 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,807 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,807 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,810 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,810 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,811 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for running_repairs from /10.0.0.48 INFO [RepairJobTask:3] 2025-10-30 05:24:17,812 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:1] 2025-10-30 05:24:17,812 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:5] 2025-10-30 05:24:17,812 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:1] 2025-10-30 05:24:17,812 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] running_repairs is fully synced INFO [RepairJobTask:1] 2025-10-30 05:24:17,815 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for snapshot (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-10-30 05:24:17,815 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,817 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,817 RepairJob.java:270 - Validating /10.0.0.254 INFO 
[AntiEntropyStage:1] 2025-10-30 05:24:17,819 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,819 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,821 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for snapshot from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:17,822 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:3] 2025-10-30 05:24:17,822 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:4] 2025-10-30 05:24:17,822 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:3] 2025-10-30 05:24:17,822 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] snapshot is fully synced INFO [RepairJobTask:3] 2025-10-30 05:24:17,827 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:24:17,828 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,831 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,832 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,834 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,835 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,838 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for diagnostic_event_subscription from /10.0.0.48 INFO [RepairJobTask:3] 2025-10-30 05:24:17,838 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-10-30 05:24:17,838 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-10-30 05:24:17,840 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-10-30 05:24:17,841 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] diagnostic_event_subscription is fully synced INFO [RepairJobTask:3] 2025-10-30 05:24:17,846 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-10-30 05:24:17,846 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,851 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,852 RepairJob.java:270 - Validating /10.0.0.254 INFO 
[AntiEntropyStage:1] 2025-10-30 05:24:17,860 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,860 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,864 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.48 INFO [RepairJobTask:3] 2025-10-30 05:24:17,864 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-30 05:24:17,864 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:1] 2025-10-30 05:24:17,864 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-10-30 05:24:17,865 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:5] 2025-10-30 05:24:17,868 RepairJob.java:234 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Requesting merkle trees for schema_migration (to [/10.0.0.49, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-10-30 05:24:17,868 RepairJob.java:257 - Validating /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,870 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.49 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,870 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,876 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,876 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-10-30 05:24:17,877 RepairSession.java:180 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Received merkle tree for schema_migration from /10.0.0.48 INFO [RepairJobTask:1] 2025-10-30 05:24:17,878 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:3] 2025-10-30 05:24:17,878 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:4] 2025-10-30 05:24:17,878 SyncTask.java:66 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Endpoints /10.0.0.49 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:3] 2025-10-30 05:24:17,878 RepairJob.java:143 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] schema_migration is fully synced INFO [RepairJobTask:3] 2025-10-30 05:24:17,878 RepairSession.java:270 - [repair #ae4eb410-b550-11f0-a915-75fc21c97809] Session completed successfully INFO [RepairJobTask:3] 2025-10-30 05:24:17,878 RepairRunnable.java:261 - Repair session ae4eb410-b550-11f0-a915-75fc21c97809 for range [(-2755707449440863725,-2743162652405887868], (-7836426658345234388,-7834752276916369162], (4853782863197770282,4892674996159160029]] finished INFO [RepairJobTask:3] 2025-10-30 05:24:17,880 ActiveRepairService.java:452 - [repair 
#ae4c9130-b550-11f0-a915-75fc21c97809] Not a global repair, will not do anticompaction INFO [RepairJobTask:3] 2025-10-30 05:24:17,882 RepairRunnable.java:343 - Repair command #4 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,673 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,685 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,698 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,708 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,731 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,741 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,754 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,763 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,774 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,787 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,807 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,824 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,838 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,846 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,901 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,979 Validator.java:281 - [repair #b15bcd00-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:24:22,988 ActiveRepairService.java:452 - [repair #b159aa20-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,073 
Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,093 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,122 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,134 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,145 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,158 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,169 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,181 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,193 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,205 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,215 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,269 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,288 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,343 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,355 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,370 Validator.java:281 - [repair #b235d950-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:24:24,378 ActiveRepairService.java:452 - [repair #b234eef0-b550-11f0-975e-934a4446dfb5] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,604 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,626 Validator.java:281 - [repair 
#b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,637 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,649 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,664 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,674 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,742 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,750 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,768 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,778 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,803 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,933 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:24:27,951 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:28,009 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:24:28,035 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:24:28,048 Validator.java:281 - [repair #b44cab60-b550-11f0-975e-934a4446dfb5] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:24:28,056 ActiveRepairService.java:452 - [repair #b449ec40-b550-11f0-975e-934a4446dfb5] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,797 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,820 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,833 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to 
/10.0.0.49 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,848 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,873 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,892 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,910 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,971 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-10-30 05:24:32,989 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:33,009 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-10-30 05:24:33,030 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-10-30 05:24:33,042 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-10-30 05:24:33,054 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-10-30 05:24:33,063 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-10-30 05:24:33,111 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-10-30 05:24:33,178 Validator.java:281 - [repair #b7650ef0-b550-11f0-a0a4-19460568179d] Sending completed merkle tree to /10.0.0.49 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-10-30 05:24:33,188 ActiveRepairService.java:452 - [repair #b7633a30-b550-11f0-a0a4-19460568179d] Not a global repair, will not do anticompaction
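The later entries show the replica side of the same keyspace repairs: this node builds and sends completed merkle trees back to the coordinators at 10.0.0.49 and 10.0.0.254 for each reaper_db table, and ActiveRepairService skips anticompaction because these are sub-range repairs rather than global ones. A hedged sketch of an equivalent manual sub-range repair with nodetool, using one token range from the session that finished above; --full, -st and -et are standard nodetool repair options, while the non-default JMX port used here is an assumption for this deployment:

# Manual full (non-incremental) repair of one sub-range of reaper_db (sketch)
nodetool -p 7201 repair --full \
  -st -2755707449440863725 -et -2743162652405887868 \
  reaper_db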