++ LOG_DIR=/var/log/contrail ++ export CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra ++ CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra ++ mkdir -p /var/log/contrail/config-database-cassandra ++ log_file=/var/log/contrail/config-database-cassandra/console.log ++ touch /var/log/contrail/config-database-cassandra/console.log +++ date ++ echo 'INFO: =================== Mon Aug 25 01:35:20 UTC 2025 ===================' INFO: =================== Mon Aug 25 01:35:20 UTC 2025 =================== ++ LOG_LOCAL=1 ++ source /functions.sh ++ source /contrail-functions.sh +++ get_default_ip ++++ get_default_nic ++++ get_gateway_nic_for_ip 1 ++++ command -v ip ++++ local ip=1 +++++ ip route get 1 +++++ grep -o 'dev.*' +++++ awk '{print $2}' ++++ local iface=ens3 ++++ [[ ens3 == \l\o ]] ++++ echo ens3 +++ local nic=ens3 +++ get_ip_for_nic ens3 +++ local nic=ens3 +++ get_cidr_for_nic ens3 +++ command -v ip +++ cut -d / -f 1 +++ local nic=ens3 +++ ip addr show dev ens3 +++ head -n 1 +++ grep 'inet ' +++ awk '{print $2}' ++ chmod 600 /var/log/contrail/config-database-cassandra/console.log ++ exec +++ tee -a /var/log/contrail/config-database-cassandra/console.log ++ DEFAULT_LOCAL_IP=10.0.0.50 ++ ENCAP_PRIORITY=MPLSoUDP,MPLSoGRE,VXLAN ++ VXLAN_VN_ID_MODE=automatic ++ DPDK_UIO_DRIVER=uio_pci_generic ++ CPU_CORE_MASK=0x01 ++ SERVICE_CORE_MASK= ++ DPDK_CTRL_THREAD_MASK= ++ HUGE_PAGES= ++ HUGE_PAGES_DIR=/dev/hugepages ++ HUGE_PAGES_1GB=0 ++ HUGE_PAGES_2MB=256 ++ HUGE_PAGES_1GB_DIR= ++ HUGE_PAGES_2MB_DIR= ++ [[ 0 != 0 ]] ++ [[ 0 != 256 ]] ++ [[ -z '' ]] +++ tail -n 1 +++ awk '/pagesize=2M/{print($3)}' +++ mount -t hugetlbfs ++ HUGE_PAGES_2MB_DIR= ++ DPDK_MEM_PER_SOCKET=1024 ++ DPDK_COMMAND_ADDITIONAL_ARGS= ++ NIC_OFFLOAD_ENABLE=False ++ DPDK_ENABLE_VLAN_FWRD=False ++ DIST_SNAT_PROTO_PORT_LIST= ++ CLOUD_ORCHESTRATOR=openstack ++ CLOUD_ADMIN_ROLE=admin ++ AAA_MODE=rbac ++ AUTH_MODE=keystone ++ AUTH_PARAMS= ++ SSL_ENABLE=false ++ SSL_INSECURE=True ++ SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ SERVER_CA_KEYFILE=/etc/contrail/ssl/private/ca-key.pem ++ SELFSIGNED_CERTS_WITH_IPS=True ++ CONTROLLER_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ ANALYTICS_ALARM_ENABLE=True ++ ANALYTICS_SNMP_ENABLE=True ++ ANALYTICSDB_ENABLE=True ++ ANALYTICS_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ ANALYTICSDB_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ ANALYTICS_SNMP_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ ANALYTICS_API_PORT=8081 ++ ANALYTICS_API_INTROSPECT_PORT=8090 ++ ANALYTICSDB_PORT=9160 ++ ANALYTICSDB_CQL_PORT=9042 ++ TOPOLOGY_INTROSPECT_PORT=5921 ++ QUERYENGINE_INTROSPECT_PORT=8091 +++ get_server_list ANALYTICS ':8081 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:8081 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:8081 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8081 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:8081 ' +++ '[' -n '10.0.0.50:8081 10.0.0.254:8081 10.0.0.249:8081 ' ']' +++ echo '10.0.0.50:8081 10.0.0.254:8081 10.0.0.249:8081' ++ ANALYTICS_SERVERS='10.0.0.50:8081 10.0.0.254:8081 
10.0.0.249:8081' +++ get_server_list ANALYTICSDB ':9042 ' +++ local server_typ=ANALYTICSDB_NODES +++ local 'port_with_delim=:9042 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:9042 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9042 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:9042 ' +++ '[' -n '10.0.0.50:9042 10.0.0.254:9042 10.0.0.249:9042 ' ']' +++ echo '10.0.0.50:9042 10.0.0.254:9042 10.0.0.249:9042' ++ ANALYTICSDB_CQL_SERVERS='10.0.0.50:9042 10.0.0.254:9042 10.0.0.249:9042' ++ ANALYTICS_API_VIP= ++ ANALYTICS_ALARM_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ ALARMGEN_INTROSPECT_PORT=5995 ++ BGP_PORT=179 ++ BGP_AUTO_MESH=true ++ BGP_ASN=64512 ++ ENABLE_4BYTE_AS=false ++ APPLY_DEFAULTS=true ++ COLLECTOR_PORT=8086 ++ COLLECTOR_INTROSPECT_PORT=8089 ++ COLLECTOR_SYSLOG_PORT=514 ++ COLLECTOR_SFLOW_PORT=6343 ++ COLLECTOR_IPFIX_PORT=4739 ++ COLLECTOR_PROTOBUF_PORT=3333 ++ COLLECTOR_STRUCTURED_SYSLOG_PORT=3514 ++ SNMPCOLLECTOR_INTROSPECT_PORT=5920 +++ get_server_list ANALYTICS ':8086 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:8086 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:8086 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8086 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:8086 ' +++ '[' -n '10.0.0.50:8086 10.0.0.254:8086 10.0.0.249:8086 ' ']' +++ echo '10.0.0.50:8086 10.0.0.254:8086 10.0.0.249:8086' ++ COLLECTOR_SERVERS='10.0.0.50:8086 10.0.0.254:8086 10.0.0.249:8086' ++ CASSANDRA_PORT=9161 ++ CASSANDRA_CQL_PORT=9041 ++ CASSANDRA_SSL_STORAGE_PORT=7013 ++ CASSANDRA_STORAGE_PORT=7012 ++ CASSANDRA_JMX_LOCAL_PORT=7201 ++ CONFIGDB_CASSANDRA_DRIVER=cql ++ CONFIG_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ CONFIGDB_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ CONFIG_API_PORT=8082 ++ CONFIG_API_INTROSPECT_PORT=8084 ++ CONFIG_API_ADMIN_PORT=8095 ++ CONFIGDB_PORT=9161 ++ CONFIGDB_CQL_PORT=9041 +++ get_server_list CONFIG ':8082 ' +++ local server_typ=CONFIG_NODES +++ local 'port_with_delim=:8082 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:8082 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8082 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:8082 ' +++ '[' -n '10.0.0.50:8082 10.0.0.254:8082 10.0.0.249:8082 ' ']' +++ echo '10.0.0.50:8082 10.0.0.254:8082 10.0.0.249:8082' ++ CONFIG_SERVERS='10.0.0.50:8082 10.0.0.254:8082 10.0.0.249:8082' +++ get_server_list CONFIGDB ':9161 ' +++ local server_typ=CONFIGDB_NODES +++ local 'port_with_delim=:9161 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= 
+++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:9161 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9161 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:9161 ' +++ '[' -n '10.0.0.50:9161 10.0.0.254:9161 10.0.0.249:9161 ' ']' +++ echo '10.0.0.50:9161 10.0.0.254:9161 10.0.0.249:9161' ++ CONFIGDB_SERVERS='10.0.0.50:9161 10.0.0.254:9161 10.0.0.249:9161' +++ get_server_list CONFIGDB ':9041 ' +++ local server_typ=CONFIGDB_NODES +++ local 'port_with_delim=:9041 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:9041 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9041 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:9041 ' +++ '[' -n '10.0.0.50:9041 10.0.0.254:9041 10.0.0.249:9041 ' ']' +++ echo '10.0.0.50:9041 10.0.0.254:9041 10.0.0.249:9041' ++ CONFIGDB_CQL_SERVERS='10.0.0.50:9041 10.0.0.254:9041 10.0.0.249:9041' ++ CONFIG_API_VIP= ++ CONFIG_API_SSL_ENABLE=false ++ CONFIG_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ CONFIG_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ CONFIG_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CONFIG_API_WORKER_COUNT=1 ++ CONFIG_API_MAX_REQUESTS=1024 ++ ANALYTICS_API_SSL_ENABLE=false ++ ANALYTICS_API_SSL_INSECURE=True ++ ANALYTICS_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ ANALYTICS_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ ANALYTICS_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CASSANDRA_SSL_ENABLE=false ++ CASSANDRA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ CASSANDRA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ CASSANDRA_SSL_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CASSANDRA_SSL_KEYSTORE_PASSWORD=astrophytum ++ CASSANDRA_SSL_TRUSTSTORE_PASSWORD=ornatum ++ CASSANDRA_SSL_PROTOCOL=TLS ++ CASSANDRA_SSL_ALGORITHM=SunX509 ++ CASSANDRA_SSL_CIPHER_SUITES='[TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]' ++ CASSANDRA_CONFIG_MEMTABLE_FLUSH_WRITER=4 ++ CASSANDRA_CONFIG_CONCURRECT_COMPACTORS=4 ++ CASSANDRA_CONFIG_COMPACTION_THROUGHPUT_MB_PER_SEC=256 ++ CASSANDRA_CONFIG_CONCURRECT_READS=64 ++ CASSANDRA_CONFIG_CONCURRECT_WRITES=64 ++ CASSANDRA_CONFIG_MEMTABLE_ALLOCATION_TYPE=offheap_objects ++ CASSANDRA_REAPER_ENABLED=true ++ CASSANDRA_REAPER_JMX_KEY=reaperJmxKey ++ CASSANDRA_REAPER_JMX_AUTH_USERNAME=reaperUser ++ CASSANDRA_REAPER_JMX_AUTH_PASSWORD=reaperPass ++ CASSANDRA_REAPER_APP_PORT=8071 ++ CASSANDRA_REAPER_ADM_PORT=8072 ++ CONTROL_NODES=10.20.0.17,10.20.0.254,10.20.0.14 ++ CONTROL_INTROSPECT_PORT=8083 ++ DNS_NODES=10.20.0.17,10.20.0.254,10.20.0.14 ++ DNS_SERVER_PORT=53 ++ DNS_INTROSPECT_PORT=8092 ++ RNDC_KEY=xvysmOR8lnUQRBcunkC6vg== ++ USE_EXTERNAL_TFTP=False ++ ZOOKEEPER_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ ZOOKEEPER_PORT=2181 ++ ZOOKEEPER_PORTS=2888:3888 +++ get_server_list ZOOKEEPER 
:2181, +++ local server_typ=ZOOKEEPER_NODES +++ local port_with_delim=:2181, +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+=10.0.0.50:2181, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+=10.0.0.254:2181, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+=10.0.0.249:2181, +++ '[' -n 10.0.0.50:2181,10.0.0.254:2181,10.0.0.249:2181, ']' +++ echo 10.0.0.50:2181,10.0.0.254:2181,10.0.0.249:2181 ++ ZOOKEEPER_SERVERS=10.0.0.50:2181,10.0.0.254:2181,10.0.0.249:2181 +++ get_server_list ZOOKEEPER ':2181 ' +++ local server_typ=ZOOKEEPER_NODES +++ local 'port_with_delim=:2181 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:2181 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:2181 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:2181 ' +++ '[' -n '10.0.0.50:2181 10.0.0.254:2181 10.0.0.249:2181 ' ']' +++ echo '10.0.0.50:2181 10.0.0.254:2181 10.0.0.249:2181' ++ ZOOKEEPER_SERVERS_SPACE_DELIM='10.0.0.50:2181 10.0.0.254:2181 10.0.0.249:2181' ++ RABBITMQ_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ RABBITMQ_NODE_PORT=5673 +++ get_server_list RABBITMQ :5673, +++ local server_typ=RABBITMQ_NODES +++ local port_with_delim=:5673, +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+=10.0.0.50:5673, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+=10.0.0.254:5673, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+=10.0.0.249:5673, +++ '[' -n 10.0.0.50:5673,10.0.0.254:5673,10.0.0.249:5673, ']' +++ echo 10.0.0.50:5673,10.0.0.254:5673,10.0.0.249:5673 ++ RABBITMQ_SERVERS=10.0.0.50:5673,10.0.0.254:5673,10.0.0.249:5673 ++ RABBITMQ_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ RABBITMQ_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ RABBITMQ_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT=true ++ RABBITMQ_VHOST=/ ++ RABBITMQ_USER=guest ++ RABBITMQ_PASSWORD=guest ++ RABBITMQ_USE_SSL=false ++ RABBITMQ_SSL_VER=tlsv1.2 ++ RABBITMQ_CLIENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ RABBITMQ_CLIENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ RABBITMQ_CLIENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ RABBITMQ_HEARTBEAT_INTERVAL=60 ++ RABBITMQ_CLUSTER_PARTITION_HANDLING=autoheal ++ RABBITMQ_MIRRORED_QUEUE_MODE=all ++ REDIS_SERVER_PORT=6379 ++ REDIS_SERVER_PASSWORD= +++ get_server_list ANALYTICS ':6379 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:6379 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:6379 
' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:6379 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:6379 ' +++ '[' -n '10.0.0.50:6379 10.0.0.254:6379 10.0.0.249:6379 ' ']' +++ echo '10.0.0.50:6379 10.0.0.254:6379 10.0.0.249:6379' ++ REDIS_SERVERS='10.0.0.50:6379 10.0.0.254:6379 10.0.0.249:6379' ++ REDIS_LISTEN_ADDRESS= ++ REDIS_PROTECTED_MODE= ++ REDIS_SSL_ENABLE=false ++ REDIS_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ REDIS_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ REDIS_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ redis_ssl_config= ++ KAFKA_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ KAFKA_PORT=9092 +++ get_server_list KAFKA ':9092 ' +++ local server_typ=KAFKA_NODES +++ local 'port_with_delim=:9092 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:9092 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9092 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.249 +++ local server_address=10.0.0.249 +++ extended_server_list+='10.0.0.249:9092 ' +++ '[' -n '10.0.0.50:9092 10.0.0.254:9092 10.0.0.249:9092 ' ']' +++ echo '10.0.0.50:9092 10.0.0.254:9092 10.0.0.249:9092' ++ KAFKA_SERVERS='10.0.0.50:9092 10.0.0.254:9092 10.0.0.249:9092' ++ KAFKA_SSL_ENABLE=false ++ KAFKA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ KAFKA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ KAFKA_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ KEYSTONE_AUTH_ADMIN_TENANT=admin ++ KEYSTONE_AUTH_ADMIN_USER=admin ++ KEYSTONE_AUTH_ADMIN_PASSWORD=contrail123 ++ KEYSTONE_AUTH_PROJECT_DOMAIN_NAME=Default ++ KEYSTONE_AUTH_USER_DOMAIN_NAME=Default ++ KEYSTONE_AUTH_REGION_NAME=RegionOne ++ KEYSTONE_AUTH_URL_VERSION=/v3 ++ KEYSTONE_AUTH_HOST=10.0.0.50 ++ KEYSTONE_AUTH_PROTO=http ++ KEYSTONE_AUTH_ADMIN_PORT=5000 ++ KEYSTONE_AUTH_PUBLIC_PORT=5000 ++ KEYSTONE_AUTH_URL_TOKENS=/v3/auth/tokens ++ KEYSTONE_AUTH_INSECURE=True ++ KEYSTONE_AUTH_CERTFILE= ++ KEYSTONE_AUTH_KEYFILE= ++ KEYSTONE_AUTH_CA_CERTFILE= ++ KEYSTONE_AUTH_ENDPOINT_TYPE= ++ KEYSTONE_AUTH_SYNC_ON_DEMAND= ++ KEYSTONE_AUTH_INTERFACE=public ++ KUBEMANAGER_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ KUBERNETES_CLUSTER_NAME=k8s ++ KUBERNETES_CNI_META_PLUGIN=multus ++ METADATA_PROXY_SECRET=contrail ++ BARBICAN_TENANT_NAME=service ++ BARBICAN_USER=barbican ++ BARBICAN_PASSWORD=contrail123 ++ AGENT_MODE=kernel ++ EXTERNAL_ROUTERS= ++ SUBCLUSTER= ++ VROUTER_COMPUTE_NODE_ADDRESS= ++ VROUTER_CRYPT_INTERFACE=crypt0 ++ VROUTER_DECRYPT_INTERFACE=decrypt0 ++ VROUTER_DECRYPT_KEY=15 ++ VROUTER_MODULE_OPTIONS= ++ FABRIC_SNAT_HASH_TABLE_SIZE=4096 ++ TSN_EVPN_MODE=False ++ TSN_NODES='[]' ++ PRIORITY_ID= ++ PRIORITY_BANDWIDTH= ++ PRIORITY_SCHEDULING= ++ QOS_QUEUE_ID= ++ QOS_LOGICAL_QUEUES= ++ QOS_DEF_HW_QUEUE=False ++ PRIORITY_TAGGING=True ++ SLO_DESTINATION=collector ++ '[' -n '' ']' ++ SAMPLE_DESTINATION=collector ++ FLOW_EXPORT_RATE=0 ++ WEBUI_NODES=10.0.0.50,10.0.0.254,10.0.0.249 ++ WEBUI_JOB_SERVER_PORT=3000 ++ KUE_UI_PORT=3002 ++ WEBUI_HTTP_LISTEN_PORT=8180 ++ 
WEBUI_HTTPS_LISTEN_PORT=8143 ++ WEBUI_SSL_KEY_FILE=/etc/contrail/webui_ssl/cs-key.pem ++ WEBUI_SSL_CERT_FILE=/etc/contrail/webui_ssl/cs-cert.pem ++ WEBUI_SSL_CIPHERS=ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES256-SHA ++ WEBUI_STATIC_AUTH_USER=admin ++ WEBUI_STATIC_AUTH_PASSWORD=contrail123 ++ WEBUI_STATIC_AUTH_ROLE=cloudAdmin ++ XMPP_SERVER_PORT=5269 ++ XMPP_SSL_ENABLE=false ++ XMPP_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ XMPP_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ XMPP_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ LINKLOCAL_SERVICE_PORT=80 ++ LINKLOCAL_SERVICE_NAME=metadata ++ LINKLOCAL_SERVICE_IP=169.254.169.254 ++ IPFABRIC_SERVICE_PORT=8775 ++ INTROSPECT_SSL_ENABLE=false ++ INTROSPECT_SSL_INSECURE=True ++ INTROSPECT_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ INTROSPECT_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ INTROSPECT_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ INTROSPECT_LISTEN_ALL=True ++ SANDESH_SSL_ENABLE=false ++ SANDESH_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SANDESH_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SANDESH_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SANDESH_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SANDESH_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ METADATA_SSL_ENABLE=false ++ METADATA_SSL_CERTFILE= ++ METADATA_SSL_KEYFILE= ++ METADATA_SSL_CA_CERTFILE= ++ METADATA_SSL_CERT_TYPE= ++ CONFIGURE_IPTABLES=false ++ FWAAS_ENABLE=False ++ CONTAINERD_NAMESPACE=k8s.io ++ TOR_AGENT_OVS_KA=10000 ++ TOR_TYPE=ovs ++ TOR_OVS_PROTOCOL=tcp ++ TORAGENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ TORAGENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ TORAGENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ [[ /v3 == \/\v\2\.\0 ]] ++ [[ openstack == \o\p\e\n\s\t\a\c\k ]] ++ AUTH_MODE=keystone ++ [[ keystone == \k\e\y\s\t\o\n\e ]] ++ AUTH_PARAMS='--admin_password contrail123' ++ AUTH_PARAMS+=' --admin_tenant_name admin' ++ AUTH_PARAMS+=' --admin_user admin' ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ read -r -d '' sandesh_client_config ++ true ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ xmpp_certs_config= ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ analytics_api_ssl_opts= ++ read -r -d '' rabbitmq_config ++ true ++ read -r -d '' rabbit_config ++ true ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ kafka_ssl_config= ++ [[ -n '' ]] ++ collector_stats_config= ++ [[ -z '' ]] ++ is_enabled False ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ export TSN_AGENT_MODE= ++ TSN_AGENT_MODE= ++ [[ -n '' ]] ++ collector_stats_config= ++ [[ -z x ]] ++ RSYSLOGD_XFLOW_LISTEN_PORT=9898 + CONFIG=/etc/cassandra/cassandra.yaml + JVM_OPTIONS_CONFIG=/etc/cassandra/jvm.options + cp /etc/cassandra/cassandra.origin 
/etc/cassandra/cassandra.yaml + cp /etc/cassandra/jvm.options.origin /etc/cassandra/jvm.options + for i in '{1..10}' ++ find_my_ip_and_order_for_node_list 10.0.0.50,10.0.0.254,10.0.0.249 ++ local servers=10.0.0.50,10.0.0.254,10.0.0.249 ++ local server_list= ++ IFS=, ++ read -ra server_list ++ cut -d ' ' -f 1 +++ tr '\n' , +++ get_local_ips +++ cat /proc/net/fib_trie +++ awk '/32 host/ { print f } {f=$2}' +++ grep -vi host +++ sort +++ uniq ++ local local_ips=,10.0.0.50,10.20.0.17,127.0.0.1,172.17.0.1,, ++ local ord=1 ++ for server in '"${server_list[@]}"' ++ local ret=0 +++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.50'\''))' ++ local server_ip=10.0.0.50 ++ [[ 0 == 0 ]] ++ [[ -n 10.0.0.50 ]] ++ [[ ,10.0.0.50,10.20.0.17,127.0.0.1,172.17.0.1,, =~ ,10\.0\.0\.50, ]] ++ echo 10.0.0.50 1 ++ return + my_ip=10.0.0.50 + '[' -n 10.0.0.50 ']' + break + '[' -z 10.0.0.50 ']' ++ echo 10.0.0.50,10.0.0.254,10.0.0.249 ++ tr , ' ' ++ wc -w + export CASSANDRA_COUNT=3 + CASSANDRA_COUNT=3 ++ echo 10.0.0.50,10.0.0.254,10.0.0.249 ++ sed 's/,/", "/g' + export 'CASSANDRA_CONNECT_POINTS=10.0.0.50", "10.0.0.254", "10.0.0.249' + CASSANDRA_CONNECT_POINTS='10.0.0.50", "10.0.0.254", "10.0.0.249' ++ echo 10.0.0.50,10.0.0.254,10.0.0.249 ++ cut -d , -f 1,2 + export CASSANDRA_SEEDS=10.0.0.50,10.0.0.254 + CASSANDRA_SEEDS=10.0.0.50,10.0.0.254 + export CASSANDRA_LISTEN_ADDRESS=10.0.0.50 + CASSANDRA_LISTEN_ADDRESS=10.0.0.50 + export CASSANDRA_RPC_ADDRESS=10.0.0.50 + CASSANDRA_RPC_ADDRESS=10.0.0.50 + echo 'INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g' INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g + for yaml in Xmx Xms ++ echo -Xms1g -Xmx2g ++ sed -n 's/.*\(-Xmx[0-9]*[mMgG]\).*/\1/p' + opt=-Xmx2g + [[ -n -Xmx2g ]] ++ echo -Xms1g -Xmx2g ++ sed 's/-Xmx[0-9]*[mMgG]//g' + JVM_EXTRA_OPTS='-Xms1g ' + sed -i 's/^[#]*-Xmx.*/-Xmx2g/g' /etc/cassandra/jvm.options + for yaml in Xmx Xms ++ sed -n 's/.*\(-Xms[0-9]*[mMgG]\).*/\1/p' ++ echo -Xms1g + opt=-Xms1g + [[ -n -Xms1g ]] ++ sed 's/-Xms[0-9]*[mMgG]//g' ++ echo -Xms1g + JVM_EXTRA_OPTS= + sed -i 's/^[#]*-Xms.*/-Xms1g/g' /etc/cassandra/jvm.options + export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201' + JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201' + is_enabled true + local val=true + [[ true == \t\r\u\e ]] + export LOCAL_JMX=no + LOCAL_JMX=no + export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201' + JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201' + is_enabled false + local val=false + [[ false == \t\r\u\e ]] + [[ false == \y\e\s ]] + [[ false == \e\n\a\b\l\e\d ]] + cat + change_variable memtable_flush_writers 4 + local VARIABLE_NAME=memtable_flush_writers + local VARIABLE_VALUE=4 + sed -i 's/.*\(memtable_flush_writers\):.*\([0-9a-z]\)/\1: 4/g' 
/etc/cassandra/cassandra.yaml + change_variable concurrent_compactors 4 + local VARIABLE_NAME=concurrent_compactors + local VARIABLE_VALUE=4 + sed -i 's/.*\(concurrent_compactors\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml + change_variable compaction_throughput_mb_per_sec 256 + local VARIABLE_NAME=compaction_throughput_mb_per_sec + local VARIABLE_VALUE=256 + sed -i 's/.*\(compaction_throughput_mb_per_sec\):.*\([0-9a-z]\)/\1: 256/g' /etc/cassandra/cassandra.yaml + change_variable concurrent_reads 64 + local VARIABLE_NAME=concurrent_reads + local VARIABLE_VALUE=64 + sed -i 's/.*\(concurrent_reads\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml + change_variable concurrent_writes 64 + local VARIABLE_NAME=concurrent_writes + local VARIABLE_VALUE=64 + sed -i 's/.*\(concurrent_writes\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml + change_variable memtable_allocation_type offheap_objects + local VARIABLE_NAME=memtable_allocation_type + local VARIABLE_VALUE=offheap_objects + sed -i 's/.*\(memtable_allocation_type\):.*\([0-9a-z]\)/\1: offheap_objects/g' /etc/cassandra/cassandra.yaml + log_levels_map=([SYS_DEBUG]='DEBUG' [SYS_INFO]='INFO' [SYS_NOTICE]='INFO' [SYS_ERROR]="ERROR") + declare -A log_levels_map + log_level=DEBUG + '[' -n DEBUG ']' + sed -i 's/\(; cluster_name=contrail_database; column_index_cache_size_in_kb=2; column_index_size_in_kb=64; commit_failure_policy=stop; commitlog_compression=null; commitlog_directory=/var/lib/cassandra/commitlog; commitlog_max_compression_buffers_in_pool=3; commitlog_periodic_queue_size=-1; commitlog_segment_size_in_mb=32; commitlog_sync=periodic; commitlog_sync_batch_window_in_ms=NaN; commitlog_sync_period_in_ms=10000; commitlog_total_space_in_mb=null; compaction_large_partition_warning_threshold_mb=100; compaction_throughput_mb_per_sec=256; concurrent_compactors=4; concurrent_counter_writes=32; concurrent_materialized_view_writes=32; concurrent_reads=64; concurrent_replicates=null; concurrent_writes=64; counter_cache_keys_to_save=2147483647; counter_cache_save_period=7200; counter_cache_size_in_mb=null; counter_write_request_timeout_in_ms=5000; credentials_cache_max_entries=1000; credentials_update_interval_in_ms=-1; credentials_validity_in_ms=2000; cross_node_timeout=false; data_file_directories=[Ljava.lang.String;@6b19b79; disk_access_mode=auto; disk_failure_policy=stop; disk_optimization_estimate_percentile=0.95; disk_optimization_page_cross_chance=0.1; disk_optimization_strategy=ssd; dynamic_snitch=true; dynamic_snitch_badness_threshold=0.1; dynamic_snitch_reset_interval_in_ms=600000; dynamic_snitch_update_interval_in_ms=100; enable_materialized_views=true; enable_scripted_user_defined_functions=false; enable_user_defined_functions=false; enable_user_defined_functions_threads=true; encryption_options=null; endpoint_snitch=SimpleSnitch; file_cache_round_up=null; file_cache_size_in_mb=null; gc_log_threshold_in_ms=200; gc_warn_threshold_in_ms=1000; hinted_handoff_disabled_datacenters=[]; hinted_handoff_enabled=true; hinted_handoff_throttle_in_kb=1024; hints_compression=null; hints_directory=null; hints_flush_period_in_ms=10000; incremental_backups=false; index_interval=null; index_summary_capacity_in_mb=null; index_summary_resize_interval_in_minutes=60; initial_token=null; inter_dc_stream_throughput_outbound_megabits_per_sec=200; inter_dc_tcp_nodelay=false; internode_authenticator=null; internode_compression=dc; internode_recv_buff_size_in_bytes=0; internode_send_buff_size_in_bytes=0; key_cache_keys_to_save=2147483647; 
key_cache_save_period=14400; key_cache_size_in_mb=null; listen_address=10.0.0.50; listen_interface=null; listen_interface_prefer_ipv6=false; listen_on_broadcast_address=false; max_hint_window_in_ms=10800000; max_hints_delivery_threads=2; max_hints_file_size_in_mb=128; max_mutation_size_in_kb=null; max_streaming_retries=3; max_value_size_in_mb=256; memtable_allocation_type=offheap_objects; memtable_cleanup_threshold=null; memtable_flush_writers=4; memtable_heap_space_in_mb=null; memtable_offheap_space_in_mb=null; min_free_space_per_drive_in_mb=50; native_transport_max_concurrent_connections=-1; native_transport_max_concurrent_connections_per_ip=-1; native_transport_max_frame_size_in_mb=256; native_transport_max_threads=128; native_transport_port=9042; native_transport_port_ssl=null; num_tokens=256; otc_backlog_expiration_interval_ms=200; otc_coalescing_enough_coalesced_messages=8; otc_coalescing_strategy=DISABLED; otc_coalescing_window_us=200; partitioner=org.apache.cassandra.dht.Murmur3Partitioner; permissions_cache_max_entries=1000; permissions_update_interval_in_ms=-1; permissions_validity_in_ms=2000; phi_convict_threshold=8.0; prepared_statements_cache_size_mb=null; range_request_timeout_in_ms=10000; read_request_timeout_in_ms=5000; request_scheduler=org.apache.cassandra.scheduler.NoScheduler; request_scheduler_id=null; request_scheduler_options=null; request_timeout_in_ms=10000; role_manager=CassandraRoleManager; roles_cache_max_entries=1000; roles_update_interval_in_ms=-1; roles_validity_in_ms=2000; row_cache_class_name=org.apache.cassandra.cache.OHCProvider; row_cache_keys_to_save=2147483647; row_cache_save_period=0; row_cache_size_in_mb=0; rpc_address=10.0.0.50; rpc_interface=null; rpc_interface_prefer_ipv6=false; rpc_keepalive=true; rpc_listen_backlog=50; rpc_max_threads=2147483647; rpc_min_threads=16; rpc_port=9160; rpc_recv_buff_size_in_bytes=null; rpc_send_buff_size_in_bytes=null; rpc_server_type=sync; saved_caches_directory=/var/lib/cassandra/saved_caches; seed_provider=org.apache.cassandra.locator.SimpleSeedProvider{seeds=10.0.0.50,10.0.0.254}; server_encryption_options=; slow_query_log_timeout_in_ms=500; snapshot_before_compaction=false; ssl_storage_port=7001; sstable_preemptive_open_interval_in_mb=50; start_native_transport=true; start_rpc=true; storage_port=7000; stream_throughput_outbound_megabits_per_sec=200; streaming_keep_alive_period_in_secs=300; streaming_socket_timeout_in_ms=86400000; thrift_framed_transport_size_in_mb=15; thrift_max_message_length_in_mb=16; thrift_prepared_statements_cache_size_mb=null; tombstone_failure_threshold=100000; tombstone_warn_threshold=1000; tracetype_query_ttl=86400; tracetype_repair_ttl=604800; transparent_data_encryption_options=org.apache.cassandra.config.TransparentDataEncryptionOptions@2a32de6c; trickle_fsync=false; trickle_fsync_interval_in_kb=10240; truncate_request_timeout_in_ms=60000; unlogged_batch_across_partitions_warn_threshold=10; user_defined_function_fail_timeout=1500; user_defined_function_warn_timeout=500; user_function_timeout_policy=die; windows_timer_interval=1; write_request_timeout_in_ms=2000] INFO [main] 2025-08-25 01:35:24,633 DatabaseDescriptor.java:367 - DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap INFO [main] 2025-08-25 01:35:24,634 DatabaseDescriptor.java:425 - Global memtable on-heap threshold is enabled at 502MB INFO [main] 2025-08-25 01:35:24,634 DatabaseDescriptor.java:429 - Global memtable off-heap threshold is enabled at 502MB INFO [main] 2025-08-25 01:35:24,668 
RateBasedBackPressure.java:123 - Initialized back-pressure with high ratio: 0.9, factor: 5, flow: FAST, window size: 2000. INFO [main] 2025-08-25 01:35:24,669 DatabaseDescriptor.java:729 - Back-pressure is disabled with strategy org.apache.cassandra.net.RateBasedBackPressure{high_ratio=0.9, factor=5, flow=FAST}. INFO [main] 2025-08-25 01:35:24,998 JMXServerUtils.java:246 - Configured JMX server at: service:jmx:rmi://0.0.0.0/jndi/rmi://0.0.0.0:7201/jmxrmi INFO [main] 2025-08-25 01:35:25,013 CassandraDaemon.java:473 - Hostname: cn-jenkins-deploy-platform-ansible-os-3915-1. INFO [main] 2025-08-25 01:35:25,016 CassandraDaemon.java:480 - JVM vendor/version: OpenJDK 64-Bit Server VM/1.8.0_322 INFO [main] 2025-08-25 01:35:25,017 CassandraDaemon.java:481 - Heap size: 984.000MiB/1.961GiB INFO [main] 2025-08-25 01:35:25,017 CassandraDaemon.java:486 - Code Cache Non-heap memory: init = 2555904(2496K) used = 4600064(4492K) committed = 4653056(4544K) max = 251658240(245760K) INFO [main] 2025-08-25 01:35:25,022 CassandraDaemon.java:486 - Metaspace Non-heap memory: init = 0(0K) used = 19583920(19124K) committed = 20316160(19840K) max = -1(-1K) INFO [main] 2025-08-25 01:35:25,025 CassandraDaemon.java:486 - Compressed Class Space Non-heap memory: init = 0(0K) used = 2278952(2225K) committed = 2490368(2432K) max = 1073741824(1048576K) INFO [main] 2025-08-25 01:35:25,025 CassandraDaemon.java:486 - Par Eden Space Heap memory: init = 335544320(327680K) used = 93992488(91789K) committed = 335544320(327680K) max = 335544320(327680K) INFO [main] 2025-08-25 01:35:25,025 CassandraDaemon.java:486 - Par Survivor Space Heap memory: init = 41943040(40960K) used = 0(0K) committed = 41943040(40960K) max = 41943040(40960K) INFO [main] 2025-08-25 01:35:25,031 CassandraDaemon.java:486 - CMS Old Gen Heap memory: init = 654311424(638976K) used = 0(0K) committed = 654311424(638976K) max = 1728053248(1687552K) INFO [main] 2025-08-25 01:35:25,032 CassandraDaemon.java:488 - Classpath: 
/opt/cassandra/conf:/opt/cassandra/build/classes/main:/opt/cassandra/build/classes/thrift:/opt/cassandra/lib/airline-0.6.jar:/opt/cassandra/lib/antlr-runtime-3.5.2.jar:/opt/cassandra/lib/apache-cassandra-3.11.3.jar:/opt/cassandra/lib/apache-cassandra-thrift-3.11.3.jar:/opt/cassandra/lib/asm-5.0.4.jar:/opt/cassandra/lib/caffeine-2.2.6.jar:/opt/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar:/opt/cassandra/lib/commons-cli-1.1.jar:/opt/cassandra/lib/commons-codec-1.9.jar:/opt/cassandra/lib/commons-lang3-3.1.jar:/opt/cassandra/lib/commons-math3-3.2.jar:/opt/cassandra/lib/compress-lzf-0.8.4.jar:/opt/cassandra/lib/concurrentlinkedhashmap-lru-1.4.jar:/opt/cassandra/lib/concurrent-trees-2.4.0.jar:/opt/cassandra/lib/disruptor-3.0.1.jar:/opt/cassandra/lib/ecj-4.4.2.jar:/opt/cassandra/lib/guava-18.0.jar:/opt/cassandra/lib/HdrHistogram-2.1.9.jar:/opt/cassandra/lib/high-scale-lib-1.0.6.jar:/opt/cassandra/lib/hppc-0.5.4.jar:/opt/cassandra/lib/jackson-core-asl-1.9.13.jar:/opt/cassandra/lib/jackson-mapper-asl-1.9.13.jar:/opt/cassandra/lib/jamm-0.3.0.jar:/opt/cassandra/lib/javax.inject.jar:/opt/cassandra/lib/jbcrypt-0.3m.jar:/opt/cassandra/lib/jcl-over-slf4j-1.7.7.jar:/opt/cassandra/lib/jctools-core-1.2.1.jar:/opt/cassandra/lib/jflex-1.6.0.jar:/opt/cassandra/lib/jna-4.2.2.jar:/opt/cassandra/lib/joda-time-2.4.jar:/opt/cassandra/lib/json-simple-1.1.jar:/opt/cassandra/lib/jstackjunit-0.0.1.jar:/opt/cassandra/lib/libthrift-0.13.0.jar:/opt/cassandra/lib/log4j-over-slf4j-1.7.7.jar:/opt/cassandra/lib/logback-classic-1.2.9.jar:/opt/cassandra/lib/logback-core-1.2.9.jar:/opt/cassandra/lib/lz4-1.3.0.jar:/opt/cassandra/lib/metrics-core-3.1.5.jar:/opt/cassandra/lib/metrics-jvm-3.1.5.jar:/opt/cassandra/lib/metrics-logback-3.1.5.jar:/opt/cassandra/lib/netty-all-4.1.39.Final.jar:/opt/cassandra/lib/ohc-core-0.4.4.jar:/opt/cassandra/lib/ohc-core-j8-0.4.4.jar:/opt/cassandra/lib/reporter-config3-3.0.3.jar:/opt/cassandra/lib/reporter-config-base-3.0.3.jar:/opt/cassandra/lib/sigar-1.6.4.jar:/opt/cassandra/lib/slf4j-api-1.7.7.jar:/opt/cassandra/lib/snakeyaml-1.11.jar:/opt/cassandra/lib/snappy-java-1.1.1.7.jar:/opt/cassandra/lib/snowball-stemmer-1.3.0.581.1.jar:/opt/cassandra/lib/ST4-4.0.8.jar:/opt/cassandra/lib/stream-2.5.2.jar:/opt/cassandra/lib/thrift-server-0.3.7.jar:/opt/cassandra/lib/jsr223/*/*.jar:/opt/cassandra/lib/jamm-0.3.0.jar INFO [main] 2025-08-25 01:35:25,032 CassandraDaemon.java:490 - JVM Arguments: [-Xloggc:/opt/cassandra/logs/gc.log, -ea, -XX:+UseThreadPriorities, -XX:ThreadPriorityPolicy=42, -XX:+HeapDumpOnOutOfMemoryError, -Xss256k, -XX:StringTableSize=1000003, -XX:+AlwaysPreTouch, -XX:-UseBiasedLocking, -XX:+UseTLAB, -XX:+ResizeTLAB, -XX:+UseNUMA, -XX:+PerfDisableSharedMem, -Djava.net.preferIPv4Stack=true, -Xms1g, -Xmx2g, -XX:+UseParNewGC, -XX:+UseConcMarkSweepGC, -XX:+CMSParallelRemarkEnabled, -XX:SurvivorRatio=8, -XX:MaxTenuringThreshold=1, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:CMSWaitDuration=10000, -XX:+CMSParallelInitialMarkEnabled, -XX:+CMSEdenChunksRecordAlways, -XX:+CMSClassUnloadingEnabled, -XX:+PrintGCDetails, -XX:+PrintGCDateStamps, -XX:+PrintHeapAtGC, -XX:+PrintTenuringDistribution, -XX:+PrintGCApplicationStoppedTime, -XX:+PrintPromotionFailure, -XX:+UseGCLogFileRotation, -XX:NumberOfGCLogFiles=10, -XX:GCLogFileSize=10M, -Xmn400M, -XX:+UseCondCardMark, -XX:CompileCommandFile=/opt/cassandra/conf/hotspot_compiler, -javaagent:/opt/cassandra/lib/jamm-0.3.0.jar, -Dcassandra.jmx.remote.port=7199, -Dcom.sun.management.jmxremote.rmi.port=7199, 
-Dcom.sun.management.jmxremote.authenticate=true, -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password, -Djava.library.path=/opt/cassandra/lib/sigar-bin, -Dcassandra.rpc_port=9161, -Dcassandra.native_transport_port=9041, -Dcassandra.ssl_storage_port=7013, -Dcassandra.storage_port=7012, -Dcassandra.jmx.local.port=7201, -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access, -Dcassandra.jmx.remote.port=7201, -Dcom.sun.management.jmxremote.rmi.port=7201, -Dcassandra.libjemalloc=/usr/lib64/libjemalloc.so.1, -XX:OnOutOfMemoryError=kill -9 %p, -Dlogback.configurationFile=logback.xml, -Dcassandra.logdir=/opt/cassandra/logs, -Dcassandra.storagedir=/opt/cassandra/data, -Dcassandra-foreground=yes] WARN [main] 2025-08-25 01:35:25,133 NativeLibrary.java:187 - Unable to lock JVM memory (ENOMEM). This can result in part of the JVM being swapped out, especially with mmapped I/O enabled. Increase RLIMIT_MEMLOCK or run Cassandra as root. INFO [main] 2025-08-25 01:35:25,150 StartupChecks.java:140 - jemalloc seems to be preloaded from /usr/lib64/libjemalloc.so.1 INFO [main] 2025-08-25 01:35:25,152 StartupChecks.java:176 - JMX is enabled to receive remote connections on port: 7201 INFO [main] 2025-08-25 01:35:25,156 SigarLibrary.java:44 - Initializing SIGAR library INFO [main] 2025-08-25 01:35:25,183 SigarLibrary.java:180 - Checked OS settings and found them configured for optimal performance. WARN [main] 2025-08-25 01:35:25,186 StartupChecks.java:311 - Maximum number of memory map areas per process (vm.max_map_count) 128960 is too low, recommended value: 1048575, you can change it with sysctl. WARN [main] 2025-08-25 01:35:25,216 StartupChecks.java:332 - Directory /var/lib/cassandra/commitlog doesn't exist WARN [main] 2025-08-25 01:35:25,222 StartupChecks.java:332 - Directory /var/lib/cassandra/saved_caches doesn't exist WARN [main] 2025-08-25 01:35:25,224 StartupChecks.java:332 - Directory /opt/cassandra/data/hints doesn't exist INFO [main] 2025-08-25 01:35:25,273 QueryProcessor.java:116 - Initialized prepared statement caches with 10 MB (native) and 10 MB (Thrift) INFO [main] 2025-08-25 01:35:25,994 ColumnFamilyStore.java:411 - Initializing system.IndexInfo INFO [main] 2025-08-25 01:35:27,548 ColumnFamilyStore.java:411 - Initializing system.batches INFO [main] 2025-08-25 01:35:27,565 ColumnFamilyStore.java:411 - Initializing system.paxos INFO [main] 2025-08-25 01:35:27,599 ColumnFamilyStore.java:411 - Initializing system.local INFO [main] 2025-08-25 01:35:27,604 ColumnFamilyStore.java:411 - Initializing system.peers INFO [main] 2025-08-25 01:35:27,627 ColumnFamilyStore.java:411 - Initializing system.peer_events INFO [main] 2025-08-25 01:35:27,632 ColumnFamilyStore.java:411 - Initializing system.range_xfers INFO [main] 2025-08-25 01:35:27,649 ColumnFamilyStore.java:411 - Initializing system.compaction_history INFO [main] 2025-08-25 01:35:27,662 ColumnFamilyStore.java:411 - Initializing system.sstable_activity INFO [main] 2025-08-25 01:35:27,685 ColumnFamilyStore.java:411 - Initializing system.size_estimates INFO [main] 2025-08-25 01:35:27,698 ColumnFamilyStore.java:411 - Initializing system.available_ranges INFO [main] 2025-08-25 01:35:27,704 ColumnFamilyStore.java:411 - Initializing system.transferred_ranges INFO [main] 2025-08-25 01:35:27,731 ColumnFamilyStore.java:411 - Initializing system.views_builds_in_progress INFO [main] 2025-08-25 01:35:27,736 ColumnFamilyStore.java:411 - Initializing system.built_views INFO [main] 2025-08-25 01:35:27,740 
ColumnFamilyStore.java:411 - Initializing system.hints INFO [main] 2025-08-25 01:35:27,748 ColumnFamilyStore.java:411 - Initializing system.batchlog INFO [main] 2025-08-25 01:35:27,751 ColumnFamilyStore.java:411 - Initializing system.prepared_statements INFO [main] 2025-08-25 01:35:27,759 ColumnFamilyStore.java:411 - Initializing system.schema_keyspaces INFO [main] 2025-08-25 01:35:27,764 ColumnFamilyStore.java:411 - Initializing system.schema_columnfamilies INFO [main] 2025-08-25 01:35:27,772 ColumnFamilyStore.java:411 - Initializing system.schema_columns INFO [main] 2025-08-25 01:35:27,778 ColumnFamilyStore.java:411 - Initializing system.schema_triggers INFO [main] 2025-08-25 01:35:27,788 ColumnFamilyStore.java:411 - Initializing system.schema_usertypes INFO [main] 2025-08-25 01:35:27,792 ColumnFamilyStore.java:411 - Initializing system.schema_functions INFO [main] 2025-08-25 01:35:27,797 ColumnFamilyStore.java:411 - Initializing system.schema_aggregates INFO [main] 2025-08-25 01:35:27,798 ViewManager.java:137 - Not submitting build tasks for views in keyspace system as storage service is not initialized INFO [main] 2025-08-25 01:35:27,921 ApproximateTime.java:44 - Scheduling approximate time-check task with a precision of 10 milliseconds INFO [main] 2025-08-25 01:35:27,945 ColumnFamilyStore.java:411 - Initializing system_schema.keyspaces INFO [main] 2025-08-25 01:35:27,960 ColumnFamilyStore.java:411 - Initializing system_schema.tables INFO [main] 2025-08-25 01:35:27,968 ColumnFamilyStore.java:411 - Initializing system_schema.columns INFO [main] 2025-08-25 01:35:27,978 ColumnFamilyStore.java:411 - Initializing system_schema.triggers INFO [main] 2025-08-25 01:35:27,983 ColumnFamilyStore.java:411 - Initializing system_schema.dropped_columns INFO [main] 2025-08-25 01:35:27,986 ColumnFamilyStore.java:411 - Initializing system_schema.views INFO [main] 2025-08-25 01:35:27,988 ColumnFamilyStore.java:411 - Initializing system_schema.types INFO [main] 2025-08-25 01:35:27,991 ColumnFamilyStore.java:411 - Initializing system_schema.functions INFO [main] 2025-08-25 01:35:27,994 ColumnFamilyStore.java:411 - Initializing system_schema.aggregates INFO [main] 2025-08-25 01:35:27,997 ColumnFamilyStore.java:411 - Initializing system_schema.indexes INFO [main] 2025-08-25 01:35:27,998 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_schema as storage service is not initialized INFO [MemtableFlushWriter:1] 2025-08-25 01:35:28,677 CacheService.java:112 - Initializing key cache with capacity of 49 MBs. INFO [MemtableFlushWriter:1] 2025-08-25 01:35:28,698 CacheService.java:134 - Initializing row cache with capacity of 0 MBs INFO [MemtableFlushWriter:1] 2025-08-25 01:35:28,701 CacheService.java:163 - Initializing counter cache with capacity of 24 MBs INFO [MemtableFlushWriter:1] 2025-08-25 01:35:28,706 CacheService.java:174 - Scheduling counter cache save to every 7200 seconds (going to save all keys). 
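The `+`-prefixed shell trace earlier in this log shows how the entrypoint expands the comma-separated *_NODES lists into per-service server strings and derives the Cassandra bootstrap settings before handing off to the daemon. A minimal bash sketch of that logic, reconstructed from the traced commands (the body of get_server_list is paraphrased from the trace, not copied from the /functions.sh shipped in the container; the nodes and my_ip variables below are stand-ins for the values printed in the trace):

    # Expand "ip1,ip2,ip3" plus a port-with-delimiter into a server string,
    # e.g. get_server_list ANALYTICS ':8081 ' -> "10.0.0.50:8081 10.0.0.254:8081 10.0.0.249:8081"
    get_server_list() {
      local server_typ=$1_NODES
      local port_with_delim=$2
      local server_list=''
      IFS=',' read -ra server_list <<< "${!server_typ}"
      local extended_server_list=''
      for server in "${server_list[@]}"; do
        local server_address=$(echo $server)   # unquoted echo trims stray whitespace, as in the trace
        extended_server_list+="${server_address}${port_with_delim}"
      done
      [ -n "$extended_server_list" ] && echo "${extended_server_list%?}"   # drop trailing delimiter
    }

    # Cassandra bootstrap values as derived in the trace; my_ip comes from
    # find_my_ip_and_order_for_node_list, which matches local interface IPs against the node list.
    nodes=10.0.0.50,10.0.0.254,10.0.0.249
    my_ip=10.0.0.50
    export CASSANDRA_COUNT="$(echo "$nodes" | tr ',' ' ' | wc -w)"            # 3
    export CASSANDRA_CONNECT_POINTS="$(echo "$nodes" | sed 's/,/", "/g')"     # 10.0.0.50", "10.0.0.254", "10.0.0.249
    export CASSANDRA_SEEDS="$(echo "$nodes" | cut -d , -f 1,2)"               # first two nodes act as seeds
    export CASSANDRA_LISTEN_ADDRESS=$my_ip
    export CASSANDRA_RPC_ADDRESS=$my_ip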
INFO [CompactionExecutor:4] 2025-08-25 01:35:29,038 BufferPool.java:230 - Global buffer pool is enabled, when pool is exhausted (max is 502.000MiB) it will allocate on heap
INFO [main] 2025-08-25 01:35:29,181 StorageService.java:600 - Populating token metadata from system tables
INFO [main] 2025-08-25 01:35:29,215 StorageService.java:607 - Token metadata:
INFO [pool-4-thread-1] 2025-08-25 01:35:29,230 AutoSavingCache.java:174 - Completed loading (0 ms; 5 keys) KeyCache cache
INFO [main] 2025-08-25 01:35:29,245 CommitLog.java:152 - No commitlog files found; skipping replay
INFO [main] 2025-08-25 01:35:29,246 StorageService.java:600 - Populating token metadata from system tables
INFO [main] 2025-08-25 01:35:29,259 StorageService.java:607 - Token metadata:
INFO [main] 2025-08-25 01:35:29,347 QueryProcessor.java:163 - Preloaded 0 prepared statements
INFO [main] 2025-08-25 01:35:29,348 StorageService.java:618 - Cassandra version: 3.11.3
INFO [main] 2025-08-25 01:35:29,348 StorageService.java:619 - Thrift API version: 20.1.0
INFO [main] 2025-08-25 01:35:29,348 StorageService.java:620 - CQL supported versions: 3.4.4 (default: 3.4.4)
INFO [main] 2025-08-25 01:35:29,348 StorageService.java:622 - Native protocol supported versions: 3/v3, 4/v4, 5/v5-beta (default: 4/v4)
INFO [main] 2025-08-25 01:35:29,415 IndexSummaryManager.java:85 - Initializing index summary manager with a memory pool size of 49 MB and a resize interval of 60 minutes
INFO [main] 2025-08-25 01:35:29,424 MessagingService.java:761 - Starting Messaging Service on /10.0.0.50:7012 (ens3)
WARN [main] 2025-08-25 01:35:29,427 SystemKeyspace.java:1087 - No host ID found, created 1144bbfa-70c2-467b-9342-79e604f43f49 (Note: This should happen exactly once per node).
INFO [main] 2025-08-25 01:35:29,490 OutboundTcpConnection.java:108 - OutboundTcpConnection using coalescing strategy DISABLED
INFO [HANDSHAKE-/10.0.0.254] 2025-08-25 01:35:29,515 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254
INFO [HANDSHAKE-/10.0.0.249] 2025-08-25 01:35:29,518 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.249
+ cqlsh 10.0.0.50 9041 -e 'CREATE KEYSPACE IF NOT EXISTS reaper_db WITH replication = {'\''class'\'': '\''NetworkTopologyStrategy'\'', '\''datacenter1'\'': 3};'
Connection error: ('Unable to connect to any servers', {'10.0.0.50': error(111, "Tried connecting to [('10.0.0.50', 9041)]. Last error: Connection refused")})
+ sleep 10
INFO [main] 2025-08-25 01:35:35,519 StorageService.java:704 - Loading persisted ring state
INFO [main] 2025-08-25 01:35:35,520 StorageService.java:822 - Starting up server gossip
INFO [main] 2025-08-25 01:35:35,589 StorageService.java:883 - This node will not auto bootstrap because it is configured to be a seed node.
INFO [main] 2025-08-25 01:35:35,594 BootStrapper.java:228 - Generated random tokens.
tokens are [-2807616796715957186, 5485702775691441096, -6405518406886541152, -3505691596755704318, 676256560032877422, -8534551998704717890, -3328515780659674832, 2618340644323403120, 6825325630956098637, 776308616334874344, 4384694894274557856, 7776964252401932154, 4769066525267046947, 1156855802990948772, -2111225211502336267, -4434274777493941748, 677305890573667462, -5325332207640563649, 586925385848902299, -6073855932938360874, 248521266015225946, -7925845523049583769, -8102910702264428826, -715516046082019849, -628310227805258592, 1981061661748300011, -3516395498038934420, -3224242729961388483, -2429211838159524217, 1612227321589816521, -7375036466866973133, -1976309252794620591, 3315801477251218575, 4154304147794627630, -2825012539990780615, -1095410778567937250, -2740487282247309637, 6903842824702902789, 3351809722961944655, 8382913062820569353, 2441746179451306295, 3126699340168232307, -8850371742589704081, 3455170563312486467, 883681682309243201, 3426432818655511302, 3664780679308549095, 8200485509815313880, 2235761904708862039, -1068441300478359922, 9051848138729614307, -1234688668143690078, 1386094388240798371, 3247061840542299609, -8322808623876585995, -6639856974286509389, 4245576770883506744, 622155316796416224, -8341784077326522326, -8097873317595793823, 2357981322825284939, 8541522064866997857, -7325194401619197329, 6857273097556240752, 6933948299523018883, 1071626145854567239, -8456043116857083116, 5046665137444109045, 6094670888652146444, 3481712853155428748, -4321478502558403532, -8052660704996370808, 3015201340698051693, -7844349670582028000, 5408171098669070785, -2017824721261524412, 7460238632285797273, 2891710263572066817, 3378367830328548488, -321773734346065105, 5205048993479115277, -159126347976039360, -6763696512194517635, -6904824849924859616, 2347927683646139380, -1875550027992289410, 60930909963746326, -234330031209074278, -6529855644936352804, -3464228044227952797, 2624900974568777970, 8475384606585350665, 8964231210050926984, -8428934393671600296, 4384705237949471391, -3793749229619252892, -4143198141596159724, 8810157913210651909, -2558401487172705902, 8857709677457847348, -3784814733808442784, -491488413420932955, -6560530830771694565, 2204125735429927526, 6510558083200963124, -1483341109750670291, -7716019406637515710, -1772391165464730760, -54004905494185821, -4536118834020056214, -7163636794750686480, -4091335878313071098, 3410347696048411944, 825347499127134008, 8294802846338418461, -1464470794617764292, -3805249150157223706, -6048390298089700721, -3148929831937238679, 6953445008509921925, -5055678720034755563, 1637135547132182454, -7232204970066237232, -722224000690730364, -5171965159082867716, 8707783103061001635, 6764128773770374136, -8258993261430075166, -6278136260211286255, 5777335576842431198, -4860493886672894238, 9090106206906042746, 6317147121977754151, -8864323219532656511, 6209392072308450566, -2243586678631032963, 2972071114338853254, -1046703987799374327, 8960909833315715167, -2241330441446955677, 2885158108798052526, 7012393739419223034, 3528910699449438389, -504587197300102710, 2483587877122085690, 1557813120847726314, 4005798115597172357, 6406013187659085355, -662538913092615026, -4883569613378462471, -3722199555124921364, -1572135559417000398, 1499243584807652302, -2950244073981609700, 3522686011267728208, -492580396503705175, 2576114194952530981, 6641486193783679164, 5032018876214010279, -3725598658400305451, -8076645807299955066, -7992934045060639862, 9214193155627438659, -1812922044999313209, 2551917663437909242, 
7351414540795790108, 2360335833185214657, 5016417854634025947, 560405145125592724, 2631803381508968810, -4264894214237531902, 7080450136056880457, 5664291740344324557, 8714442338712568895, -751645792425765540, 3452215380951881939, -1329831482630162157, -2357553942810641165, -2921540152000146342, 2179338726541220076, 8264942518067966144, -8462030073748790784, 2053335642361270650, -4901073363767203406, 7202669970479259232, 8066069154727667436, -7333772406448304762, 3337901743585771331, -9183932456401004826, 8707166827061350071, -1694371899051581161, -5840973370553348220, -6150429260031761065, 7095797683118132695, 6094525909568591954, 3824482910386511168, 3763877709195163131, -2681956663237098209, -7619841253164261041, 1949360667842197108, -4721767991096108538, 330961137167467825, 7926626534130605898, 7334532180480388416, -4886229933995212488, 1541130743981010344, -15912496034833003, 704980091965037199, -3752179573646904439, 530018759638044470, 5772373342997908250, -2696710550152509137, -6960689986480153183, 7515488102491733456, 3206454964403154826, -841563415802502578, -9022753390868745291, 1080511086402293426, 1531436268688878722, -3252586430576592253, 5077341520023135915, 2540340883585772768, 4609130578995337714, -8591914531750851008, -8654291527033133717, -954868544877666751, -9024560925152971066, 1374947776999418788, 4991943143677249664, 6861487511106982971, -7877758003817815988, -2243745484168375925, 7564615129332353591, -9212851549595289011, 7493120201804044536, 6041848929797815474, -266830754603022346, 8048928556995787707, 6593549137023826742, -3114381410568609665, -6243119491983249055, 2361647483118021942, 6989658978151414010, 5870868974645119028, -5439819757650427715, 528860758208598548, -8910286657846496535, -4617493110108150457, -6567048838207667927, -4933348610361358898, -7987666849580724468, -7768904484488998576, -2383149090845302078, 2233856731509307551, 3437740793771047377, 1147502063321066074] INFO [main] 2025-08-25 01:35:35,598 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_traces, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=2}}, tables=[org.apache.cassandra.config.CFMetaData@60198389[cfId=c5e99f16-8677-3914-b17e-960613512345,ksName=system_traces,cfName=sessions,flags=[COMPOUND],params=TableParams{comment=tracing sessions, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=0, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [client command coordinator duration request started_at parameters]],partitionKeyColumns=[session_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[client, command, session_id, coordinator, request, started_at, duration, parameters],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@73352689[cfId=8826e8e9-e16a-3728-8753-3bc1fc713c25,ksName=system_traces,cfName=events,flags=[COMPOUND],params=TableParams{comment=tracing events, read_repair_chance=0.0, 
dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=0, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | [activity source source_elapsed thread]],partitionKeyColumns=[session_id],clusteringColumns=[event_id],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[activity, event_id, session_id, source, thread, source_elapsed],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-08-25 01:35:35,976 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_traces as storage service is not initialized INFO [MigrationStage:1] 2025-08-25 01:35:35,979 ColumnFamilyStore.java:411 - Initializing system_traces.events INFO [MigrationStage:1] 2025-08-25 01:35:35,983 ColumnFamilyStore.java:411 - Initializing system_traces.sessions INFO [main] 2025-08-25 01:35:36,001 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_distributed, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@2d2f584f[cfId=759fffad-624b-3181-80ee-fa9a52d1f627,ksName=system_distributed,cfName=repair_history,flags=[COMPOUND],params=TableParams{comment=Repair history, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | [coordinator exception_message exception_stacktrace finished_at parent_id range_begin range_end started_at status participants]],partitionKeyColumns=[keyspace_name, columnfamily_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[status, id, coordinator, finished_at, participants, exception_stacktrace, parent_id, range_end, range_begin, exception_message, keyspace_name, started_at, columnfamily_name],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@4addbb14[cfId=deabd734-b99d-3b9c-92e5-fd92eb5abf14,ksName=system_distributed,cfName=parent_repair_history,flags=[COMPOUND],params=TableParams{comment=Repair history, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, 
caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [exception_message exception_stacktrace finished_at keyspace_name started_at columnfamily_names options requested_ranges successful_ranges]],partitionKeyColumns=[parent_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[requested_ranges, exception_message, keyspace_name, successful_ranges, started_at, finished_at, options, exception_stacktrace, parent_id, columnfamily_names],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@43c04787[cfId=5582b59f-8e4e-35e1-b913-3acada51eb04,ksName=system_distributed,cfName=view_build_status,flags=[COMPOUND],params=TableParams{comment=Materialized View build status, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UUIDType),partitionColumns=[[] | [status]],partitionKeyColumns=[keyspace_name, view_name],clusteringColumns=[host_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[view_name, status, keyspace_name, host_id],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-08-25 01:35:36,123 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_distributed as storage service is not initialized INFO [MigrationStage:1] 2025-08-25 01:35:36,127 ColumnFamilyStore.java:411 - Initializing system_distributed.parent_repair_history INFO [MigrationStage:1] 2025-08-25 01:35:36,132 ColumnFamilyStore.java:411 - Initializing system_distributed.repair_history INFO [MigrationStage:1] 2025-08-25 01:35:36,137 ColumnFamilyStore.java:411 - Initializing system_distributed.view_build_status INFO [main] 2025-08-25 01:35:36,158 StorageService.java:1446 - JOINING: Finish joining ring INFO [GossipStage:1] 2025-08-25 01:35:36,271 Gossiper.java:1055 - Node /10.0.0.249 is now part of the cluster INFO [GossipStage:1] 2025-08-25 01:35:36,276 Gossiper.java:1055 - Node /10.0.0.254 is now part of the cluster INFO [main] 2025-08-25 01:35:36,294 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=system_auth, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=1}}, tables=[org.apache.cassandra.config.CFMetaData@2c9ac1dd[cfId=5bc52802-de25-35ed-aeab-188eecebb090,ksName=system_auth,cfName=roles,flags=[COMPOUND],params=TableParams{comment=role definitions, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, 
memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [can_login is_superuser salted_hash member_of]],partitionKeyColumns=[role],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[salted_hash, member_of, role, can_login, is_superuser],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@456c711d[cfId=0ecdaa87-f8fb-3e60-88d1-74fb36fe5c0d,ksName=system_auth,cfName=role_members,flags=[COMPOUND],params=TableParams{comment=role memberships lookup table, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | []],partitionKeyColumns=[role],clusteringColumns=[member],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[role, member],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@113359ab[cfId=3afbe79f-2194-31a7-add7-f5ab90d8ec9c,ksName=system_auth,cfName=role_permissions,flags=[COMPOUND],params=TableParams{comment=permissions granted to db roles, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [permissions]],partitionKeyColumns=[role],clusteringColumns=[resource],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[role, resource, permissions],droppedColumns={},triggers=[],indexes=[]], org.apache.cassandra.config.CFMetaData@581e9420[cfId=5f2fbdad-91f1-3946-bd25-d5da3a5c35ec,ksName=system_auth,cfName=resource_role_permissons_index,flags=[COMPOUND],params=TableParams{comment=index of db roles with permissions granted on a resource, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=7776000, default_time_to_live=0, memtable_flush_period_in_ms=3600000, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, 
options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | []],partitionKeyColumns=[resource],clusteringColumns=[role],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[resource, role],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [RequestResponseStage-1] 2025-08-25 01:35:36,311 Gossiper.java:1019 - InetAddress /10.0.0.249 is now UP INFO [RequestResponseStage-3] 2025-08-25 01:35:36,371 Gossiper.java:1019 - InetAddress /10.0.0.254 is now UP INFO [GossipStage:1] 2025-08-25 01:35:36,387 TokenMetadata.java:479 - Updating topology for /10.0.0.254 INFO [GossipStage:1] 2025-08-25 01:35:36,387 TokenMetadata.java:479 - Updating topology for /10.0.0.254 INFO [MigrationStage:1] 2025-08-25 01:35:36,494 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_auth as storage service is not initialized INFO [MigrationStage:1] 2025-08-25 01:35:36,496 ColumnFamilyStore.java:411 - Initializing system_auth.resource_role_permissons_index INFO [MigrationStage:1] 2025-08-25 01:35:36,504 ColumnFamilyStore.java:411 - Initializing system_auth.role_members INFO [MigrationStage:1] 2025-08-25 01:35:36,508 ColumnFamilyStore.java:411 - Initializing system_auth.role_permissions INFO [MigrationStage:1] 2025-08-25 01:35:36,513 ColumnFamilyStore.java:411 - Initializing system_auth.roles INFO [main] 2025-08-25 01:35:36,524 Gossiper.java:1692 - Waiting for gossip to settle... INFO [HANDSHAKE-/10.0.0.254] 2025-08-25 01:35:36,529 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 WARN [GossipTasks:1] 2025-08-25 01:35:36,563 FailureDetector.java:288 - Not marking nodes down due to local pause of 7339972155 > 5000000000 INFO [HANDSHAKE-/10.0.0.249] 2025-08-25 01:35:39,271 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.249 + cqlsh 10.0.0.50 9041 -e 'CREATE KEYSPACE IF NOT EXISTS reaper_db WITH replication = {'\''class'\'': '\''NetworkTopologyStrategy'\'', '\''datacenter1'\'': 3};' Connection error: ('Unable to connect to any servers', {'10.0.0.50': error(111, "Tried connecting to [('10.0.0.50', 9041)]. 
Last error: Connection refused")}) + sleep 10 INFO [main] 2025-08-25 01:35:44,525 Gossiper.java:1723 - No gossip backlog; proceeding INFO [main] 2025-08-25 01:35:44,840 NativeTransportService.java:70 - Netty using native Epoll event loop INFO [main] 2025-08-25 01:35:44,947 Server.java:155 - Using Netty Version: [netty-buffer=netty-buffer-4.1.39.Final.88c2a4c (repository: dirty), netty-codec=netty-codec-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-dns=netty-codec-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-haproxy=netty-codec-haproxy-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http=netty-codec-http-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http2=netty-codec-http2-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-memcache=netty-codec-memcache-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-mqtt=netty-codec-mqtt-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-redis=netty-codec-redis-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-smtp=netty-codec-smtp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-socks=netty-codec-socks-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-stomp=netty-codec-stomp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-xml=netty-codec-xml-4.1.39.Final.88c2a4c (repository: dirty), netty-common=netty-common-4.1.39.Final.88c2a4c (repository: dirty), netty-handler=netty-handler-4.1.39.Final.88c2a4c (repository: dirty), netty-handler-proxy=netty-handler-proxy-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver=netty-resolver-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver-dns=netty-resolver-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-tcnative=netty-tcnative-2.0.25.Final.c46c351, netty-transport=netty-transport-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-epoll=netty-transport-native-epoll-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-kqueue=netty-transport-native-kqueue-4.1.39.Final.88c2a4cab5 (repository: dirty), netty-transport-native-unix-common=netty-transport-native-unix-common-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-rxtx=netty-transport-rxtx-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-sctp=netty-transport-sctp-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-udt=netty-transport-udt-4.1.39.Final.88c2a4c (repository: dirty)] INFO [main] 2025-08-25 01:35:44,948 Server.java:156 - Starting listening for CQL clients on /10.0.0.50:9041 (unencrypted)... INFO [main] 2025-08-25 01:35:45,009 ThriftServer.java:116 - Binding thrift service to /10.0.0.50:9161 INFO [Thread-2] 2025-08-25 01:35:45,013 ThriftServer.java:133 - Listening for thrift clients... 
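
The first CREATE KEYSPACE attempt above fails with "Connection refused" because the CQL native transport on 10.0.0.50:9041 only starts listening a few seconds later; the entrypoint simply sleeps 10 seconds and retries with cqlsh. For illustration only, the same wait-and-retry could be written against the DataStax 3.x Java driver (the `c.d.d.c.Cluster` seen in the Reaper log lines). The address, port, and keyspace definition below are taken from the log; the retry loop itself is a sketch, not what the script actually does.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.NoHostAvailableException;

public class WaitForCqlThenCreateKeyspace {
    public static void main(String[] args) throws InterruptedException {
        // Node address and native-transport port taken from the log above.
        Cluster cluster = null;
        Session session = null;
        while (session == null) {
            cluster = Cluster.builder()
                    .addContactPoint("10.0.0.50")
                    .withPort(9041)
                    .build();
            try {
                session = cluster.connect();
            } catch (NoHostAvailableException e) {
                // Native transport not up yet ("Connection refused" above):
                // back off and retry, mirroring the script's `sleep 10`.
                cluster.close();
                Thread.sleep(10_000);
            }
        }
        // Same keyspace definition the entrypoint passes to cqlsh.
        session.execute("CREATE KEYSPACE IF NOT EXISTS reaper_db WITH replication = "
                + "{'class': 'NetworkTopologyStrategy', 'datacenter1': 3}");
        cluster.close();
    }
}
```
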
+ cqlsh 10.0.0.50 9041 -e 'CREATE KEYSPACE IF NOT EXISTS reaper_db WITH replication = {'\''class'\'': '\''NetworkTopologyStrategy'\'', '\''datacenter1'\'': 3};' INFO [Native-Transport-Requests-8] 2025-08-25 01:35:52,904 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=reaper_db, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.NetworkTopologyStrategy, datacenter1=3}}, tables=[], views=[], functions=[], types=[]} + export CASSANDRA_REAPER_JMX_KEY + [[ 10.0.0.50 == \1\0\.\0\.\0\.\5\0 ]] + sleep 120 + run_service cassandra-reaper + [[ -n 1999 ]] + [[ -n 1999 ]] + local owner_opts=1999:1999 + mkdir -p /etc/contrail /var/lib/contrail + chown 1999:1999 /etc/contrail /var/lib/contrail + find /etc/contrail -uid 0 -exec chown 1999:1999 '{}' + + chmod 755 /etc/contrail + do_run_service cassandra-reaper + [[ -n 1999 ]] + [[ -n 1999 ]] + mkdir -p /var/crashes + chmod 777 /var/crashes ++ id -un 1999 + local user_name=contrail + export HOME=/home/contrail + HOME=/home/contrail + mkdir -p /home/contrail + chown -R 1999:1999 /home/contrail + exec setpriv --reuid 1999 --regid 1999 --clear-groups --no-new-privs cassandra-reaper Looking for reaper under /usr WARN [2025-08-25 01:35:56,891] [main] c.d.d.c.ReplicationStrategy$NetworkTopologyStrategy - Error while computing token map for keyspace reaper_db with datacenter datacenter1: could not achieve replication factor 3 (found 2 replicas only), check your keyspace replication settings. INFO [Native-Transport-Requests-2] 2025-08-25 01:35:57,109 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@53db1d2a[cfId=d8fa33f0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, executed_at, script],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:35:57,258 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration INFO [MigrationStage:1] 2025-08-25 01:35:57,591 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration_leader WARN [2025-08-25 01:35:58,521] [main] i.c.s.CassandraStorage - Starting db migration from 0 to 31… WARN [2025-08-25 01:35:58,642] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:35:58,646] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:35:58,649] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [HANDSHAKE-/10.0.0.50] 2025-08-25 01:35:58,683 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.50 WARN [Native-Transport-Requests-2] 2025-08-25 01:35:58,690 TimeFcts.java:99 - The function 'dateof' is deprecated. Use the function 'toTimestamp' instead. INFO [MigrationStage:1] 2025-08-25 01:35:58,885 ColumnFamilyStore.java:411 - Initializing reaper_db.running_reapers INFO [Native-Transport-Requests-4] 2025-08-25 01:35:59,743 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@3565d60d[cfId=da8d08f0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:35:59,907 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_unit_v1 INFO [MigrationStage:1] 2025-08-25 01:36:00,497 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_by_cluster_and_keyspace INFO [Native-Transport-Requests-1] 2025-08-25 01:36:00,709 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@4351d509[cfId=db206f50-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_run_by_cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:00,854 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster INFO [MigrationStage:1] 2025-08-25 01:36:01,511 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_v1 INFO [Native-Transport-Requests-4] 2025-08-25 01:36:01,651 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@2c595fd6[cfId=dbb02c30-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[partitioner, seed_hosts, name],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:01,741 ColumnFamilyStore.java:411 - Initializing reaper_db.cluster INFO [MigrationStage:1] 2025-08-25 01:36:02,499 ColumnFamilyStore.java:411 - Initializing reaper_db.snapshot INFO [STREAM-INIT-/10.0.0.249:58504] 2025-08-25 01:36:02,561 StreamResultFuture.java:116 - [Stream #dc1f41b0-8153-11f0-b872-d9a04db196bc ID#0] Creating new streaming plan for Bootstrap INFO [STREAM-INIT-/10.0.0.249:58504] 2025-08-25 01:36:02,569 StreamResultFuture.java:123 - [Stream #dc1f41b0-8153-11f0-b872-d9a04db196bc, ID#0] Received streaming plan for Bootstrap INFO [STREAM-INIT-/10.0.0.249:58520] 2025-08-25 01:36:02,573 StreamResultFuture.java:123 - [Stream #dc1f41b0-8153-11f0-b872-d9a04db196bc, ID#0] Received streaming plan for Bootstrap INFO [STREAM-IN-/10.0.0.249:58520] 2025-08-25 01:36:02,664 StreamResultFuture.java:173 - [Stream #dc1f41b0-8153-11f0-b872-d9a04db196bc ID#0] Prepare completed. 
Receiving 0 files(0.000KiB), sending 1 files(0.072KiB) INFO [STREAM-IN-/10.0.0.249:58520] 2025-08-25 01:36:02,726 StreamResultFuture.java:187 - [Stream #dc1f41b0-8153-11f0-b872-d9a04db196bc] Session with /10.0.0.249 is complete INFO [STREAM-IN-/10.0.0.249:58520] 2025-08-25 01:36:02,727 StreamResultFuture.java:219 - [Stream #dc1f41b0-8153-11f0-b872-d9a04db196bc] All sessions completed INFO [Native-Transport-Requests-3] 2025-08-25 01:36:02,747 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@439a5f3e[cfId=dc5768b0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:02,868 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v1 INFO [MigrationStage:1] 2025-08-25 01:36:03,428 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run INFO [Native-Transport-Requests-5] 2025-08-25 01:36:03,648 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@499de89d[cfId=dce0e400-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:03,756 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_unit INFO [MigrationStage:1] 2025-08-25 01:36:04,581 ColumnFamilyStore.java:411 - Initializing reaper_db.leader WARN [2025-08-25 01:36:05,421] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will 
likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:05,425] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:05,428] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-08-25 01:36:06,482] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:06,486] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:06,488] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-08-25 01:36:06,539] [main] i.c.s.c.FixRepairRunTimestamps - Correcting timestamps in the repair_run table. This may take some minutes… WARN [2025-08-25 01:36:06,565] [main] i.c.s.c.FixRepairRunTimestamps - Correction of timestamps in the repair_run table completed. WARN [2025-08-25 01:36:06,612] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:06,621] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:06,623] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-08-25 01:36:06,680] [main] i.c.s.c.FixRepairRunTimestamps - Correcting timestamps in the repair_run table. This may take some minutes… WARN [2025-08-25 01:36:06,682] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. 
Consider preparing the statement only once. Query='INSERT INTO repair_run (id,start_time,pause_time,end_time) VALUES(?, ?, ?, ?)' WARN [2025-08-25 01:36:06,715] [main] i.c.s.c.FixRepairRunTimestamps - Correction of timestamps in the repair_run table completed. WARN [2025-08-25 01:36:06,766] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:06,771] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:06,785] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' WARN [2025-08-25 01:36:07,440] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:07,442] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:07,445] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
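
Reaper's earlier warning that reaper_db "could not achieve replication factor 3 (found 2 replicas only)" refers to the keyspace created above with NetworkTopologyStrategy and datacenter1: 3, presumably while the third node was still joining. A minimal sketch of reading the keyspace's replication settings back from system_schema, using the same driver, node, and port as in the log; whether anything needs changing once 10.0.0.249 and 10.0.0.254 finish joining is not something the log settles.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import java.util.Map;

public class CheckReaperDbReplication {
    public static void main(String[] args) {
        try (Cluster cluster = Cluster.builder()
                .addContactPoint("10.0.0.50").withPort(9041).build();
             Session session = cluster.connect()) {
            Row row = session.execute(
                    "SELECT replication FROM system_schema.keyspaces "
                    + "WHERE keyspace_name = 'reaper_db'").one();
            if (row == null) {
                System.out.println("reaper_db not found");
                return;
            }
            // replication is a frozen map<text, text>, e.g.
            // {class=...NetworkTopologyStrategy, datacenter1=3}
            Map<String, String> replication =
                    row.getMap("replication", String.class, String.class);
            System.out.println(replication);
        }
    }
}
```

If the datacenter name in that map did not match the cluster's actual datacenter, the usual remedy would be an ALTER KEYSPACE with corrected options, followed by repair.
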
INFO [MigrationStage:1] 2025-08-25 01:36:07,760 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v2 INFO [Native-Transport-Requests-7] 2025-08-25 01:36:08,432 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@6a82e7b[cfId=dfbadf00-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:08,544 ColumnFamilyStore.java:411 - Initializing reaper_db.node_operations WARN [2025-08-25 01:36:08,840] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:08,843] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:08,845] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
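
The repeated "Re-preparing already prepared query" warnings come from the DataStax driver inside Reaper and point at the pattern the message recommends: prepare each statement once and reuse the PreparedStatement. Below is a minimal sketch of that pattern around the schema_migration_leader INSERT quoted in the log. The bind parameter types (uuid leader, text keyspace and hostname) are assumptions, since the log does not show the table's column types; the server's own warning above also suggests toTimestamp(now()) in place of the deprecated dateOf(now()).

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import java.util.UUID;

public class SchemaMigrationLeaderDao {
    private final Session session;
    private final PreparedStatement takeLead;   // prepared exactly once

    public SchemaMigrationLeaderDao(Session session) {
        this.session = session;
        // Query text copied from the log; dateOf(now()) is what Reaper sends,
        // toTimestamp(now()) is the replacement the server warning suggests.
        this.takeLead = session.prepare(
                "INSERT INTO schema_migration_leader "
                + "(keyspace_name, leader, took_lead_at, leader_hostname) "
                + "VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300");
    }

    public boolean tryTakeLead(String keyspace, UUID leader, String hostname) {
        // Parameter types are assumed; the log only shows the column names.
        BoundStatement bound = takeLead.bind(keyspace, leader, hostname);
        // wasApplied() reflects the IF NOT EXISTS lightweight transaction outcome.
        return session.execute(bound).wasApplied();
    }
}
```
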
INFO [Native-Transport-Requests-2] 2025-08-25 01:36:08,878 MigrationManager.java:454 - Update table 'reaper_db/cluster' From org.apache.cassandra.config.CFMetaData@2dd8358d[cfId=dbb02c30-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner properties seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, name, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@4fc61f7d[cfId=dbb02c30-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] WARN [2025-08-25 01:36:10,459] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:10,464] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:10,467] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-08-25 01:36:10,644 ColumnFamilyStore.java:411 - Initializing reaper_db.diagnostic_event_subscription WARN [2025-08-25 01:36:11,517] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:11,520] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:11,522] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-08-25 01:36:12,537 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v3 WARN [2025-08-25 01:36:13,488] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:13,493] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:13,496] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-08-25 01:36:13,789 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster_v2 WARN [2025-08-25 01:36:15,340] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:15,346] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:15,348] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
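
The "Update table 'reaper_db/cluster'" entry above records a migration step that adds a state column next to partitioner and seed_hosts; the log shows only the before and after column sets, not the DDL Reaper ran. An illustrative reconstruction of such a step, with the column type (text) assumed:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class AddClusterStateColumn {
    public static void main(String[] args) {
        try (Cluster cluster = Cluster.builder()
                .addContactPoint("10.0.0.50").withPort(9041).build();
             Session session = cluster.connect("reaper_db")) {
            // Equivalent in spirit to the 'reaper_db/cluster' update above;
            // the actual statement and column type are not shown in the log.
            session.execute("ALTER TABLE cluster ADD state text");
        }
    }
}
```
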
INFO [Native-Transport-Requests-2] 2025-08-25 01:36:15,477 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@2bbe53ca[cfId=dcadc610-8153-11f0-9cab-d364fe637386,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, last_event, id, segment_end_time, state, cluster_name, end_time, end_token, start_token, segment_start_time, segment_state, cause, creation_time, start_time, coordinator_host, token_ranges, owner, repair_parallelism, tables, segment_id, pause_time, repair_unit_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3e530019[cfId=dcadc610-8153-11f0-9cab-d364fe637386,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] WARN [2025-08-25 01:36:16,407] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. 
Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:16,413] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:16,416] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-08-25 01:36:16,584 ColumnFamilyStore.java:411 - Initializing reaper_db.running_repairs WARN [2025-08-25 01:36:17,517] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:17,519] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:17,525] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [MigrationStage:1] 2025-08-25 01:36:17,648 ColumnFamilyStore.java:411 - Initializing reaper_db.percent_repaired_by_schedule WARN [2025-08-25 01:36:19,353] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:19,355] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:19,361] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
INFO [Native-Transport-Requests-3] 2025-08-25 01:36:19,382 MigrationManager.java:454 - Update table 'reaper_db/repair_unit_v1' From org.apache.cassandra.config.CFMetaData@25a74a6c[cfId=da8d08f0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6e3eea56[cfId=da8d08f0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] WARN [2025-08-25 01:36:21,586] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:21,589] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:21,590] [contrail_database-worker-2] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' 
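
Each applied step of the "db migration from 0 to 31" is recorded in the reaper_db.schema_migration table created earlier (partition key applied_successful, clustering column version). A small sketch of reading that history back; version is an int per the table definition, while executed_at is assumed to be a timestamp since its type is not shown in the log.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class MigrationProgress {
    public static void main(String[] args) {
        try (Cluster cluster = Cluster.builder()
                .addContactPoint("10.0.0.50").withPort(9041).build();
             Session session = cluster.connect("reaper_db")) {
            // applied_successful is the partition key and version the clustering
            // column, so this lists the successful migrations in version order.
            for (Row row : session.execute(
                    "SELECT version, script_name, executed_at FROM schema_migration "
                    + "WHERE applied_successful = true")) {
                System.out.println(row.getInt("version") + "  "
                        + row.getString("script_name") + "  "
                        + row.getTimestamp("executed_at"));
            }
        }
    }
}
```
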
INFO [Native-Transport-Requests-2] 2025-08-25 01:36:21,634 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_v1' From org.apache.cassandra.config.CFMetaData@10254e08[cfId=db7dd190-8153-11f0-9cab-d364fe637386,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity next_activation owner pause_time repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, id, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@21e21e93[cfId=db7dd190-8153-11f0-9cab-d364fe637386,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-08-25 01:36:23,719 MigrationManager.java:427 - Update Keyspace 'svc_monitor_keyspace' From KeyspaceMetadata{name=svc_monitor_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} To KeyspaceMetadata{name=svc_monitor_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} WARN [2025-08-25 01:36:23,838] [contrail_database-worker-0] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. 
Consider preparing the statement only once. Query='insert into schema_migration(applied_successful, version, script_name, script, executed_at) values(?, ?, ?, ?, ?)' WARN [2025-08-25 01:36:23,853] [contrail_database-worker-3] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='INSERT INTO schema_migration_leader (keyspace_name, leader, took_lead_at, leader_hostname) VALUES (?, ?, dateOf(now()), ?) IF NOT EXISTS USING TTL 300' WARN [2025-08-25 01:36:23,878] [contrail_database-worker-1] c.d.d.c.Cluster - Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. Consider preparing the statement only once. Query='DELETE FROM schema_migration_leader where keyspace_name = ? IF leader = ?' INFO [Native-Transport-Requests-1] 2025-08-25 01:36:24,475 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=config_db_uuid, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [Native-Transport-Requests-1] 2025-08-25 01:36:25,381 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@6dffcb3b[cfId=e9d51550-8153-11f0-a0d3-0975efdf1988,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:25,539 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_uuid_table WARN [2025-08-25 01:36:25,588] [main] i.c.s.c.Migration016 - altering every table to set `dclocal_read_repair_chance` to zero… WARN [2025-08-25 01:36:25,591] [main] i.c.s.c.Migration016 - alter every table to set `dclocal_read_repair_chance` to zero completed. 
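
Migration016 above reports altering every table to set dclocal_read_repair_chance to zero, and the "Update table" entries that follow show the option moving from 0.1 to 0.0. Roughly what that amounts to, sketched here with the driver's schema metadata; the real implementation is Reaper's own migration code, not this loop.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TableMetadata;

public class ZeroDclocalReadRepairChance {
    public static void main(String[] args) {
        try (Cluster cluster = Cluster.builder()
                .addContactPoint("10.0.0.50").withPort(9041).build();
             Session session = cluster.connect()) {
            for (TableMetadata table :
                    cluster.getMetadata().getKeyspace("reaper_db").getTables()) {
                // Matches the 0.1 -> 0.0 change visible in the Update table entries.
                session.execute("ALTER TABLE reaper_db." + table.getName()
                        + " WITH dclocal_read_repair_chance = 0.0");
            }
        }
    }
}
```
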
INFO [Native-Transport-Requests-2] 2025-08-25 01:36:25,592 MigrationManager.java:454 - Update table 'reaper_db/repair_unit_v1' From org.apache.cassandra.config.CFMetaData@25a74a6c[cfId=da8d08f0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@73886579[cfId=da8d08f0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-08-25 01:36:25,593 MigrationManager.java:454 - Update table 'reaper_db/node_metrics_v3' From org.apache.cassandra.config.CFMetaData@5108ea78[cfId=e21ebfa0-8153-11f0-9cab-d364fe637386,ksName=reaper_db,cfName=node_metrics_v3,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), 
org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket, host],clusteringColumns=[ts, metric_scope, metric_name, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@60644d0a[cfId=e21ebfa0-8153-11f0-9cab-d364fe637386,ksName=reaper_db,cfName=node_metrics_v3,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType), org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [value]],partitionKeyColumns=[cluster, metric_domain, metric_type, time_bucket, host],clusteringColumns=[ts, metric_scope, metric_name, metric_attribute],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, metric_domain, metric_attribute, time_bucket, ts, metric_type, metric_name, metric_scope, value, host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-6] 2025-08-25 01:36:25,593 MigrationManager.java:454 - Update table 'reaper_db/schema_migration' From org.apache.cassandra.config.CFMetaData@5171e10e[cfId=d8fa33f0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, 
executed_at, script],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@2e7273f3[cfId=d8fa33f0-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=schema_migration,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.Int32Type),partitionColumns=[[] | [executed_at script script_name]],partitionKeyColumns=[applied_successful],clusteringColumns=[version],keyValidator=org.apache.cassandra.db.marshal.BooleanType,columnMetadata=[script_name, version, applied_successful, executed_at, script],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-3] 2025-08-25 01:36:25,591 MigrationManager.java:454 - Update table 'reaper_db/diagnostic_event_subscription' From org.apache.cassandra.config.CFMetaData@38d4fc56[cfId=e0fa63e0-8153-11f0-9cab-d364fe637386,ksName=reaper_db,cfName=diagnostic_event_subscription,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster description export_file_logger export_http_endpoint export_sse events nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[cluster, export_http_endpoint, events, id, export_sse, nodes, export_file_logger, description],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@4236ddcf[cfId=e0fa63e0-8153-11f0-9cab-d364fe637386,ksName=reaper_db,cfName=diagnostic_event_subscription,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster description export_file_logger export_http_endpoint export_sse events nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[cluster, export_http_endpoint, events, id, export_sse, nodes, export_file_logger, description],droppedColumns={},triggers=[],indexes=[]] INFO 
[Native-Transport-Requests-4] 2025-08-25 01:36:25,593 MigrationManager.java:454 - Update table 'reaper_db/repair_run_by_cluster' From org.apache.cassandra.config.CFMetaData@7b43324e[cfId=db206f50-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_run_by_cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3c88fd56[cfId=db206f50-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=repair_run_by_cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-5] 2025-08-25 01:36:25,592 MigrationManager.java:454 - Update table 'reaper_db/node_operations' From org.apache.cassandra.config.CFMetaData@6cc55a49[cfId=dfbadf00-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] To 
org.apache.cassandra.config.CFMetaData@7ca693db[cfId=dfbadf00-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-10] 2025-08-25 01:36:28,468 MigrationManager.java:454 - Update table 'reaper_db/node_operations' From org.apache.cassandra.config.CFMetaData@6cc55a49[cfId=dfbadf00-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3c00bd07[cfId=dfbadf00-8153-11f0-a0d3-0975efdf1988,ksName=reaper_db,cfName=node_operations,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy, options={min_threshold=4, max_threshold=32, compaction_window_size=30, compaction_window_unit=MINUTES, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [data 
ts]],partitionKeyColumns=[cluster, type, time_bucket],clusteringColumns=[host],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster, type, time_bucket, ts, data, host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-3] 2025-08-25 01:36:28,546 MigrationManager.java:427 - Update Keyspace 'config_db_uuid' From KeyspaceMetadata{name=config_db_uuid, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@2871df2b[cfId=e9d51550-8153-11f0-a0d3-0975efdf1988,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} To KeyspaceMetadata{name=config_db_uuid, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[org.apache.cassandra.config.CFMetaData@2871df2b[cfId=e9d51550-8153-11f0-a0d3-0975efdf1988,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]], views=[], functions=[], types=[]} INFO [MigrationStage:1] 2025-08-25 01:36:29,641 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.service_instance_table WARN [2025-08-25 01:36:32,505] [main] i.c.ReaperApplication - Reaper is ready to get things done! 
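With Reaper reporting ready, the entrypoint next registers the local node as a Reaper-managed cluster; the raw exchange (a login that yields a JSESSIONID, then a POST to /cluster) appears in the curl trace a few entries below. A cleaned-up sketch of that same flow, using the endpoint, credentials, and JMX port visible in the trace:
# Sketch of the cluster-registration flow recorded further down in this log.
REAPER_URL=http://10.0.0.50:8071
# Log in and pull the JSESSIONID cookie out of curl's verbose headers (stderr merged so awk can see them).
jsessionid=$(curl -s -v -X POST \
  -H 'Content-Type: application/x-www-form-urlencoded' \
  -d 'username=reaperUser&password=reaperPass' \
  "$REAPER_URL/login" 2>&1 | awk -F': ' '/JSESSIONID/ { print $2 }' | tr -d '\r')
# Register this node as the cluster seed, pointing Reaper at its JMX port.
curl --cookie "$jsessionid" -H 'Content-Type: application/json' \
  -X POST "$REAPER_URL/cluster?seedHost=10.0.0.50&jmxPort=7201"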
INFO [MigrationStage:1] 2025-08-25 01:36:34,698 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_fq_name_table INFO [Native-Transport-Requests-3] 2025-08-25 01:36:36,384 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@47c8a2d5[cfId=f0640200-8153-11f0-a0d3-0975efdf1988,ksName=svc_monitor_keyspace,cfName=pool_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:36,535 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.pool_table INFO [Native-Transport-Requests-6] 2025-08-25 01:36:36,775 MigrationManager.java:454 - Update table 'config_db_uuid/obj_fq_name_table' From org.apache.cassandra.config.CFMetaData@7f06b763[cfId=ef319c80-8153-11f0-9cab-d364fe637386,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6f4f79c8[cfId=ef319c80-8153-11f0-9cab-d364fe637386,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, 
value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:37,561 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_shared_table INFO [Native-Transport-Requests-1] 2025-08-25 01:36:39,340 MigrationManager.java:454 - Update table 'config_db_uuid/obj_shared_table' From org.apache.cassandra.config.CFMetaData@28dabd2[cfId=f109b7e0-8153-11f0-b872-d9a04db196bc,ksName=config_db_uuid,cfName=obj_shared_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@66064477[cfId=f109b7e0-8153-11f0-b872-d9a04db196bc,ksName=config_db_uuid,cfName=obj_shared_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:39,927 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.loadbalancer_table INFO [MigrationStage:1] 2025-08-25 01:36:41,583 ColumnFamilyStore.java:411 - Initializing useragent.useragent_keyval_table INFO [Native-Transport-Requests-1] 2025-08-25 01:36:42,414 MigrationManager.java:454 - Update table 'svc_monitor_keyspace/loadbalancer_table' From org.apache.cassandra.config.CFMetaData@4e69345e[cfId=f2724890-8153-11f0-9cab-d364fe637386,ksName=svc_monitor_keyspace,cfName=loadbalancer_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@b69e73f[cfId=f2724890-8153-11f0-9cab-d364fe637386,ksName=svc_monitor_keyspace,cfName=loadbalancer_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:44,555 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.healthmonitor_table INFO [Native-Transport-Requests-1] 2025-08-25 01:36:47,359 MigrationManager.java:454 - Update table 'config_db_uuid/obj_shared_table' From org.apache.cassandra.config.CFMetaData@28dabd2[cfId=f109b7e0-8153-11f0-b872-d9a04db196bc,ksName=config_db_uuid,cfName=obj_shared_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@72835e3a[cfId=f109b7e0-8153-11f0-b872-d9a04db196bc,ksName=config_db_uuid,cfName=obj_shared_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | 
[value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:49,720 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.route_target_table INFO [Native-Transport-Requests-2] 2025-08-25 01:36:50,814 MigrationManager.java:454 - Update table 'to_bgp_keyspace/route_target_table' From org.apache.cassandra.config.CFMetaData@635d7a27[cfId=f840a460-8153-11f0-b872-d9a04db196bc,ksName=to_bgp_keyspace,cfName=route_target_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@27f51c7d[cfId=f840a460-8153-11f0-b872-d9a04db196bc,ksName=to_bgp_keyspace,cfName=route_target_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:51,644 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_ip_address_table INFO [Native-Transport-Requests-4] 2025-08-25 01:36:53,326 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@6c7d6647[cfId=fa7d26e0-8153-11f0-a0d3-0975efdf1988,ksName=to_bgp_keyspace,cfName=service_chain_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, 
cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:36:53,455 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_table INFO [MigrationStage:1] 2025-08-25 01:36:55,508 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_uuid_table INFO [Native-Transport-Requests-1] 2025-08-25 01:36:56,376 MigrationManager.java:454 - Update table 'to_bgp_keyspace/service_chain_uuid_table' From org.apache.cassandra.config.CFMetaData@2c2204a3[cfId=fbba13b0-8153-11f0-b872-d9a04db196bc,ksName=to_bgp_keyspace,cfName=service_chain_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@77e77bfd[cfId=fbba13b0-8153-11f0-b872-d9a04db196bc,ksName=to_bgp_keyspace,cfName=service_chain_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] + curl http://10.0.0.50:8071/webui/login.html % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 1940 100 1940 0 0 107k 0 --:--:-- --:--:-- --:--:-- 111k ++ tr -d '\r' ++ curl -v -X POST -H 'Content-Type: application/x-www-form-urlencoded' -d 'username=reaperUser&password=reaperPass' http://10.0.0.50:8071/login ++ awk '-F: ' '/JSESSIONID/ { print $2 }' + jsessionid='JSESSIONID=node08qyi3i63o8pmfmgmetd8uzva0.node0; Path=/' + curl --cookie 'JSESSIONID=node08qyi3i63o8pmfmgmetd8uzva0.node0; Path=/' -H 'Content-Type: application/json' -X POST 'http://10.0.0.50:8071/cluster?seedHost=10.0.0.50&jmxPort=7201' % Total % Received % Xferd Average Speed Time Time Time Current Dload 
Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0 + echo 'Reaper started successfully' Reaper started successfully INFO [Native-Transport-Requests-2] 2025-08-25 01:38:03,396 MigrationManager.java:427 - Update Keyspace 'dm_keyspace' From KeyspaceMetadata{name=dm_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} To KeyspaceMetadata{name=dm_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]} INFO [Native-Transport-Requests-2] 2025-08-25 01:38:03,927 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@12d3ff39[cfId=24920270-8154-11f0-a0d3-0975efdf1988,ksName=dm_keyspace,cfName=dm_pr_vn_ip_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:38:04,061 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_vn_ip_table INFO [MigrationStage:1] 2025-08-25 01:38:05,516 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_asn_table INFO [Native-Transport-Requests-1] 2025-08-25 01:38:05,746 MigrationManager.java:454 - Update table 'dm_keyspace/dm_pr_asn_table' From org.apache.cassandra.config.CFMetaData@cdf29c0[cfId=25742510-8154-11f0-b872-d9a04db196bc,ksName=dm_keyspace,cfName=dm_pr_asn_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@44f4ba76[cfId=25742510-8154-11f0-b872-d9a04db196bc,ksName=dm_keyspace,cfName=dm_pr_asn_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, 
dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:38:06,983 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_ni_ipv6_ll_table INFO [Native-Transport-Requests-1] 2025-08-25 01:38:08,506 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@3329832f[cfId=274cb5a0-8154-11f0-a0d3-0975efdf1988,ksName=dm_keyspace,cfName=dm_pnf_resource_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-25 01:38:08,624 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pnf_resource_table INFO [HANDSHAKE-/10.0.0.254] 2025-08-25 01:43:34,518 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:34,884 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:43:34,946 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,022 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,051 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,103 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,128 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,144 
Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,200 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,274 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,330 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,377 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,426 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,434 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,478 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,527 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,537 Validator.java:281 - [repair #e9c89cc0-8154-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:43:35,548 ActiveRepairService.java:452 - [repair #e9c170d0-8154-11f0-9cab-d364fe637386] Not a global repair, will not do anticompaction INFO [Repair-Task-2] 2025-08-25 01:43:36,056 RepairRunnable.java:139 - Starting repair command #1 (ea88df80-8154-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 2, pull repair: false) INFO [Repair-Task-2] 2025-08-25 01:43:36,093 RepairSession.java:228 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(3285611939104082726,3301993197199482194], (8113432217923545849,8176249407085119607]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:3] 2025-08-25 01:43:36,147 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for 
diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:36,149 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,155 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,158 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,163 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,163 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,171 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:36,173 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:2] 2025-08-25 01:43:36,174 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:2] 2025-08-25 01:43:36,174 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:2] 2025-08-25 01:43:36,174 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:4] 2025-08-25 01:43:36,221 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:43:36,222 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,224 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,224 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,226 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,227 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,229 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:36,230 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:2] 2025-08-25 01:43:36,230 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:2] 2025-08-25 01:43:36,230 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:2] 2025-08-25 01:43:36,230 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:36,235 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to 
[/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:36,235 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,236 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,237 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,238 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,240 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,242 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:36,244 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:43:36,244 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-25 01:43:36,244 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:43:36,244 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:36,245 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:36,245 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,247 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,248 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,251 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,251 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,253 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:36,254 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:3] 2025-08-25 01:43:36,254 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:2] 2025-08-25 01:43:36,254 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:43:36,254 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:1] 2025-08-25 01:43:36,259 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:36,259 RepairJob.java:257 - Validating /10.0.0.249 INFO 
[AntiEntropyStage:1] 2025-08-25 01:43:36,261 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,261 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,264 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,264 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,273 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:36,276 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:36,276 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,278 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,278 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,280 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,280 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,282 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:36,282 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:43:36,287 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-08-25 01:43:36,282 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:2] 2025-08-25 01:43:36,282 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:6] 2025-08-25 01:43:36,287 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-08-25 01:43:36,287 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:36,288 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-08-25 01:43:36,289 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:36,289 RepairJob.java:257 - Validating /10.0.0.249 INFO [RepairJobTask:6] 2025-08-25 01:43:36,289 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,292 RepairSession.java:180 - [repair 
#ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,292 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,294 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,294 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,296 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:36,296 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-25 01:43:36,296 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:7] 2025-08-25 01:43:36,297 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:6] 2025-08-25 01:43:36,297 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:36,298 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:36,299 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,305 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,305 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,308 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,308 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,314 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:36,314 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-25 01:43:36,315 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:7] 2025-08-25 01:43:36,315 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-08-25 01:43:36,315 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:36,316 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:36,316 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 
2025-08-25 01:43:36,319 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,319 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,321 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,321 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,326 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:43:36,326 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:6] 2025-08-25 01:43:36,327 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-25 01:43:36,327 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-25 01:43:36,328 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:36,396 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:36,396 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,398 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,399 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,403 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,403 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,406 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:36,406 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:5] 2025-08-25 01:43:36,407 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:6] 2025-08-25 01:43:36,407 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:7] 2025-08-25 01:43:36,408 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:36,415 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:36,416 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,423 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received 
merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,423 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,427 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,427 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,428 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:36,429 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:3] 2025-08-25 01:43:36,429 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:6] 2025-08-25 01:43:36,429 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:5] 2025-08-25 01:43:36,429 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:36,431 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:36,431 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,434 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,434 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,435 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,435 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,437 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:36,441 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-25 01:43:36,441 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-25 01:43:36,441 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-25 01:43:36,441 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:36,446 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:36,446 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,454 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 
01:43:36,454 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,458 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,458 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,460 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:36,460 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:7] 2025-08-25 01:43:36,460 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:6] 2025-08-25 01:43:36,460 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-25 01:43:36,460 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:4] 2025-08-25 01:43:36,464 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:43:36,464 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,467 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,467 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,469 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,469 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,475 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:43:36,477 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:43:36,477 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:43:36,477 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:43:36,477 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:36,485 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:36,485 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,494 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 
INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,494 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,496 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,496 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,498 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:43:36,499 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-25 01:43:36,499 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:43:36,499 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:8] 2025-08-25 01:43:36,500 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:8] 2025-08-25 01:43:36,502 RepairJob.java:234 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:8] 2025-08-25 01:43:36,502 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,505 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,505 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,507 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,507 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:36,517 RepairSession.java:180 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:36,518 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:7] 2025-08-25 01:43:36,518 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-08-25 01:43:36,518 SyncTask.java:66 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:7] 2025-08-25 01:43:36,518 RepairJob.java:143 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:36,519 RepairSession.java:270 - [repair #ea8e5dc0-8154-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:7] 2025-08-25 01:43:36,520 RepairRunnable.java:261 - Repair session ea8e5dc0-8154-11f0-a0d3-0975efdf1988 for range [(3285611939104082726,3301993197199482194], (8113432217923545849,8176249407085119607]] finished INFO [RepairJobTask:7] 2025-08-25 01:43:36,523 ActiveRepairService.java:452 - [repair #ea88df80-8154-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO 
[InternalResponseStage:8] 2025-08-25 01:43:36,531 RepairRunnable.java:343 - Repair command #1 finished in 0 seconds
INFO [HANDSHAKE-/10.0.0.249] 2025-08-25 01:43:36,604 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,137 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,155 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,176 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster_v2
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,187 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,203 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.snapshot
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,251 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,269 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,287 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,301 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.cluster
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,316 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,346 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,402 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,420 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.leader
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,431 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,447 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,464 Validator.java:281 - [repair #eb223950-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-08-25 01:43:37,486 ActiveRepairService.java:452 - [repair #eb182730-8154-11f0-b872-d9a04db196bc] Not a global repair, will not do anticompaction
INFO [AntiEntropyStage:1] 2025-08-25 01:43:44,930 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-08-25 01:43:44,952 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-08-25 01:43:44,962 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster_v2
INFO [AntiEntropyStage:1] 2025-08-25 01:43:44,982 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-08-25 01:43:44,991 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.snapshot
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,043 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,062 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,085 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,098 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.cluster
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,110 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,126 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,173 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,186 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.leader
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,210 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,228 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,250 Validator.java:281 - [repair #efc3d4f0-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-08-25 01:43:45,265 ActiveRepairService.java:452 - [repair #efbfdd50-8154-11f0-b872-d9a04db196bc] Not a global repair, will not do anticompaction
INFO [Repair-Task-3] 2025-08-25 01:43:46,138 RepairRunnable.java:139 - Starting repair command #2 (f08b43a0-8154-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 6, pull repair: false)
INFO [Repair-Task-3] 2025-08-25 01:43:46,160 RepairSession.java:228 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(6903842824702902789,6933948299523018883], (3272886454153936434,3281444069129578606], (-1595256026949079295,-1572135559417000398], (3337901743585771331,3351809722961944655], (-2179982744545882894,-2174778656319358569], (-2017824721261524412,-2003980683133342188]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1]
INFO [RepairJobTask:1] 2025-08-25 01:43:46,289 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-08-25 01:43:46,289 RepairJob.java:257 - Validating /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,297 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,297 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,300 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,300 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,312 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50
INFO [RepairJobTask:5] 2025-08-25 01:43:46,319 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:3] 2025-08-25 01:43:46,319 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:4] 2025-08-25 01:43:46,319 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:4] 2025-08-25 01:43:46,320 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced
INFO [RepairJobTask:4] 2025-08-25 01:43:46,355 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-08-25 01:43:46,355 RepairJob.java:257 - Validating /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,361 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249
INFO [AntiEntropyStage:1]
2025-08-25 01:43:46,361 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,365 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,366 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,371 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:46,373 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:2] 2025-08-25 01:43:46,373 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:2] 2025-08-25 01:43:46,373 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:2] 2025-08-25 01:43:46,374 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:46,377 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:46,379 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,383 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,383 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,387 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,387 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,389 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:46,391 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:3] 2025-08-25 01:43:46,392 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-25 01:43:46,392 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:3] 2025-08-25 01:43:46,392 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:46,397 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:46,397 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,400 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,400 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 
01:43:46,406 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,406 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,408 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:46,408 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:5] 2025-08-25 01:43:46,408 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:4] 2025-08-25 01:43:46,409 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:5] 2025-08-25 01:43:46,409 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:46,413 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:46,413 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,415 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,416 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,419 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,419 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,421 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:46,422 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:1] 2025-08-25 01:43:46,423 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:6] 2025-08-25 01:43:46,428 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:43:46,429 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:46,431 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:46,432 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,434 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,434 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,440 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,441 
RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,443 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:43:46,443 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:43:46,443 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:7] 2025-08-25 01:43:46,445 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-08-25 01:43:46,445 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:46,448 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:46,448 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,451 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,451 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,453 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,453 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,455 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:46,457 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-25 01:43:46,457 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-25 01:43:46,457 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-25 01:43:46,457 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:46,459 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:46,459 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,460 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,460 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,462 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from 
/10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,462 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,464 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:46,464 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-08-25 01:43:46,464 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-08-25 01:43:46,465 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-08-25 01:43:46,467 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:46,470 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:46,470 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,481 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,481 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,484 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,484 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,487 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:43:46,487 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-25 01:43:46,490 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-25 01:43:46,490 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-25 01:43:46,491 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:46,526 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:46,527 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,534 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,535 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,537 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 
01:43:46,537 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,543 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:46,548 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-25 01:43:46,548 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:6] 2025-08-25 01:43:46,551 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:7] 2025-08-25 01:43:46,553 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:46,554 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:46,555 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,559 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,559 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,567 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,567 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,589 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:43:46,590 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:5] 2025-08-25 01:43:46,590 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:7] 2025-08-25 01:43:46,591 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:5] 2025-08-25 01:43:46,592 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:46,592 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:46,592 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,595 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,595 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,597 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,597 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,599 RepairSession.java:180 - [repair 
#f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:46,600 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:1] 2025-08-25 01:43:46,600 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:7] 2025-08-25 01:43:46,600 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:1] 2025-08-25 01:43:46,601 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:1] 2025-08-25 01:43:46,605 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:46,606 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,637 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,637 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,654 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,654 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,661 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:46,663 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:6] 2025-08-25 01:43:46,663 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-25 01:43:46,663 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-25 01:43:46,663 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:4] 2025-08-25 01:43:46,676 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:43:46,676 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,681 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,681 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,687 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,687 RepairJob.java:270 - 
Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,690 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:46,691 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:43:46,691 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:4] 2025-08-25 01:43:46,691 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:43:46,691 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:46,694 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:46,694 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,700 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,700 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,702 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,702 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,705 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:43:46,705 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:43:46,705 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-08-25 01:43:46,705 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:6] 2025-08-25 01:43:46,705 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:46,708 RepairJob.java:234 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:46,708 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,710 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,710 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,712 RepairSession.java:180 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,712 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:46,714 RepairSession.java:180 - [repair 
#f08e9f00-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50
INFO [RepairJobTask:6] 2025-08-25 01:43:46,714 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1
INFO [RepairJobTask:7] 2025-08-25 01:43:46,714 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1
INFO [RepairJobTask:1] 2025-08-25 01:43:46,714 SyncTask.java:66 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1
INFO [RepairJobTask:7] 2025-08-25 01:43:46,716 RepairJob.java:143 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced
INFO [RepairJobTask:7] 2025-08-25 01:43:46,716 RepairSession.java:270 - [repair #f08e9f00-8154-11f0-a0d3-0975efdf1988] Session completed successfully
INFO [RepairJobTask:7] 2025-08-25 01:43:46,716 RepairRunnable.java:261 - Repair session f08e9f00-8154-11f0-a0d3-0975efdf1988 for range [(6903842824702902789,6933948299523018883], (3272886454153936434,3281444069129578606], (-1595256026949079295,-1572135559417000398], (3337901743585771331,3351809722961944655], (-2179982744545882894,-2174778656319358569], (-2017824721261524412,-2003980683133342188]] finished
INFO [RepairJobTask:7] 2025-08-25 01:43:46,717 ActiveRepairService.java:452 - [repair #f08b43a0-8154-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction
INFO [InternalResponseStage:5] 2025-08-25 01:43:46,721 RepairRunnable.java:343 - Repair command #2 finished in 0 seconds
INFO [Repair-Task-4] 2025-08-25 01:43:47,012 RepairRunnable.java:139 - Starting repair command #3 (f110a040-8154-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false)
INFO [Repair-Task-4] 2025-08-25 01:43:47,028 RepairSession.java:228 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(4023133982195665979,4071444281663075047]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1]
INFO [RepairJobTask:2] 2025-08-25 01:43:47,058 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:2] 2025-08-25 01:43:47,058 RepairJob.java:257 - Validating /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,060 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,060 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25
01:43:47,063 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,063 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,065 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:47,066 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-08-25 01:43:47,068 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:43:47,069 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-08-25 01:43:47,069 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:4] 2025-08-25 01:43:47,108 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:43:47,108 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,110 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,110 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,113 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,113 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,114 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:47,116 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:43:47,116 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:43:47,119 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:47,120 RepairJob.java:257 - Validating /10.0.0.249 INFO [RepairJobTask:5] 2025-08-25 01:43:47,121 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:43:47,121 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,121 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,121 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,124 RepairSession.java:180 - [repair 
#f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,124 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,126 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:47,126 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:3] 2025-08-25 01:43:47,126 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-25 01:43:47,126 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:5] 2025-08-25 01:43:47,127 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:47,128 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:47,129 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,132 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,132 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,134 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,134 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,136 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:47,136 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:3] 2025-08-25 01:43:47,136 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:43:47,136 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:3] 2025-08-25 01:43:47,137 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:47,139 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:47,140 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,149 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,149 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,159 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,160 RepairJob.java:270 
- Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,163 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:47,164 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:43:47,164 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:5] 2025-08-25 01:43:47,164 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:43:47,164 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:4] 2025-08-25 01:43:47,171 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:43:47,171 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,173 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,178 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,186 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,186 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,191 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:47,194 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:43:47,199 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-08-25 01:43:47,199 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:43:47,200 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:1] 2025-08-25 01:43:47,209 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:47,209 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,217 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,218 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,220 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,220 RepairJob.java:270 - Validating /10.0.0.50 INFO 
[AntiEntropyStage:1] 2025-08-25 01:43:47,225 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:47,225 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-25 01:43:47,226 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:3] 2025-08-25 01:43:47,226 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-25 01:43:47,226 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:1] 2025-08-25 01:43:47,229 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:47,229 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,230 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,230 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,236 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,236 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,248 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:47,249 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-25 01:43:47,249 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-08-25 01:43:47,249 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-25 01:43:47,250 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:47,258 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:47,258 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,263 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,263 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,265 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,265 
RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,267 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:47,268 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-25 01:43:47,268 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:6] 2025-08-25 01:43:47,268 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:3] 2025-08-25 01:43:47,269 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:47,305 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:47,306 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,309 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,309 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,311 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,312 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,313 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:47,314 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-25 01:43:47,314 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:7] 2025-08-25 01:43:47,315 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:3] 2025-08-25 01:43:47,316 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:47,319 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:47,320 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,322 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,322 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,323 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,324 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,325 RepairSession.java:180 - [repair 
#f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:47,326 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:1] 2025-08-25 01:43:47,326 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:4] 2025-08-25 01:43:47,326 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:1] 2025-08-25 01:43:47,326 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:1] 2025-08-25 01:43:47,329 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:47,329 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,331 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,332 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,334 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,334 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,337 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:47,338 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-08-25 01:43:47,338 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-25 01:43:47,339 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-08-25 01:43:47,339 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:47,343 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:47,343 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,347 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,347 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,349 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,350 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,360 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received 
merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:47,360 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:3] 2025-08-25 01:43:47,361 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:1] 2025-08-25 01:43:47,361 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:3] 2025-08-25 01:43:47,361 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:47,366 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:47,366 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,369 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,369 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,372 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,372 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,375 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:47,375 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:1] 2025-08-25 01:43:47,375 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:1] 2025-08-25 01:43:47,375 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:1] 2025-08-25 01:43:47,376 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:47,381 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:47,381 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,384 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,384 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,403 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,403 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,405 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] 
Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:47,406 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-25 01:43:47,406 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:43:47,406 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-25 01:43:47,406 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:47,409 RepairJob.java:234 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:47,409 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,410 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,410 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,412 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,412 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:47,414 RepairSession.java:180 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:47,414 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:2] 2025-08-25 01:43:47,414 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:7] 2025-08-25 01:43:47,414 SyncTask.java:66 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:2] 2025-08-25 01:43:47,414 RepairJob.java:143 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:47,415 RepairSession.java:270 - [repair #f1131140-8154-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:2] 2025-08-25 01:43:47,415 RepairRunnable.java:261 - Repair session f1131140-8154-11f0-a0d3-0975efdf1988 for range [(4023133982195665979,4071444281663075047]] finished INFO [RepairJobTask:2] 2025-08-25 01:43:47,416 ActiveRepairService.java:452 - [repair #f110a040-8154-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:5] 2025-08-25 01:43:47,419 RepairRunnable.java:343 - Repair command #3 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-08-25 01:43:54,945 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:43:54,974 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 
2025-08-25 01:43:54,990 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,000 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,010 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,058 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,069 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,086 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,103 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,125 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,141 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,189 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,207 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,226 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,267 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,281 Validator.java:281 - [repair #f5c15710-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:43:55,296 ActiveRepairService.java:452 - [repair #f5bff780-8154-11f0-b872-d9a04db196bc] Not a global repair, will not do anticompaction INFO [Repair-Task-5] 2025-08-25 01:43:56,209 RepairRunnable.java:139 - Starting repair command #4 (f68bfa10-8154-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 2, 
pull repair: false) INFO [Repair-Task-5] 2025-08-25 01:43:56,215 RepairSession.java:228 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(4924057315616819847,4954583476868821717], (1678071167640808597,1687206045758017783]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:1] 2025-08-25 01:43:56,231 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:56,232 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,234 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,235 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,250 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,250 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,259 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:56,263 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:5] 2025-08-25 01:43:56,264 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:43:56,264 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:43:56,264 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:1] 2025-08-25 01:43:56,308 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:56,309 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,311 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,311 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,314 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,315 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,319 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:56,319 SyncTask.java:66 - [repair 
#f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:43:56,319 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:43:56,319 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:43:56,319 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:4] 2025-08-25 01:43:56,324 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:43:56,324 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,326 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,326 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,335 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,335 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,336 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:56,342 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-08-25 01:43:56,342 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:3] 2025-08-25 01:43:56,344 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-08-25 01:43:56,344 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:56,356 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:56,357 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,361 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,361 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,364 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,364 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,367 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:56,368 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO 
[RepairJobTask:2] 2025-08-25 01:43:56,368 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:43:56,368 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:4] 2025-08-25 01:43:56,368 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:56,374 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:56,376 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,377 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,378 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,384 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,384 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,385 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:56,386 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:43:56,386 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:43:56,386 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:43:56,386 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:56,388 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:56,388 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,389 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,389 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,395 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,395 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,397 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:56,397 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:43:56,397 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are 
consistent for schema_migration_leader INFO [RepairJobTask:4] 2025-08-25 01:43:56,397 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-08-25 01:43:56,398 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:56,399 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:56,399 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,401 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,401 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,402 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,402 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,403 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:56,403 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-25 01:43:56,404 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-25 01:43:56,404 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:6] 2025-08-25 01:43:56,405 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:56,408 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:56,408 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,410 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,411 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,414 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,414 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,416 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:56,417 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-08-25 01:43:56,417 SyncTask.java:66 - [repair 
#f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-08-25 01:43:56,417 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-08-25 01:43:56,417 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:56,419 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:56,420 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,421 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,421 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,424 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,424 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,426 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:56,426 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:1] 2025-08-25 01:43:56,426 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:1] 2025-08-25 01:43:56,426 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:1] 2025-08-25 01:43:56,427 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:56,460 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:56,461 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,462 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,463 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,465 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,465 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,466 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:56,466 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:4] 2025-08-25 01:43:56,466 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and 
/10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:6] 2025-08-25 01:43:56,467 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:3] 2025-08-25 01:43:56,468 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:56,468 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:56,469 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,477 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,477 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,484 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,484 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,485 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:56,486 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:1] 2025-08-25 01:43:56,486 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:7] 2025-08-25 01:43:56,489 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:5] 2025-08-25 01:43:56,489 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:56,492 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:56,492 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,493 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,494 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,495 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,495 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,498 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:56,498 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-08-25 01:43:56,499 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:2] 2025-08-25 01:43:56,498 SyncTask.java:66 - [repair 
#f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-08-25 01:43:56,499 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:56,503 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:56,505 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,507 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,507 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,513 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,513 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,515 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:56,515 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:1] 2025-08-25 01:43:56,515 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:7] 2025-08-25 01:43:56,515 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-25 01:43:56,515 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:56,519 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:56,519 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,524 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,524 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,527 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,527 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,530 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:43:56,532 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:3] 2025-08-25 01:43:56,532 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for 
running_reapers INFO [RepairJobTask:2] 2025-08-25 01:43:56,532 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:3] 2025-08-25 01:43:56,533 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:56,536 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:56,536 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,538 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,538 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,543 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,543 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,544 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:43:56,545 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-25 01:43:56,548 RepairJob.java:234 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:56,548 RepairJob.java:257 - Validating /10.0.0.249 INFO [RepairJobTask:1] 2025-08-25 01:43:56,548 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:43:56,548 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:43:56,549 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,550 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,550 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,552 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,552 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:56,554 RepairSession.java:180 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:56,554 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-08-25 01:43:56,554 SyncTask.java:66 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-08-25 01:43:56,554 SyncTask.java:66 - [repair 
#f68ce470-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-08-25 01:43:56,554 RepairJob.java:143 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:56,555 RepairSession.java:270 - [repair #f68ce470-8154-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:6] 2025-08-25 01:43:56,555 RepairRunnable.java:261 - Repair session f68ce470-8154-11f0-a0d3-0975efdf1988 for range [(4924057315616819847,4954583476868821717], (1678071167640808597,1687206045758017783]] finished INFO [RepairJobTask:6] 2025-08-25 01:43:56,556 ActiveRepairService.java:452 - [repair #f68bfa10-8154-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:5] 2025-08-25 01:43:56,560 RepairRunnable.java:343 - Repair command #4 finished in 0 seconds INFO [Repair-Task-6] 2025-08-25 01:43:57,088 RepairRunnable.java:139 - Starting repair command #5 (f7121a00-8154-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 5, pull repair: false) INFO [Repair-Task-6] 2025-08-25 01:43:57,102 RepairSession.java:228 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(-5171965159082867716,-5161835899912040189], (1337008383127125076,1348338435916875559], (-2620577737306096849,-2573563038829806032], (-8461167377688808199,-8456043116857083116], (5958586173015856155,5963485977667710518]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:2] 2025-08-25 01:43:57,167 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:57,167 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,176 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,176 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,179 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,179 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,181 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:57,182 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] 
Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:43:57,188 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:5] 2025-08-25 01:43:57,188 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:43:57,189 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:57,226 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:57,226 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,235 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,235 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,237 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,237 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,245 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:43:57,245 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:43:57,246 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:6] 2025-08-25 01:43:57,249 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:43:57,250 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:4] 2025-08-25 01:43:57,252 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:43:57,252 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,254 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,254 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,256 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,256 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,261 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:43:57,263 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for 
repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:43:57,263 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:6] 2025-08-25 01:43:57,263 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:43:57,263 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:1] 2025-08-25 01:43:57,266 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:43:57,267 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,271 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,271 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,273 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,273 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,278 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:43:57,278 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:3] 2025-08-25 01:43:57,279 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:7] 2025-08-25 01:43:57,283 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:6] 2025-08-25 01:43:57,283 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:7] 2025-08-25 01:43:57,290 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:43:57,290 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,292 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,292 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,294 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,295 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,298 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:57,299 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:5] 2025-08-25 01:43:57,299 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for 
snapshot INFO [RepairJobTask:6] 2025-08-25 01:43:57,299 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:5] 2025-08-25 01:43:57,299 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:57,302 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:57,303 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,305 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,305 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,307 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,307 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,321 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:57,321 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-08-25 01:43:57,321 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:43:57,321 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-08-25 01:43:57,323 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:57,326 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:57,326 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,327 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,328 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,330 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,330 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,337 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:57,338 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-25 01:43:57,338 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for 
percent_repaired_by_schedule INFO [RepairJobTask:6] 2025-08-25 01:43:57,338 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-25 01:43:57,338 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:57,341 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:57,342 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,343 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,343 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,344 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,345 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,351 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:57,352 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-25 01:43:57,352 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-08-25 01:43:57,352 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-25 01:43:57,352 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:57,363 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:57,363 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,365 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,365 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,374 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,377 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,383 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:57,386 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-25 01:43:57,386 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are 
consistent for schema_migration INFO [RepairJobTask:6] 2025-08-25 01:43:57,386 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-25 01:43:57,387 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:5] 2025-08-25 01:43:57,430 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:43:57,430 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,431 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,432 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,434 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,434 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,441 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:57,442 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:2] 2025-08-25 01:43:57,442 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-25 01:43:57,443 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:2] 2025-08-25 01:43:57,443 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:57,447 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:57,447 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,450 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,450 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,452 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,457 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,459 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:57,459 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:7] 2025-08-25 01:43:57,459 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:2] 2025-08-25 01:43:57,459 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints 
/10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:2] 2025-08-25 01:43:57,459 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:57,462 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:57,463 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,465 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,465 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,468 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,468 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,472 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:57,473 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-25 01:43:57,473 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-08-25 01:43:57,473 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-25 01:43:57,473 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:57,479 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:57,479 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,484 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,484 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,487 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,488 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,496 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:43:57,496 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-25 01:43:57,496 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:1] 2025-08-25 01:43:57,496 SyncTask.java:66 - [repair 
#f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-25 01:43:57,497 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:2] 2025-08-25 01:43:57,500 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:43:57,500 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,503 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,503 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,510 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,510 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,512 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:57,513 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:43:57,513 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:5] 2025-08-25 01:43:57,514 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:43:57,514 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:6] 2025-08-25 01:43:57,520 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:43:57,521 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,524 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,524 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,526 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,526 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,528 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:43:57,528 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-08-25 01:43:57,528 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-08-25 01:43:57,528 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and 
/10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-08-25 01:43:57,528 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:3] 2025-08-25 01:43:57,532 RepairJob.java:234 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:43:57,532 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,535 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,535 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,537 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,537 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:43:57,539 RepairSession.java:180 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:43:57,539 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-08-25 01:43:57,539 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-08-25 01:43:57,539 SyncTask.java:66 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-08-25 01:43:57,539 RepairJob.java:143 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:4] 2025-08-25 01:43:57,540 RepairSession.java:270 - [repair #f7143ce0-8154-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:4] 2025-08-25 01:43:57,540 RepairRunnable.java:261 - Repair session f7143ce0-8154-11f0-a0d3-0975efdf1988 for range [(-5171965159082867716,-5161835899912040189], (1337008383127125076,1348338435916875559], (-2620577737306096849,-2573563038829806032], (-8461167377688808199,-8456043116857083116], (5958586173015856155,5963485977667710518]] finished INFO [RepairJobTask:4] 2025-08-25 01:43:57,541 ActiveRepairService.java:452 - [repair #f7121a00-8154-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:5] 2025-08-25 01:43:57,551 RepairRunnable.java:343 - Repair command #5 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,097 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,116 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,161 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,194 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.diagnostic_event_subscription 
INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,207 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,273 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,282 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,297 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,309 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,328 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,340 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,410 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,433 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,456 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,479 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,494 Validator.java:281 - [repair #fbc6c870-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:44:05,520 ActiveRepairService.java:452 - [repair #fbbf9c80-8154-11f0-b872-d9a04db196bc] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,461 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,498 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,512 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,528 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,556 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,616 
Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,640 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,657 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,703 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,724 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,743 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,808 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,823 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,834 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,854 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,876 Validator.java:281 - [repair #fc969b90-8154-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:44:06,887 ActiveRepairService.java:452 - [repair #fc91e0a0-8154-11f0-b872-d9a04db196bc] Not a global repair, will not do anticompaction INFO [Repair-Task-7] 2025-08-25 01:44:07,197 RepairRunnable.java:139 - Starting repair command #6 (fd189cd0-8154-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-7] 2025-08-25 01:44:07,208 RepairSession.java:228 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(-2357553942810641165,-2252541555222342382]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:1] 
2025-08-25 01:44:07,225 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:07,225 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,228 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,228 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,230 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,230 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,235 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:07,237 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:5] 2025-08-25 01:44:07,237 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:44:07,237 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:5] 2025-08-25 01:44:07,238 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:07,278 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:07,278 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,281 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,281 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,282 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,283 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,285 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:07,285 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:3] 2025-08-25 01:44:07,287 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:7] 2025-08-25 01:44:07,288 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:44:07,288 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:07,293 
RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:07,293 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,302 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,303 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,308 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,309 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,321 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:07,322 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:5] 2025-08-25 01:44:07,322 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:6] 2025-08-25 01:44:07,322 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:7] 2025-08-25 01:44:07,323 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:6] 2025-08-25 01:44:07,339 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:44:07,339 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,345 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,345 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,349 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,349 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,365 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:44:07,366 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:5] 2025-08-25 01:44:07,366 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:44:07,366 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:5] 2025-08-25 01:44:07,366 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:07,370 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, 
/10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:07,370 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,372 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,372 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,376 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,377 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,382 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:07,384 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:7] 2025-08-25 01:44:07,384 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-25 01:44:07,384 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:7] 2025-08-25 01:44:07,384 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:7] 2025-08-25 01:44:07,388 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:44:07,388 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,396 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,399 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,401 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,401 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,403 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:44:07,404 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:44:07,404 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-08-25 01:44:07,404 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:44:07,404 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:07,411 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:07,411 
RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,414 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,414 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,416 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,416 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,418 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:07,418 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-08-25 01:44:07,418 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-25 01:44:07,418 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-08-25 01:44:07,418 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:07,422 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:07,422 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,424 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,424 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,426 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,426 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,429 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:07,429 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-25 01:44:07,429 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:4] 2025-08-25 01:44:07,429 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-25 01:44:07,430 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:07,435 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, 
/10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:07,435 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,437 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,437 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,438 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,438 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,440 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:07,440 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:2] 2025-08-25 01:44:07,440 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:7] 2025-08-25 01:44:07,440 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:2] 2025-08-25 01:44:07,441 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:07,479 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:07,479 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,481 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,482 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,484 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,484 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,486 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:07,486 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:6] 2025-08-25 01:44:07,487 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:2] 2025-08-25 01:44:07,487 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:6] 2025-08-25 01:44:07,487 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:6] 2025-08-25 01:44:07,490 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:44:07,490 RepairJob.java:257 - Validating /10.0.0.249 INFO 
[AntiEntropyStage:1] 2025-08-25 01:44:07,492 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,493 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,495 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,495 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,497 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:07,510 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:7] 2025-08-25 01:44:07,510 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:6] 2025-08-25 01:44:07,510 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:7] 2025-08-25 01:44:07,510 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:7] 2025-08-25 01:44:07,514 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:44:07,514 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,523 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,523 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,526 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,526 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,531 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:44:07,531 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:2] 2025-08-25 01:44:07,531 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-08-25 01:44:07,531 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:2] 2025-08-25 01:44:07,531 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:07,548 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:07,548 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,552 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] 
Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,553 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,554 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,555 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,557 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:07,558 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-08-25 01:44:07,558 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:3] 2025-08-25 01:44:07,558 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:7] 2025-08-25 01:44:07,558 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:7] 2025-08-25 01:44:07,562 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:44:07,562 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,566 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,566 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,567 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,567 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,577 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:07,578 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:7] 2025-08-25 01:44:07,578 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:1] 2025-08-25 01:44:07,577 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:7] 2025-08-25 01:44:07,578 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:7] 2025-08-25 01:44:07,584 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:44:07,585 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,589 
RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,589 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,592 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,592 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,597 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:07,597 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-08-25 01:44:07,597 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:07,597 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-08-25 01:44:07,597 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:07,601 RepairJob.java:234 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:07,601 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,605 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,605 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,608 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,608 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:07,610 RepairSession.java:180 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:07,610 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-08-25 01:44:07,610 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-08-25 01:44:07,611 SyncTask.java:66 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-08-25 01:44:07,611 RepairJob.java:143 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:6] 2025-08-25 01:44:07,611 RepairSession.java:270 - [repair #fd1a2370-8154-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:6] 2025-08-25 01:44:07,612 RepairRunnable.java:261 - Repair session fd1a2370-8154-11f0-a0d3-0975efdf1988 for range [(-2357553942810641165,-2252541555222342382]] finished INFO [RepairJobTask:6] 2025-08-25 01:44:07,614 ActiveRepairService.java:452 - [repair 
#fd189cd0-8154-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:5] 2025-08-25 01:44:07,617 RepairRunnable.java:343 - Repair command #6 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,037 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,056 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,064 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,072 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,079 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,123 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,137 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,150 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,161 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,182 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,196 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,242 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,258 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,270 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,278 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,297 Validator.java:281 - [repair #01bc0d30-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:44:15,312 ActiveRepairService.java:452 - [repair #01baada0-8155-11f0-b872-d9a04db196bc] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 
2025-08-25 01:44:16,434 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,460 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,475 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,482 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,501 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,545 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,555 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,573 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,592 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,604 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,615 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,669 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,681 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,696 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,726 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,757 Validator.java:281 - [repair #0292e530-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:44:16,779 ActiveRepairService.java:452 - [repair #02902610-8155-11f0-b872-d9a04db196bc] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,293 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,308 
Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,326 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,340 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,352 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,364 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,375 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,388 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,400 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,455 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,510 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,522 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,533 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,542 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,554 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,572 Validator.java:281 - [repair #0315a9c0-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:44:17,582 ActiveRepairService.java:452 - [repair #0314bf60-8155-11f0-9cab-d364fe637386] Not a global repair, will not do anticompaction INFO [Repair-Task-8] 2025-08-25 01:44:25,062 RepairRunnable.java:139 - Starting repair command #7 (07be9860-8155-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, 
repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-8] 2025-08-25 01:44:25,090 RepairSession.java:228 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(1849147306065168254,1949360667842197108]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:3] 2025-08-25 01:44:25,165 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:25,165 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,168 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,168 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,185 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,185 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,187 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:25,189 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-08-25 01:44:25,189 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:5] 2025-08-25 01:44:25,193 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:2] 2025-08-25 01:44:25,193 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:25,228 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:25,228 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,230 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,231 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,232 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,232 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,237 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] 
Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:25,240 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:44:25,240 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:3] 2025-08-25 01:44:25,240 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:3] 2025-08-25 01:44:25,242 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:25,243 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:25,244 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,245 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,246 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,248 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,248 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,249 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:25,250 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-25 01:44:25,251 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:25,251 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-25 01:44:25,251 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:25,258 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:25,260 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,262 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,262 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,264 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,264 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,267 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:25,267 SyncTask.java:66 
- [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:44:25,267 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:5] 2025-08-25 01:44:25,267 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:44:25,268 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:25,271 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:25,271 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,274 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,274 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,275 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,276 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,277 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:25,277 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-25 01:44:25,277 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:6] 2025-08-25 01:44:25,277 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-25 01:44:25,277 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:25,279 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:25,279 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,281 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,281 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,283 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,283 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,285 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:25,285 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-08-25 
01:44:25,285 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-08-25 01:44:25,285 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-08-25 01:44:25,285 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:25,293 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:25,293 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,296 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,296 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,298 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,298 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,299 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:25,299 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:6] 2025-08-25 01:44:25,299 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-25 01:44:25,299 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:6] 2025-08-25 01:44:25,299 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:25,304 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:25,304 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,306 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,307 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,308 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,308 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,309 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:25,310 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are 
consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-25 01:44:25,310 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-08-25 01:44:25,310 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-25 01:44:25,310 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:25,315 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:25,316 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,319 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,319 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,321 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,321 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,322 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:25,323 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:6] 2025-08-25 01:44:25,323 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:6] 2025-08-25 01:44:25,323 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:6] 2025-08-25 01:44:25,323 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:25,363 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:25,363 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,365 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,365 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,367 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,367 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,368 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:25,369 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:2] 2025-08-25 
01:44:25,369 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:7] 2025-08-25 01:44:25,369 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:6] 2025-08-25 01:44:25,373 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:25,374 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:25,374 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,378 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,378 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,380 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,380 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,385 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:25,386 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:1] 2025-08-25 01:44:25,386 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:1] 2025-08-25 01:44:25,386 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:1] 2025-08-25 01:44:25,386 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:7] 2025-08-25 01:44:25,390 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:44:25,390 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,391 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,392 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,393 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,393 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,395 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:25,395 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-08-25 01:44:25,395 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit 
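
Note: each repair session above and below cycles through the same steps per table — RepairJob requests merkle trees from the three replicas (/10.0.0.249, /10.0.0.254, /10.0.0.50), AntiEntropyStage records each tree as it arrives, SyncTask compares the replicas pairwise, and RepairJob marks the table "fully synced" — until the session completes and, since these subrange repairs are not global, anticompaction is skipped. A minimal sketch for summarizing such a capture, assuming only the log formats visible here (the script and regular expressions are illustrative, not part of Cassandra or Reaper):

#!/usr/bin/env python3
# Minimal sketch: summarize repair sessions from a console.log capture like the
# one above. It relies only on the INFO phrases visible in this log
# ("<table> is fully synced", "Session completed successfully",
# "Repair command #N finished in S seconds"); phrases split mid-word across
# wrapped lines are not handled.
import re
import sys
from collections import defaultdict

SYNCED = re.compile(r"\[repair #([0-9a-f-]+)\] (\w+) is fully synced")
COMPLETED = re.compile(r"\[repair #([0-9a-f-]+)\] Session completed successfully")
FINISHED = re.compile(r"Repair command #(\d+) finished in (\d+) seconds")

def summarize(text):
    tables = defaultdict(set)          # session id -> tables reported fully synced
    for sid, table in SYNCED.findall(text):
        tables[sid].add(table)
    done = set(COMPLETED.findall(text))       # session ids that completed successfully
    commands = FINISHED.findall(text)         # (command number, seconds) pairs
    return tables, done, commands

if __name__ == "__main__":
    # The capture wraps several entries onto each physical line, so scan the
    # whole text at once rather than entry by entry.
    tables, done, commands = summarize(sys.stdin.read())
    for sid in sorted(tables):
        state = "completed" if sid in done else "not completed in this capture"
        print(f"repair #{sid}: {len(tables[sid])} tables fully synced, {state}")
    for num, secs in commands:
        print(f"repair command #{num} finished in {secs} seconds")

Applied to this capture, such a summary would show the session in progress here (repair #07c2de20-8155-11f0-a0d3-0975efdf1988) completing below with all sixteen reaper_db tables fully synced, consistent with the "Repair command #7 finished in 0 seconds" entry.
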
INFO [RepairJobTask:5] 2025-08-25 01:44:25,395 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-08-25 01:44:25,396 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:25,406 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:25,406 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,408 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,408 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,410 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,410 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,412 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:25,412 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:6] 2025-08-25 01:44:25,413 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:6] 2025-08-25 01:44:25,413 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:6] 2025-08-25 01:44:25,413 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:7] 2025-08-25 01:44:25,416 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:44:25,416 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,419 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,420 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,421 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,422 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,424 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:44:25,424 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:1] 2025-08-25 01:44:25,424 SyncTask.java:66 - [repair 
#07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:5] 2025-08-25 01:44:25,425 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:44:25,425 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:6] 2025-08-25 01:44:25,434 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:44:25,434 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,436 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,436 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,438 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,438 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,443 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:25,443 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:25,443 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:7] 2025-08-25 01:44:25,443 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:25,443 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:25,447 RepairJob.java:234 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:25,447 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,449 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,449 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,451 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,451 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:25,462 RepairSession.java:180 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:25,463 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-08-25 01:44:25,463 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for 
repair_schedule_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:25,463 SyncTask.java:66 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-08-25 01:44:25,463 RepairJob.java:143 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:25,463 RepairSession.java:270 - [repair #07c2de20-8155-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:4] 2025-08-25 01:44:25,463 RepairRunnable.java:261 - Repair session 07c2de20-8155-11f0-a0d3-0975efdf1988 for range [(1849147306065168254,1949360667842197108]] finished INFO [RepairJobTask:4] 2025-08-25 01:44:25,464 ActiveRepairService.java:452 - [repair #07be9860-8155-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:6] 2025-08-25 01:44:25,468 RepairRunnable.java:343 - Repair command #7 finished in 0 seconds INFO [Repair-Task-9] 2025-08-25 01:44:26,478 RepairRunnable.java:139 - Starting repair command #8 (089681d0-8155-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-9] 2025-08-25 01:44:26,486 RepairSession.java:228 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(-5594286708904094236,-5439819757650427715]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:1] 2025-08-25 01:44:26,501 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:26,501 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,503 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,503 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,506 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,506 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,507 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:26,517 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 
01:44:26,518 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-08-25 01:44:26,518 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:44:26,518 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:26,558 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:26,559 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,561 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,561 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,563 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,563 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,567 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:26,573 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:44:26,573 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:44:26,574 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:44:26,574 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:26,577 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:26,577 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,578 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,579 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,580 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,581 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,582 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:44:26,584 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:6] 2025-08-25 01:44:26,584 SyncTask.java:66 - [repair 
#0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-25 01:44:26,585 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:7] 2025-08-25 01:44:26,585 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:6] 2025-08-25 01:44:26,585 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:44:26,586 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,589 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,589 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,591 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,591 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,593 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:26,593 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:5] 2025-08-25 01:44:26,593 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:44:26,594 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:44:26,594 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:7] 2025-08-25 01:44:26,598 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:44:26,599 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,600 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,601 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,603 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,604 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,605 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:26,607 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-25 01:44:26,607 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:2] 2025-08-25 01:44:26,609 SyncTask.java:66 - [repair 
#0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-25 01:44:26,611 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:26,611 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:26,611 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,613 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,614 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,616 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,616 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,618 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:26,618 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-08-25 01:44:26,618 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-08-25 01:44:26,619 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-08-25 01:44:26,620 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:26,623 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:26,624 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,625 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,625 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,628 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,628 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,635 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:7] 2025-08-25 01:44:26,635 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:7] 2025-08-25 01:44:26,635 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:7] 2025-08-25 01:44:26,636 
SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:7] 2025-08-25 01:44:26,636 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:26,637 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:26,637 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,639 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,639 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,641 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,641 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,642 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:26,642 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-25 01:44:26,642 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:7] 2025-08-25 01:44:26,643 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-25 01:44:26,643 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:26,643 RepairJob.java:257 - Validating /10.0.0.249 INFO [RepairJobTask:7] 2025-08-25 01:44:26,644 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,644 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,644 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,646 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,646 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,648 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:26,648 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:4] 2025-08-25 01:44:26,649 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:3] 2025-08-25 01:44:26,649 
SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:4] 2025-08-25 01:44:26,649 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:26,687 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:26,687 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,689 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,689 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,691 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,691 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,692 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:26,693 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:2] 2025-08-25 01:44:26,694 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:5] 2025-08-25 01:44:26,694 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:2] 2025-08-25 01:44:26,694 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:26,697 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:26,698 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,699 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,700 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,701 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,701 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,702 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:26,703 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:5] 2025-08-25 01:44:26,703 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:2] 2025-08-25 01:44:26,703 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:5] 
2025-08-25 01:44:26,703 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:26,706 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:26,706 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,707 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,707 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,708 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,709 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,710 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:26,710 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:1] 2025-08-25 01:44:26,710 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:4] 2025-08-25 01:44:26,710 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:1] 2025-08-25 01:44:26,710 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:26,714 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:26,714 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,715 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,716 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,718 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,718 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,719 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:26,719 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-25 01:44:26,719 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-08-25 01:44:26,720 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are 
consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-25 01:44:26,720 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:26,769 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:26,769 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,777 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,779 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,782 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,782 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,784 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:26,784 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:4] 2025-08-25 01:44:26,784 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:2] 2025-08-25 01:44:26,784 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:4] 2025-08-25 01:44:26,784 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:26,797 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:26,798 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,801 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,804 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,807 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,807 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,808 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:26,808 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:26,808 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:26,808 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-08-25 
01:44:26,808 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:26,813 RepairJob.java:234 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:26,814 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,815 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,815 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,821 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,821 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:26,824 RepairSession.java:180 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:26,825 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:26,825 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-08-25 01:44:26,826 SyncTask.java:66 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:26,826 RepairJob.java:143 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:26,827 RepairSession.java:270 - [repair #0897e160-8155-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:1] 2025-08-25 01:44:26,827 RepairRunnable.java:261 - Repair session 0897e160-8155-11f0-a0d3-0975efdf1988 for range [(-5594286708904094236,-5439819757650427715]] finished INFO [RepairJobTask:1] 2025-08-25 01:44:26,828 ActiveRepairService.java:452 - [repair #089681d0-8155-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-08-25 01:44:26,830 RepairRunnable.java:343 - Repair command #8 finished in 0 seconds INFO [Repair-Task-10] 2025-08-25 01:44:27,300 RepairRunnable.java:139 - Starting repair command #9 (09141640-8155-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 3, pull repair: false) INFO [Repair-Task-10] 2025-08-25 01:44:27,315 RepairSession.java:228 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(-3464228044227952797,-3442999229910629434], (-2719407870975237438,-2696710550152509137], (7754660028753457058,7776964252401932154]] for reaper_db.[diagnostic_event_subscription, repair_run, 
repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:1] 2025-08-25 01:44:27,355 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:27,355 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,357 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,358 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,360 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,360 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,365 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:27,368 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:2] 2025-08-25 01:44:27,368 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-08-25 01:44:27,369 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-08-25 01:44:27,369 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:27,421 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:27,421 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,423 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,424 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,426 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,426 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,427 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:27,427 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:44:27,427 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:5] 2025-08-25 01:44:27,428 SyncTask.java:66 - [repair 
#09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:4] 2025-08-25 01:44:27,428 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:27,431 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:27,432 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,433 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,433 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,438 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,438 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,440 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:27,440 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:27,441 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:27,441 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:27,441 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:27,443 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:27,443 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,448 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,448 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,450 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,451 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,452 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:27,453 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:44:27,453 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:1] 2025-08-25 01:44:27,453 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:1] 
2025-08-25 01:44:27,453 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:27,457 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:27,458 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,459 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,459 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,461 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,461 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,463 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:27,464 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-25 01:44:27,464 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:44:27,464 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:1] 2025-08-25 01:44:27,464 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:27,467 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:27,467 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,472 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,472 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,474 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,474 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,478 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:27,479 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:4] 2025-08-25 01:44:27,479 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:4] 2025-08-25 01:44:27,479 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:4] 2025-08-25 01:44:27,479 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] schema_migration_leader 
is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:27,484 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:27,484 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,485 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,485 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,488 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,488 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,490 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:27,492 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-25 01:44:27,492 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-25 01:44:27,492 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-25 01:44:27,492 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:27,493 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:27,494 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,499 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,500 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,503 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,503 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,504 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:27,504 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:4] 2025-08-25 01:44:27,504 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-25 01:44:27,504 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-25 01:44:27,504 RepairJob.java:143 - 
[repair #09166030-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:27,507 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:27,507 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,509 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,509 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,511 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,511 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,513 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:27,514 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:1] 2025-08-25 01:44:27,514 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:2] 2025-08-25 01:44:27,514 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:1] 2025-08-25 01:44:27,514 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:27,551 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:27,551 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,553 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,553 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,555 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,555 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,557 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:27,557 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:2] 2025-08-25 01:44:27,558 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-25 01:44:27,558 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:2] 2025-08-25 01:44:27,558 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO 
[RepairJobTask:2] 2025-08-25 01:44:27,559 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:27,559 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,561 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,561 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,562 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,562 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,565 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:27,565 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:1] 2025-08-25 01:44:27,566 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:2] 2025-08-25 01:44:27,566 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:1] 2025-08-25 01:44:27,566 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:27,568 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:27,568 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,570 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,571 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,572 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,573 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,574 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:27,575 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:1] 2025-08-25 01:44:27,575 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-25 01:44:27,576 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:1] 2025-08-25 01:44:27,578 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:27,584 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for 
repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:27,584 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,586 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,587 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,590 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,591 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,594 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:27,594 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-25 01:44:27,594 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:1] 2025-08-25 01:44:27,594 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-25 01:44:27,595 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:27,602 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:27,602 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,604 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,604 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,606 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,606 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,608 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:27,608 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:2] 2025-08-25 01:44:27,608 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-25 01:44:27,608 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:2] 2025-08-25 01:44:27,609 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:27,611 
RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:27,612 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,613 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,613 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,615 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,615 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,616 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:27,616 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:27,616 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:27,616 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:27,617 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:6] 2025-08-25 01:44:27,621 RepairJob.java:234 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:44:27,621 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,622 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,623 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,625 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,625 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:27,627 RepairSession.java:180 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:27,627 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:27,627 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:27,627 SyncTask.java:66 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:27,627 RepairJob.java:143 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:27,628 RepairSession.java:270 - [repair #09166030-8155-11f0-a0d3-0975efdf1988] Session completed 
successfully INFO [RepairJobTask:5] 2025-08-25 01:44:27,628 RepairRunnable.java:261 - Repair session 09166030-8155-11f0-a0d3-0975efdf1988 for range [(-3464228044227952797,-3442999229910629434], (-2719407870975237438,-2696710550152509137], (7754660028753457058,7776964252401932154]] finished INFO [RepairJobTask:5] 2025-08-25 01:44:27,629 ActiveRepairService.java:452 - [repair #09141640-8155-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-08-25 01:44:27,631 RepairRunnable.java:343 - Repair command #9 finished in 0 seconds INFO [Repair-Task-11] 2025-08-25 01:44:35,228 RepairRunnable.java:139 - Starting repair command #10 (0dcdcdc0-8155-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 5, pull repair: false) INFO [Repair-Task-11] 2025-08-25 01:44:35,249 RepairSession.java:228 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(8707783103061001635,8714442338712568895], (-1339838517096654297,-1329831482630162157], (-491488413420932955,-491320590237577696], (-5273671614794124226,-5252215016759704759], (-4391300689057894011,-4342438392148372077]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:2] 2025-08-25 01:44:35,380 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:35,381 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,384 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,384 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,387 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,388 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,389 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:35,393 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:44:35,393 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-08-25 01:44:35,393 SyncTask.java:66 - 
[repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:44:35,395 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:35,431 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:35,432 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,434 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,434 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,436 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,436 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,438 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:35,439 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:44:35,440 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:44:35,440 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:44:35,440 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:35,441 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:35,441 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,443 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,444 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,448 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,448 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,450 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:35,452 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:35,453 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:35,453 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 
are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:35,456 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:35,456 RepairJob.java:257 - Validating /10.0.0.249 INFO [RepairJobTask:3] 2025-08-25 01:44:35,456 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,460 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,460 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,462 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,462 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,463 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:35,465 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:2] 2025-08-25 01:44:35,465 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:5] 2025-08-25 01:44:35,466 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:2] 2025-08-25 01:44:35,471 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:35,479 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:35,479 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,485 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,485 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,487 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,487 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,489 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:35,490 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:44:35,490 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:6] 2025-08-25 01:44:35,490 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-25 01:44:35,491 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:4] 2025-08-25 
01:44:35,495 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:35,495 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,506 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,506 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,517 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,517 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,519 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:35,520 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:44:35,520 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-08-25 01:44:35,520 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-25 01:44:35,521 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:35,523 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:35,523 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,526 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,527 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,528 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,528 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,530 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:35,530 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-25 01:44:35,530 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-08-25 01:44:35,530 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-25 01:44:35,530 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] 
percent_repaired_by_schedule is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:35,533 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:35,533 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,535 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,535 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,537 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,537 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,538 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:35,538 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-25 01:44:35,538 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:4] 2025-08-25 01:44:35,538 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-08-25 01:44:35,539 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:35,540 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:35,540 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,542 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,542 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,545 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,545 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,546 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:35,546 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:4] 2025-08-25 01:44:35,547 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:4] 2025-08-25 01:44:35,547 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:4] 2025-08-25 01:44:35,547 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] schema_migration 
is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:35,587 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:35,587 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,589 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,589 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,592 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,592 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,594 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:35,594 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-25 01:44:35,594 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:4] 2025-08-25 01:44:35,594 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-25 01:44:35,594 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:35,597 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:35,597 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,598 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,599 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,600 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,600 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,602 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:35,602 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:6] 2025-08-25 01:44:35,602 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:2] 2025-08-25 01:44:35,602 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:6] 2025-08-25 01:44:35,602 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:6] 2025-08-25 01:44:35,605 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for 
repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-08-25 01:44:35,605 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,606 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,607 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,612 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,612 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,614 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:35,615 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:2] 2025-08-25 01:44:35,615 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-25 01:44:35,616 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:2] 2025-08-25 01:44:35,616 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:35,621 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:35,621 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,631 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,631 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,640 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,640 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,643 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:35,643 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-25 01:44:35,643 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-08-25 01:44:35,643 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-25 01:44:35,644 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:4] 2025-08-25 
01:44:35,646 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:35,646 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,650 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,651 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,655 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,655 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,657 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:35,657 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:3] 2025-08-25 01:44:35,658 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:7] 2025-08-25 01:44:35,659 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:1] 2025-08-25 01:44:35,659 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:7] 2025-08-25 01:44:35,662 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-08-25 01:44:35,662 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,664 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,664 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,667 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,667 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,670 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:35,670 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:35,670 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-25 01:44:35,670 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:35,670 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:35,674 RepairJob.java:234 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for 
repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:35,674 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,675 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,675 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,678 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,678 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:35,680 RepairSession.java:180 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:35,680 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:35,680 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:35,680 SyncTask.java:66 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-08-25 01:44:35,680 RepairJob.java:143 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:35,681 RepairSession.java:270 - [repair #0dd10210-8155-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:5] 2025-08-25 01:44:35,681 RepairRunnable.java:261 - Repair session 0dd10210-8155-11f0-a0d3-0975efdf1988 for range [(8707783103061001635,8714442338712568895], (-1339838517096654297,-1329831482630162157], (-491488413420932955,-491320590237577696], (-5273671614794124226,-5252215016759704759], (-4391300689057894011,-4342438392148372077]] finished INFO [RepairJobTask:5] 2025-08-25 01:44:35,681 ActiveRepairService.java:452 - [repair #0dcdcdc0-8155-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-08-25 01:44:35,686 RepairRunnable.java:343 - Repair command #10 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,584 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,595 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,604 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,610 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,625 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,640 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to 
/10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,651 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,657 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,665 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,711 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,767 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,784 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,823 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,839 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,860 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,877 Validator.java:281 - [repair #0e97d480-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:44:36,890 ActiveRepairService.java:452 - [repair #0e964de0-8155-11f0-9cab-d364fe637386] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,432 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,451 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,460 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,469 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,480 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,532 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,544 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for 
reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,561 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,583 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,610 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,622 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,666 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,678 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,690 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,718 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,742 Validator.java:281 - [repair #0f1b0e40-8155-11f0-b872-d9a04db196bc] Sending completed merkle tree to /10.0.0.249 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:44:37,751 ActiveRepairService.java:452 - [repair #0f19d5c0-8155-11f0-b872-d9a04db196bc] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,272 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,295 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,354 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,368 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,387 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,406 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,415 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,426 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 
2025-08-25 01:44:45,437 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,492 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,553 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,564 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,573 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,597 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,618 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,626 Validator.java:281 - [repair #13c6bc00-8155-11f0-9cab-d364fe637386] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-25 01:44:45,637 ActiveRepairService.java:452 - [repair #13c55c70-8155-11f0-9cab-d364fe637386] Not a global repair, will not do anticompaction INFO [Repair-Task-12] 2025-08-25 01:44:46,606 RepairRunnable.java:139 - Starting repair command #11 (1495f2e0-8155-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-12] 2025-08-25 01:44:46,618 RepairSession.java:228 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(4640210167327225809,4708828076068576335]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:3] 2025-08-25 01:44:46,639 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:46,640 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,647 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,647 RepairJob.java:270 - 
Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,653 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,653 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,655 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:46,658 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:2] 2025-08-25 01:44:46,658 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-08-25 01:44:46,659 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:44:46,659 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:46,712 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:46,713 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,715 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,715 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,717 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,717 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,723 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:46,723 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:2] 2025-08-25 01:44:46,723 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:3] 2025-08-25 01:44:46,723 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:1] 2025-08-25 01:44:46,724 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] repair_run is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:46,727 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:46,728 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,730 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,730 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 
2025-08-25 01:44:46,735 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,735 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,737 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:46,738 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:46,738 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:46,738 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:1] 2025-08-25 01:44:46,738 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:46,745 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:46,745 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,747 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,747 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,748 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,752 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,755 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:46,756 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:5] 2025-08-25 01:44:46,757 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:2] 2025-08-25 01:44:46,757 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:2] 2025-08-25 01:44:46,757 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] leader is fully synced INFO [RepairJobTask:2] 2025-08-25 01:44:46,765 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:46,765 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,769 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,769 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,771 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254 INFO 
[AntiEntropyStage:1] 2025-08-25 01:44:46,771 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,772 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:46,773 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:5] 2025-08-25 01:44:46,773 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:2] 2025-08-25 01:44:46,777 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:5] 2025-08-25 01:44:46,778 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] snapshot is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:46,788 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:46,788 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,789 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,789 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,791 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,791 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,794 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:46,794 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-08-25 01:44:46,795 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-08-25 01:44:46,794 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-08-25 01:44:46,795 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:46,797 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:46,798 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,799 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,800 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,806 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 
01:44:46,807 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,808 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:46,808 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-25 01:44:46,808 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:3] 2025-08-25 01:44:46,808 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-25 01:44:46,808 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:46,810 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:46,811 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,812 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,812 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,814 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,814 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,815 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:46,815 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-08-25 01:44:46,815 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-08-25 01:44:46,815 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:4] 2025-08-25 01:44:46,816 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:46,825 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:46,825 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,827 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,827 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,829 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254 INFO 
[AntiEntropyStage:1] 2025-08-25 01:44:46,829 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,831 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:46,832 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:4] 2025-08-25 01:44:46,832 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:3] 2025-08-25 01:44:46,832 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:4] 2025-08-25 01:44:46,832 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] schema_migration is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:46,879 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_repairs (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:46,879 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,881 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,889 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,897 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,900 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,901 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:4] 2025-08-25 01:44:46,902 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-25 01:44:46,902 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:5] 2025-08-25 01:44:46,902 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-25 01:44:46,902 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] running_repairs is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:46,911 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:46,923 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,927 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,927 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,929 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,929 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 
01:44:46,931 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:46,932 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:3] 2025-08-25 01:44:46,933 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:5] 2025-08-25 01:44:46,933 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:4] 2025-08-25 01:44:46,934 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] cluster is fully synced INFO [RepairJobTask:5] 2025-08-25 01:44:46,936 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-08-25 01:44:46,936 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,940 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,940 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,943 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,945 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,957 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:5] 2025-08-25 01:44:46,957 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-25 01:44:46,957 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-25 01:44:46,963 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-25 01:44:46,963 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] repair_run_by_unit is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:46,969 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:46,969 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,973 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,973 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,982 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:46,982 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,001 RepairSession.java:180 - [repair 
#1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:3] 2025-08-25 01:44:47,001 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-08-25 01:44:47,001 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:1] 2025-08-25 01:44:47,005 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-25 01:44:47,005 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:47,008 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for running_reapers (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:47,008 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,013 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,013 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,019 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,019 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,021 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:47,022 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:3] 2025-08-25 01:44:47,022 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:4] 2025-08-25 01:44:47,025 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:1] 2025-08-25 01:44:47,025 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] running_reapers is fully synced INFO [RepairJobTask:1] 2025-08-25 01:44:47,027 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-08-25 01:44:47,027 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,028 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,029 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,031 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,031 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,037 RepairSession.java:180 - 
[repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:1] 2025-08-25 01:44:47,037 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-08-25 01:44:47,038 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-08-25 01:44:47,038 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-25 01:44:47,038 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] repair_unit_v1 is fully synced INFO [RepairJobTask:4] 2025-08-25 01:44:47,045 RepairJob.java:234 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-08-25 01:44:47,046 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,053 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,053 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,055 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,055 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,056 RepairSession.java:180 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:6] 2025-08-25 01:44:47,056 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-08-25 01:44:47,056 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-08-25 01:44:47,057 SyncTask.java:66 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-08-25 01:44:47,057 RepairJob.java:143 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] repair_schedule_v1 is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:47,057 RepairSession.java:270 - [repair #1497c7a0-8155-11f0-a0d3-0975efdf1988] Session completed successfully INFO [RepairJobTask:3] 2025-08-25 01:44:47,058 RepairRunnable.java:261 - Repair session 1497c7a0-8155-11f0-a0d3-0975efdf1988 for range [(4640210167327225809,4708828076068576335]] finished INFO [RepairJobTask:3] 2025-08-25 01:44:47,058 ActiveRepairService.java:452 - [repair #1495f2e0-8155-11f0-a0d3-0975efdf1988] Not a global repair, will not do anticompaction INFO [InternalResponseStage:5] 2025-08-25 01:44:47,060 RepairRunnable.java:343 - Repair command #11 finished in 0 seconds INFO [Repair-Task-13] 2025-08-25 01:44:47,450 RepairRunnable.java:139 - Starting repair command #12 (1516bba0-8155-11f0-a0d3-0975efdf1988), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, 
running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 4, pull repair: false) INFO [Repair-Task-13] 2025-08-25 01:44:47,467 RepairSession.java:228 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] new session: will sync /10.0.0.50, /10.0.0.249, /10.0.0.254 on range [(-8062858676258821040,-8052660704996370808], (6861487511106982971,6887623964072998713], (1637135547132182454,1678071167640808597], (6887623964072998713,6903842824702902789]] for reaper_db.[diagnostic_event_subscription, repair_run, repair_run_by_cluster, leader, snapshot, schema_migration_leader, percent_repaired_by_schedule, repair_run_by_cluster_v2, schema_migration, running_repairs, cluster, repair_run_by_unit, repair_schedule_by_cluster_and_keyspace, running_reapers, repair_unit_v1, repair_schedule_v1] INFO [RepairJobTask:2] 2025-08-25 01:44:47,515 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-08-25 01:44:47,515 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,518 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,518 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,520 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,520 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,521 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:2] 2025-08-25 01:44:47,525 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:2] 2025-08-25 01:44:47,525 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-25 01:44:47,525 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-08-25 01:44:47,525 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] diagnostic_event_subscription is fully synced INFO [RepairJobTask:3] 2025-08-25 01:44:47,571 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run (to [/10.0.0.249, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-08-25 01:44:47,572 RepairJob.java:257 - Validating /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,577 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.249 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,577 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,581 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for 
repair_run from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,582 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,584 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run from /10.0.0.50
INFO [RepairJobTask:1] 2025-08-25 01:44:47,586 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run
INFO [RepairJobTask:1] 2025-08-25 01:44:47,588 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run
INFO [RepairJobTask:4] 2025-08-25 01:44:47,588 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run
INFO [RepairJobTask:3] 2025-08-25 01:44:47,588 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:3] 2025-08-25 01:44:47,589 RepairJob.java:257 - Validating /10.0.0.249
INFO [RepairJobTask:2] 2025-08-25 01:44:47,589 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] repair_run is fully synced
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,591 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,592 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,593 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,593 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,594 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster from /10.0.0.50
INFO [RepairJobTask:1] 2025-08-25 01:44:47,596 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster
INFO [RepairJobTask:2] 2025-08-25 01:44:47,596 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster
INFO [RepairJobTask:4] 2025-08-25 01:44:47,596 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster
INFO [RepairJobTask:3] 2025-08-25 01:44:47,596 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster is fully synced
INFO [RepairJobTask:4] 2025-08-25 01:44:47,653 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-08-25 01:44:47,653 RepairJob.java:257 - Validating /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,655 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,656 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,657 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,657 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,659 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for leader from /10.0.0.50
INFO [RepairJobTask:2] 2025-08-25 01:44:47,659 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for leader
INFO [RepairJobTask:4] 2025-08-25 01:44:47,659 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for leader
INFO [RepairJobTask:1] 2025-08-25 01:44:47,659 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader
INFO [RepairJobTask:1] 2025-08-25 01:44:47,660 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] leader is fully synced
INFO [RepairJobTask:3] 2025-08-25 01:44:47,661 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for snapshot (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:3] 2025-08-25 01:44:47,661 RepairJob.java:257 - Validating /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,662 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,662 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,663 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,663 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,664 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for snapshot from /10.0.0.50
INFO [RepairJobTask:2] 2025-08-25 01:44:47,665 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for snapshot
INFO [RepairJobTask:5] 2025-08-25 01:44:47,665 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for snapshot
INFO [RepairJobTask:6] 2025-08-25 01:44:47,666 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot
INFO [RepairJobTask:5] 2025-08-25 01:44:47,666 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] snapshot is fully synced
INFO [RepairJobTask:5] 2025-08-25 01:44:47,667 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration_leader (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:5] 2025-08-25 01:44:47,667 RepairJob.java:257 - Validating /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,668 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,668 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,671 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,671 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,678 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration_leader from /10.0.0.50
INFO [RepairJobTask:1] 2025-08-25 01:44:47,687 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader
INFO [RepairJobTask:4] 2025-08-25 01:44:47,689 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-08-25 01:44:47,689 RepairJob.java:257 - Validating /10.0.0.249
INFO [RepairJobTask:6] 2025-08-25 01:44:47,689 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration_leader
INFO [RepairJobTask:5] 2025-08-25 01:44:47,689 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration_leader
INFO [RepairJobTask:6] 2025-08-25 01:44:47,690 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] schema_migration_leader is fully synced
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,691 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,691 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,693 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,693 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,695 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50
INFO [RepairJobTask:4] 2025-08-25 01:44:47,697 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:1] 2025-08-25 01:44:47,697 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:1] 2025-08-25 01:44:47,697 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:1] 2025-08-25 01:44:47,697 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] percent_repaired_by_schedule is fully synced
INFO [RepairJobTask:5] 2025-08-25 01:44:47,699 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:5] 2025-08-25 01:44:47,699 RepairJob.java:257 - Validating /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,701 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,701 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,704 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,704 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,705 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50
INFO [RepairJobTask:4] 2025-08-25 01:44:47,706 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-08-25 01:44:47,706 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-08-25 01:44:47,706 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-08-25 01:44:47,706 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] repair_run_by_cluster_v2 is fully synced
INFO [RepairJobTask:1] 2025-08-25 01:44:47,770 RepairJob.java:234 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Requesting merkle trees for schema_migration (to [/10.0.0.249, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-08-25 01:44:47,770 RepairJob.java:257 - Validating /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,772 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.249
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,772 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,774 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,774 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-08-25 01:44:47,776 RepairSession.java:180 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Received merkle tree for schema_migration from /10.0.0.50
INFO [RepairJobTask:1] 2025-08-25 01:44:47,776 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.254 are consistent for schema_migration
INFO [RepairJobTask:3] 2025-08-25 01:44:47,776 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.249 and /10.0.0.50 are consistent for schema_migration
INFO [RepairJobTask:2] 2025-08-25 01:44:47,776 SyncTask.java:66 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration
INFO [RepairJobTask:3] 2025-08-25 01:44:47,776 RepairJob.java:143 - [repair #151953b0-8155-11f0-a0d3-0975efdf1988] schema_migration is fully synced
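The repair session above (#151953b0-8155-11f0-a0d3-0975efdf1988) runs each table through the same cycle: request merkle trees from the three replicas, validate each endpoint, compare replica pairs, and report the table fully synced. A minimal sketch for spot-checking such a session from this console log, assuming the log path configured at container start, one log entry per line as reconstructed above, and standard grep/awk available in the container:

# Hypothetical check, not part of the container scripts: list the tables this
# repair session reported as fully synced from the Cassandra console log.
REPAIR_ID=151953b0-8155-11f0-a0d3-0975efdf1988
LOG=/var/log/contrail/config-database-cassandra/console.log
grep "repair #${REPAIR_ID}" "$LOG" | awk '/is fully synced/ {print $(NF-3)}' | sort -u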