++ LOG_DIR=/var/log/contrail ++ export CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra ++ CONTAINER_LOG_DIR=/var/log/contrail/config-database-cassandra ++ mkdir -p /var/log/contrail/config-database-cassandra ++ log_file=/var/log/contrail/config-database-cassandra/console.log ++ touch /var/log/contrail/config-database-cassandra/console.log ++ chmod 600 /var/log/contrail/config-database-cassandra/console.log ++ exec +++ tee -a /var/log/contrail/config-database-cassandra/console.log +++ date ++ echo 'INFO: =================== Mon Jul 28 03:26:02 UTC 2025 ===================' INFO: =================== Mon Jul 28 03:26:02 UTC 2025 =================== ++ LOG_LOCAL=1 ++ source /functions.sh ++ source /contrail-functions.sh +++ get_default_ip ++++ get_default_nic ++++ get_gateway_nic_for_ip 1 ++++ command -v ip ++++ local ip=1 +++++ ip route get 1 +++++ grep -o 'dev.*' +++++ awk '{print $2}' ++++ local iface=ens3 ++++ [[ ens3 == \l\o ]] ++++ echo ens3 +++ local nic=ens3 +++ get_ip_for_nic ens3 +++ local nic=ens3 +++ get_cidr_for_nic ens3 +++ command -v ip +++ cut -d / -f 1 +++ local nic=ens3 +++ awk '{print $2}' +++ ip addr show dev ens3 +++ grep 'inet ' +++ head -n 1 ++ DEFAULT_LOCAL_IP=10.0.0.50 ++ ENCAP_PRIORITY=MPLSoUDP,MPLSoGRE,VXLAN ++ VXLAN_VN_ID_MODE=automatic ++ DPDK_UIO_DRIVER=uio_pci_generic ++ CPU_CORE_MASK=0x01 ++ SERVICE_CORE_MASK= ++ DPDK_CTRL_THREAD_MASK= ++ HUGE_PAGES= ++ HUGE_PAGES_DIR=/dev/hugepages ++ HUGE_PAGES_1GB=0 ++ HUGE_PAGES_2MB=256 ++ HUGE_PAGES_1GB_DIR= ++ HUGE_PAGES_2MB_DIR= ++ [[ 0 != 0 ]] ++ [[ 0 != 256 ]] ++ [[ -z '' ]] +++ mount -t hugetlbfs +++ awk '/pagesize=2M/{print($3)}' +++ tail -n 1 ++ HUGE_PAGES_2MB_DIR= ++ DPDK_MEM_PER_SOCKET=1024 ++ DPDK_COMMAND_ADDITIONAL_ARGS= ++ NIC_OFFLOAD_ENABLE=False ++ DPDK_ENABLE_VLAN_FWRD=False ++ DIST_SNAT_PROTO_PORT_LIST= ++ CLOUD_ORCHESTRATOR=openstack ++ CLOUD_ADMIN_ROLE=admin ++ AAA_MODE=rbac ++ AUTH_MODE=keystone ++ AUTH_PARAMS= ++ SSL_ENABLE=false ++ SSL_INSECURE=True ++ SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ SERVER_CA_KEYFILE=/etc/contrail/ssl/private/ca-key.pem ++ SELFSIGNED_CERTS_WITH_IPS=True ++ CONTROLLER_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ ANALYTICS_ALARM_ENABLE=True ++ ANALYTICS_SNMP_ENABLE=True ++ ANALYTICSDB_ENABLE=True ++ ANALYTICS_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ ANALYTICSDB_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ ANALYTICS_SNMP_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ ANALYTICS_API_PORT=8081 ++ ANALYTICS_API_INTROSPECT_PORT=8090 ++ ANALYTICSDB_PORT=9160 ++ ANALYTICSDB_CQL_PORT=9042 ++ TOPOLOGY_INTROSPECT_PORT=5921 ++ QUERYENGINE_INTROSPECT_PORT=8091 +++ get_server_list ANALYTICS ':8081 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:8081 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8081 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:8081 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:8081 ' +++ '[' -n '10.0.0.254:8081 10.0.0.38:8081 10.0.0.50:8081 ' ']' +++ echo '10.0.0.254:8081 10.0.0.38:8081 10.0.0.50:8081' ++ ANALYTICS_SERVERS='10.0.0.254:8081 10.0.0.38:8081 10.0.0.50:8081' +++ 
get_server_list ANALYTICSDB ':9042 ' +++ local server_typ=ANALYTICSDB_NODES +++ local 'port_with_delim=:9042 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9042 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:9042 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:9042 ' +++ '[' -n '10.0.0.254:9042 10.0.0.38:9042 10.0.0.50:9042 ' ']' +++ echo '10.0.0.254:9042 10.0.0.38:9042 10.0.0.50:9042' ++ ANALYTICSDB_CQL_SERVERS='10.0.0.254:9042 10.0.0.38:9042 10.0.0.50:9042' ++ ANALYTICS_API_VIP= ++ ANALYTICS_ALARM_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ ALARMGEN_INTROSPECT_PORT=5995 ++ BGP_PORT=179 ++ BGP_AUTO_MESH=true ++ BGP_ASN=64512 ++ ENABLE_4BYTE_AS=false ++ APPLY_DEFAULTS=true ++ COLLECTOR_PORT=8086 ++ COLLECTOR_INTROSPECT_PORT=8089 ++ COLLECTOR_SYSLOG_PORT=514 ++ COLLECTOR_SFLOW_PORT=6343 ++ COLLECTOR_IPFIX_PORT=4739 ++ COLLECTOR_PROTOBUF_PORT=3333 ++ COLLECTOR_STRUCTURED_SYSLOG_PORT=3514 ++ SNMPCOLLECTOR_INTROSPECT_PORT=5920 +++ get_server_list ANALYTICS ':8086 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:8086 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8086 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:8086 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:8086 ' +++ '[' -n '10.0.0.254:8086 10.0.0.38:8086 10.0.0.50:8086 ' ']' +++ echo '10.0.0.254:8086 10.0.0.38:8086 10.0.0.50:8086' ++ COLLECTOR_SERVERS='10.0.0.254:8086 10.0.0.38:8086 10.0.0.50:8086' ++ CASSANDRA_PORT=9161 ++ CASSANDRA_CQL_PORT=9041 ++ CASSANDRA_SSL_STORAGE_PORT=7013 ++ CASSANDRA_STORAGE_PORT=7012 ++ CASSANDRA_JMX_LOCAL_PORT=7201 ++ CONFIGDB_CASSANDRA_DRIVER=cql ++ CONFIG_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ CONFIGDB_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ CONFIG_API_PORT=8082 ++ CONFIG_API_INTROSPECT_PORT=8084 ++ CONFIG_API_ADMIN_PORT=8095 ++ CONFIGDB_PORT=9161 ++ CONFIGDB_CQL_PORT=9041 +++ get_server_list CONFIG ':8082 ' +++ local server_typ=CONFIG_NODES +++ local 'port_with_delim=:8082 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:8082 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:8082 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:8082 ' +++ '[' -n '10.0.0.254:8082 10.0.0.38:8082 10.0.0.50:8082 ' ']' +++ echo '10.0.0.254:8082 10.0.0.38:8082 10.0.0.50:8082' ++ CONFIG_SERVERS='10.0.0.254:8082 10.0.0.38:8082 10.0.0.50:8082' +++ get_server_list CONFIGDB ':9161 ' +++ local server_typ=CONFIGDB_NODES +++ local 'port_with_delim=:9161 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' 
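The near-identical expansions in this trace (ANALYTICS, ANALYTICSDB, COLLECTOR, CONFIG, CONFIGDB, ZOOKEEPER, RABBITMQ, REDIS, KAFKA) all go through one helper sourced from /functions.sh: it dereferences the <PREFIX>_NODES variable, splits it on commas, appends the port-plus-delimiter argument to every address, and echoes the result with the final delimiter character stripped. A minimal sketch consistent with the trace (the authoritative version lives in /functions.sh):

get_server_list() {
    local server_typ=${1}_NODES            # e.g. ANALYTICS -> ANALYTICS_NODES
    local port_with_delim=$2               # e.g. ':8081 ' or ':2181,'
    local server_list=''
    IFS=',' read -ra server_list <<< "${!server_typ}"   # indirect expansion, split on commas
    local extended_server_list=''
    for server in "${server_list[@]}"; do
        local server_address=$(echo $server)
        extended_server_list+="${server_address}${port_with_delim}"
    done
    # drop the trailing delimiter character: the space after ':8081 ', the comma after ':2181,'
    [ -n "$extended_server_list" ] && echo "${extended_server_list::-1}"
}

The delimiter argument decides the output shape: ':8081 ' yields the space-separated ANALYTICS_SERVERS above, while ':2181,' later yields the comma-separated ZOOKEEPER_SERVERS=10.0.0.254:2181,10.0.0.38:2181,10.0.0.50:2181.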
++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9161 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:9161 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:9161 ' +++ '[' -n '10.0.0.254:9161 10.0.0.38:9161 10.0.0.50:9161 ' ']' +++ echo '10.0.0.254:9161 10.0.0.38:9161 10.0.0.50:9161' ++ CONFIGDB_SERVERS='10.0.0.254:9161 10.0.0.38:9161 10.0.0.50:9161' +++ get_server_list CONFIGDB ':9041 ' +++ local server_typ=CONFIGDB_NODES +++ local 'port_with_delim=:9041 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9041 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:9041 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:9041 ' +++ '[' -n '10.0.0.254:9041 10.0.0.38:9041 10.0.0.50:9041 ' ']' +++ echo '10.0.0.254:9041 10.0.0.38:9041 10.0.0.50:9041' ++ CONFIGDB_CQL_SERVERS='10.0.0.254:9041 10.0.0.38:9041 10.0.0.50:9041' ++ CONFIG_API_VIP= ++ CONFIG_API_SSL_ENABLE=false ++ CONFIG_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ CONFIG_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ CONFIG_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CONFIG_API_WORKER_COUNT=1 ++ CONFIG_API_MAX_REQUESTS=1024 ++ ANALYTICS_API_SSL_ENABLE=false ++ ANALYTICS_API_SSL_INSECURE=True ++ ANALYTICS_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ ANALYTICS_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ ANALYTICS_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CASSANDRA_SSL_ENABLE=false ++ CASSANDRA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ CASSANDRA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ CASSANDRA_SSL_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ CASSANDRA_SSL_KEYSTORE_PASSWORD=astrophytum ++ CASSANDRA_SSL_TRUSTSTORE_PASSWORD=ornatum ++ CASSANDRA_SSL_PROTOCOL=TLS ++ CASSANDRA_SSL_ALGORITHM=SunX509 ++ CASSANDRA_SSL_CIPHER_SUITES='[TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]' ++ CASSANDRA_CONFIG_MEMTABLE_FLUSH_WRITER=4 ++ CASSANDRA_CONFIG_CONCURRECT_COMPACTORS=4 ++ CASSANDRA_CONFIG_COMPACTION_THROUGHPUT_MB_PER_SEC=256 ++ CASSANDRA_CONFIG_CONCURRECT_READS=64 ++ CASSANDRA_CONFIG_CONCURRECT_WRITES=64 ++ CASSANDRA_CONFIG_MEMTABLE_ALLOCATION_TYPE=offheap_objects ++ CASSANDRA_REAPER_ENABLED=true ++ CASSANDRA_REAPER_JMX_KEY=reaperJmxKey ++ CASSANDRA_REAPER_JMX_AUTH_USERNAME=reaperUser ++ CASSANDRA_REAPER_JMX_AUTH_PASSWORD=reaperPass ++ CASSANDRA_REAPER_APP_PORT=8071 ++ CASSANDRA_REAPER_ADM_PORT=8072 ++ CONTROL_NODES=10.20.0.17,10.20.0.254,10.20.0.14 ++ CONTROL_INTROSPECT_PORT=8083 ++ DNS_NODES=10.20.0.17,10.20.0.254,10.20.0.14 ++ DNS_SERVER_PORT=53 ++ DNS_INTROSPECT_PORT=8092 ++ RNDC_KEY=xvysmOR8lnUQRBcunkC6vg== ++ USE_EXTERNAL_TFTP=False ++ ZOOKEEPER_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ ZOOKEEPER_PORT=2181 ++ ZOOKEEPER_PORTS=2888:3888 +++ get_server_list ZOOKEEPER :2181, +++ local server_typ=ZOOKEEPER_NODES +++ local 
port_with_delim=:2181, +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+=10.0.0.254:2181, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+=10.0.0.38:2181, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+=10.0.0.50:2181, +++ '[' -n 10.0.0.254:2181,10.0.0.38:2181,10.0.0.50:2181, ']' +++ echo 10.0.0.254:2181,10.0.0.38:2181,10.0.0.50:2181 ++ ZOOKEEPER_SERVERS=10.0.0.254:2181,10.0.0.38:2181,10.0.0.50:2181 +++ get_server_list ZOOKEEPER ':2181 ' +++ local server_typ=ZOOKEEPER_NODES +++ local 'port_with_delim=:2181 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:2181 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:2181 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:2181 ' +++ '[' -n '10.0.0.254:2181 10.0.0.38:2181 10.0.0.50:2181 ' ']' +++ echo '10.0.0.254:2181 10.0.0.38:2181 10.0.0.50:2181' ++ ZOOKEEPER_SERVERS_SPACE_DELIM='10.0.0.254:2181 10.0.0.38:2181 10.0.0.50:2181' ++ RABBITMQ_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ RABBITMQ_NODE_PORT=5673 +++ get_server_list RABBITMQ :5673, +++ local server_typ=RABBITMQ_NODES +++ local port_with_delim=:5673, +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+=10.0.0.254:5673, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+=10.0.0.38:5673, +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+=10.0.0.50:5673, +++ '[' -n 10.0.0.254:5673,10.0.0.38:5673,10.0.0.50:5673, ']' +++ echo 10.0.0.254:5673,10.0.0.38:5673,10.0.0.50:5673 ++ RABBITMQ_SERVERS=10.0.0.254:5673,10.0.0.38:5673,10.0.0.50:5673 ++ RABBITMQ_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ RABBITMQ_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ RABBITMQ_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT=true ++ RABBITMQ_VHOST=/ ++ RABBITMQ_USER=guest ++ RABBITMQ_PASSWORD=guest ++ RABBITMQ_USE_SSL=false ++ RABBITMQ_SSL_VER=tlsv1.2 ++ RABBITMQ_CLIENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ RABBITMQ_CLIENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ RABBITMQ_CLIENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ RABBITMQ_HEARTBEAT_INTERVAL=60 ++ RABBITMQ_CLUSTER_PARTITION_HANDLING=autoheal ++ RABBITMQ_MIRRORED_QUEUE_MODE=all ++ REDIS_SERVER_PORT=6379 ++ REDIS_SERVER_PASSWORD= +++ get_server_list ANALYTICS ':6379 ' +++ local server_typ=ANALYTICS_NODES +++ local 'port_with_delim=:6379 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:6379 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ 
local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:6379 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:6379 ' +++ '[' -n '10.0.0.254:6379 10.0.0.38:6379 10.0.0.50:6379 ' ']' +++ echo '10.0.0.254:6379 10.0.0.38:6379 10.0.0.50:6379' ++ REDIS_SERVERS='10.0.0.254:6379 10.0.0.38:6379 10.0.0.50:6379' ++ REDIS_LISTEN_ADDRESS= ++ REDIS_PROTECTED_MODE= ++ REDIS_SSL_ENABLE=false ++ REDIS_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ REDIS_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ REDIS_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ redis_ssl_config= ++ KAFKA_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ KAFKA_PORT=9092 +++ get_server_list KAFKA ':9092 ' +++ local server_typ=KAFKA_NODES +++ local 'port_with_delim=:9092 ' +++ local server_list= +++ IFS=, +++ read -ra server_list +++ local extended_server_list= +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.254 +++ local server_address=10.0.0.254 +++ extended_server_list+='10.0.0.254:9092 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.38 +++ local server_address=10.0.0.38 +++ extended_server_list+='10.0.0.38:9092 ' +++ for server in '"${server_list[@]}"' ++++ echo 10.0.0.50 +++ local server_address=10.0.0.50 +++ extended_server_list+='10.0.0.50:9092 ' +++ '[' -n '10.0.0.254:9092 10.0.0.38:9092 10.0.0.50:9092 ' ']' +++ echo '10.0.0.254:9092 10.0.0.38:9092 10.0.0.50:9092' ++ KAFKA_SERVERS='10.0.0.254:9092 10.0.0.38:9092 10.0.0.50:9092' ++ KAFKA_SSL_ENABLE=false ++ KAFKA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ KAFKA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ KAFKA_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ KEYSTONE_AUTH_ADMIN_TENANT=admin ++ KEYSTONE_AUTH_ADMIN_USER=admin ++ KEYSTONE_AUTH_ADMIN_PASSWORD=contrail123 ++ KEYSTONE_AUTH_PROJECT_DOMAIN_NAME=Default ++ KEYSTONE_AUTH_USER_DOMAIN_NAME=Default ++ KEYSTONE_AUTH_REGION_NAME=RegionOne ++ KEYSTONE_AUTH_URL_VERSION=/v3 ++ KEYSTONE_AUTH_HOST=10.0.0.254 ++ KEYSTONE_AUTH_PROTO=http ++ KEYSTONE_AUTH_ADMIN_PORT=5000 ++ KEYSTONE_AUTH_PUBLIC_PORT=5000 ++ KEYSTONE_AUTH_URL_TOKENS=/v3/auth/tokens ++ KEYSTONE_AUTH_INSECURE=True ++ KEYSTONE_AUTH_CERTFILE= ++ KEYSTONE_AUTH_KEYFILE= ++ KEYSTONE_AUTH_CA_CERTFILE= ++ KEYSTONE_AUTH_ENDPOINT_TYPE= ++ KEYSTONE_AUTH_SYNC_ON_DEMAND= ++ KEYSTONE_AUTH_INTERFACE=public ++ KUBEMANAGER_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ KUBERNETES_CLUSTER_NAME=k8s ++ KUBERNETES_CNI_META_PLUGIN=multus ++ METADATA_PROXY_SECRET=contrail ++ BARBICAN_TENANT_NAME=service ++ BARBICAN_USER=barbican ++ BARBICAN_PASSWORD=contrail123 ++ AGENT_MODE=kernel ++ EXTERNAL_ROUTERS= ++ SUBCLUSTER= ++ VROUTER_COMPUTE_NODE_ADDRESS= ++ VROUTER_CRYPT_INTERFACE=crypt0 ++ VROUTER_DECRYPT_INTERFACE=decrypt0 ++ VROUTER_DECRYPT_KEY=15 ++ VROUTER_MODULE_OPTIONS= ++ FABRIC_SNAT_HASH_TABLE_SIZE=4096 ++ TSN_EVPN_MODE=False ++ TSN_NODES='[]' ++ PRIORITY_ID= ++ PRIORITY_BANDWIDTH= ++ PRIORITY_SCHEDULING= ++ QOS_QUEUE_ID= ++ QOS_LOGICAL_QUEUES= ++ QOS_DEF_HW_QUEUE=False ++ PRIORITY_TAGGING=True ++ SLO_DESTINATION=collector ++ '[' -n '' ']' ++ SAMPLE_DESTINATION=collector ++ FLOW_EXPORT_RATE=0 ++ WEBUI_NODES=10.0.0.254,10.0.0.38,10.0.0.50 ++ WEBUI_JOB_SERVER_PORT=3000 ++ KUE_UI_PORT=3002 ++ WEBUI_HTTP_LISTEN_PORT=8180 ++ WEBUI_HTTPS_LISTEN_PORT=8143 ++ WEBUI_SSL_KEY_FILE=/etc/contrail/webui_ssl/cs-key.pem ++ 
WEBUI_SSL_CERT_FILE=/etc/contrail/webui_ssl/cs-cert.pem ++ WEBUI_SSL_CIPHERS=ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES256-SHA ++ WEBUI_STATIC_AUTH_USER=admin ++ WEBUI_STATIC_AUTH_PASSWORD=contrail123 ++ WEBUI_STATIC_AUTH_ROLE=cloudAdmin ++ XMPP_SERVER_PORT=5269 ++ XMPP_SSL_ENABLE=false ++ XMPP_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ XMPP_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ XMPP_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ LINKLOCAL_SERVICE_PORT=80 ++ LINKLOCAL_SERVICE_NAME=metadata ++ LINKLOCAL_SERVICE_IP=169.254.169.254 ++ IPFABRIC_SERVICE_PORT=8775 ++ INTROSPECT_SSL_ENABLE=false ++ INTROSPECT_SSL_INSECURE=True ++ INTROSPECT_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ INTROSPECT_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ INTROSPECT_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ INTROSPECT_LISTEN_ALL=True ++ SANDESH_SSL_ENABLE=false ++ SANDESH_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SANDESH_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SANDESH_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ SANDESH_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ SANDESH_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ METADATA_SSL_ENABLE=false ++ METADATA_SSL_CERTFILE= ++ METADATA_SSL_KEYFILE= ++ METADATA_SSL_CA_CERTFILE= ++ METADATA_SSL_CERT_TYPE= ++ CONFIGURE_IPTABLES=false ++ FWAAS_ENABLE=False ++ CONTAINERD_NAMESPACE=k8s.io ++ TOR_AGENT_OVS_KA=10000 ++ TOR_TYPE=ovs ++ TOR_OVS_PROTOCOL=tcp ++ TORAGENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem ++ TORAGENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem ++ TORAGENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem ++ [[ /v3 == \/\v\2\.\0 ]] ++ [[ openstack == \o\p\e\n\s\t\a\c\k ]] ++ AUTH_MODE=keystone ++ [[ keystone == \k\e\y\s\t\o\n\e ]] ++ AUTH_PARAMS='--admin_password contrail123' ++ AUTH_PARAMS+=' --admin_tenant_name admin' ++ AUTH_PARAMS+=' --admin_user admin' ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ read -r -d '' sandesh_client_config ++ true ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ xmpp_certs_config= ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ analytics_api_ssl_opts= ++ read -r -d '' rabbitmq_config ++ true ++ read -r -d '' rabbit_config ++ true ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ is_enabled false ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ kafka_ssl_config= ++ [[ -n '' ]] ++ collector_stats_config= ++ [[ -z '' ]] ++ is_enabled False ++ local val=false ++ [[ false == \t\r\u\e ]] ++ [[ false == \y\e\s ]] ++ [[ false == \e\n\a\b\l\e\d ]] ++ export TSN_AGENT_MODE= ++ TSN_AGENT_MODE= ++ [[ -n '' ]] ++ collector_stats_config= ++ [[ -z x ]] ++ RSYSLOGD_XFLOW_LISTEN_PORT=9898 + CONFIG=/etc/cassandra/cassandra.yaml + JVM_OPTIONS_CONFIG=/etc/cassandra/jvm.options + cp /etc/cassandra/cassandra.origin /etc/cassandra/cassandra.yaml + cp /etc/cassandra/jvm.options.origin /etc/cassandra/jvm.options + 
for i in '{1..10}' ++ find_my_ip_and_order_for_node_list 10.0.0.254,10.0.0.38,10.0.0.50 ++ local servers=10.0.0.254,10.0.0.38,10.0.0.50 ++ local server_list= ++ IFS=, ++ read -ra server_list ++ cut -d ' ' -f 1 +++ get_local_ips +++ tr '\n' , +++ sort +++ awk '/32 host/ { print f } {f=$2}' +++ cat /proc/net/fib_trie +++ grep -vi host +++ uniq ++ local local_ips=,10.0.0.50,10.20.0.14,127.0.0.1,172.17.0.1,, ++ local ord=1 ++ for server in '"${server_list[@]}"' ++ local ret=0 +++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.254'\''))' ++ local server_ip=10.0.0.254 ++ [[ 0 == 0 ]] ++ [[ -n 10.0.0.254 ]] ++ [[ ,10.0.0.50,10.20.0.14,127.0.0.1,172.17.0.1,, =~ ,10\.0\.0\.254, ]] ++ (( ord+=1 )) ++ for server in '"${server_list[@]}"' ++ local ret=0 +++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.38'\''))' ++ local server_ip=10.0.0.38 ++ [[ 0 == 0 ]] ++ [[ -n 10.0.0.38 ]] ++ [[ ,10.0.0.50,10.20.0.14,127.0.0.1,172.17.0.1,, =~ ,10\.0\.0\.38, ]] ++ (( ord+=1 )) ++ for server in '"${server_list[@]}"' ++ local ret=0 +++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.50'\''))' ++ local server_ip=10.0.0.50 ++ [[ 0 == 0 ]] ++ [[ -n 10.0.0.50 ]] ++ [[ ,10.0.0.50,10.20.0.14,127.0.0.1,172.17.0.1,, =~ ,10\.0\.0\.50, ]] ++ echo 10.0.0.50 3 ++ return + my_ip=10.0.0.50 + '[' -n 10.0.0.50 ']' + break + '[' -z 10.0.0.50 ']' ++ echo 10.0.0.254,10.0.0.38,10.0.0.50 ++ tr , ' ' ++ wc -w + export CASSANDRA_COUNT=3 + CASSANDRA_COUNT=3 ++ echo 10.0.0.254,10.0.0.38,10.0.0.50 ++ sed 's/,/", "/g' + export 'CASSANDRA_CONNECT_POINTS=10.0.0.254", "10.0.0.38", "10.0.0.50' + CASSANDRA_CONNECT_POINTS='10.0.0.254", "10.0.0.38", "10.0.0.50' ++ echo 10.0.0.254,10.0.0.38,10.0.0.50 ++ cut -d , -f 1,2 + export CASSANDRA_SEEDS=10.0.0.254,10.0.0.38 + CASSANDRA_SEEDS=10.0.0.254,10.0.0.38 + export CASSANDRA_LISTEN_ADDRESS=10.0.0.50 + CASSANDRA_LISTEN_ADDRESS=10.0.0.50 + export CASSANDRA_RPC_ADDRESS=10.0.0.50 + CASSANDRA_RPC_ADDRESS=10.0.0.50 + echo 'INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g' INFO: JVM_EXTRA_OPTS=-Xms1g -Xmx2g + for yaml in Xmx Xms ++ echo -Xms1g -Xmx2g ++ sed -n 's/.*\(-Xmx[0-9]*[mMgG]\).*/\1/p' + opt=-Xmx2g + [[ -n -Xmx2g ]] ++ echo -Xms1g -Xmx2g ++ sed 's/-Xmx[0-9]*[mMgG]//g' + JVM_EXTRA_OPTS='-Xms1g ' + sed -i 's/^[#]*-Xmx.*/-Xmx2g/g' /etc/cassandra/jvm.options + for yaml in Xmx Xms ++ echo -Xms1g ++ sed -n 's/.*\(-Xms[0-9]*[mMgG]\).*/\1/p' + opt=-Xms1g + [[ -n -Xms1g ]] ++ sed 's/-Xms[0-9]*[mMgG]//g' ++ echo -Xms1g + JVM_EXTRA_OPTS= + sed -i 's/^[#]*-Xms.*/-Xms1g/g' /etc/cassandra/jvm.options + export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201' + JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201' + is_enabled true + local val=true + [[ true == \t\r\u\e ]] + export LOCAL_JMX=no + LOCAL_JMX=no + export 'JVM_EXTRA_OPTS= -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201' + JVM_EXTRA_OPTS=' -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201 
-Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access -Dcassandra.jmx.remote.port=7201 -Dcom.sun.management.jmxremote.rmi.port=7201' + is_enabled false + local val=false + [[ false == \t\r\u\e ]] + [[ false == \y\e\s ]] + [[ false == \e\n\a\b\l\e\d ]] + cat + change_variable memtable_flush_writers 4 + local VARIABLE_NAME=memtable_flush_writers + local VARIABLE_VALUE=4 + sed -i 's/.*\(memtable_flush_writers\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml + change_variable concurrent_compactors 4 + local VARIABLE_NAME=concurrent_compactors + local VARIABLE_VALUE=4 + sed -i 's/.*\(concurrent_compactors\):.*\([0-9a-z]\)/\1: 4/g' /etc/cassandra/cassandra.yaml + change_variable compaction_throughput_mb_per_sec 256 + local VARIABLE_NAME=compaction_throughput_mb_per_sec + local VARIABLE_VALUE=256 + sed -i 's/.*\(compaction_throughput_mb_per_sec\):.*\([0-9a-z]\)/\1: 256/g' /etc/cassandra/cassandra.yaml + change_variable concurrent_reads 64 + local VARIABLE_NAME=concurrent_reads + local VARIABLE_VALUE=64 + sed -i 's/.*\(concurrent_reads\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml + change_variable concurrent_writes 64 + local VARIABLE_NAME=concurrent_writes + local VARIABLE_VALUE=64 + sed -i 's/.*\(concurrent_writes\):.*\([0-9a-z]\)/\1: 64/g' /etc/cassandra/cassandra.yaml + change_variable memtable_allocation_type offheap_objects + local VARIABLE_NAME=memtable_allocation_type + local VARIABLE_VALUE=offheap_objects + sed -i 's/.*\(memtable_allocation_type\):.*\([0-9a-z]\)/\1: offheap_objects/g' /etc/cassandra/cassandra.yaml + log_levels_map=([SYS_DEBUG]='DEBUG' [SYS_INFO]='INFO' [SYS_NOTICE]='INFO' [SYS_ERROR]="ERROR") + declare -A log_levels_map + log_level=DEBUG + '[' -n DEBUG ']' + sed -i 's/\(
[capture truncated here: the remainder of the sed command and the head of Cassandra's startup "Node configuration:[...]" dump were lost; the dump resumes below]
; cluster_name=contrail_database; column_index_cache_size_in_kb=2; column_index_size_in_kb=64; commit_failure_policy=stop; commitlog_compression=null; commitlog_directory=/var/lib/cassandra/commitlog; commitlog_max_compression_buffers_in_pool=3; commitlog_periodic_queue_size=-1; commitlog_segment_size_in_mb=32; commitlog_sync=periodic; commitlog_sync_batch_window_in_ms=NaN; commitlog_sync_period_in_ms=10000; commitlog_total_space_in_mb=null; compaction_large_partition_warning_threshold_mb=100; compaction_throughput_mb_per_sec=256; concurrent_compactors=4; concurrent_counter_writes=32; concurrent_materialized_view_writes=32; concurrent_reads=64; concurrent_replicates=null; concurrent_writes=64; counter_cache_keys_to_save=2147483647; counter_cache_save_period=7200; counter_cache_size_in_mb=null; counter_write_request_timeout_in_ms=5000; credentials_cache_max_entries=1000; credentials_update_interval_in_ms=-1; credentials_validity_in_ms=2000; cross_node_timeout=false; data_file_directories=[Ljava.lang.String;@6b19b79; disk_access_mode=auto; disk_failure_policy=stop; disk_optimization_estimate_percentile=0.95; disk_optimization_page_cross_chance=0.1; disk_optimization_strategy=ssd; dynamic_snitch=true; dynamic_snitch_badness_threshold=0.1; dynamic_snitch_reset_interval_in_ms=600000; dynamic_snitch_update_interval_in_ms=100; enable_materialized_views=true; enable_scripted_user_defined_functions=false; enable_user_defined_functions=false; enable_user_defined_functions_threads=true; encryption_options=null; endpoint_snitch=SimpleSnitch; file_cache_round_up=null; file_cache_size_in_mb=null; gc_log_threshold_in_ms=200; gc_warn_threshold_in_ms=1000; hinted_handoff_disabled_datacenters=[]; hinted_handoff_enabled=true; hinted_handoff_throttle_in_kb=1024; hints_compression=null;
hints_directory=null; hints_flush_period_in_ms=10000; incremental_backups=false; index_interval=null; index_summary_capacity_in_mb=null; index_summary_resize_interval_in_minutes=60; initial_token=null; inter_dc_stream_throughput_outbound_megabits_per_sec=200; inter_dc_tcp_nodelay=false; internode_authenticator=null; internode_compression=dc; internode_recv_buff_size_in_bytes=0; internode_send_buff_size_in_bytes=0; key_cache_keys_to_save=2147483647; key_cache_save_period=14400; key_cache_size_in_mb=null; listen_address=10.0.0.50; listen_interface=null; listen_interface_prefer_ipv6=false; listen_on_broadcast_address=false; max_hint_window_in_ms=10800000; max_hints_delivery_threads=2; max_hints_file_size_in_mb=128; max_mutation_size_in_kb=null; max_streaming_retries=3; max_value_size_in_mb=256; memtable_allocation_type=offheap_objects; memtable_cleanup_threshold=null; memtable_flush_writers=4; memtable_heap_space_in_mb=null; memtable_offheap_space_in_mb=null; min_free_space_per_drive_in_mb=50; native_transport_max_concurrent_connections=-1; native_transport_max_concurrent_connections_per_ip=-1; native_transport_max_frame_size_in_mb=256; native_transport_max_threads=128; native_transport_port=9042; native_transport_port_ssl=null; num_tokens=256; otc_backlog_expiration_interval_ms=200; otc_coalescing_enough_coalesced_messages=8; otc_coalescing_strategy=DISABLED; otc_coalescing_window_us=200; partitioner=org.apache.cassandra.dht.Murmur3Partitioner; permissions_cache_max_entries=1000; permissions_update_interval_in_ms=-1; permissions_validity_in_ms=2000; phi_convict_threshold=8.0; prepared_statements_cache_size_mb=null; range_request_timeout_in_ms=10000; read_request_timeout_in_ms=5000; request_scheduler=org.apache.cassandra.scheduler.NoScheduler; request_scheduler_id=null; request_scheduler_options=null; request_timeout_in_ms=10000; role_manager=CassandraRoleManager; roles_cache_max_entries=1000; roles_update_interval_in_ms=-1; roles_validity_in_ms=2000; row_cache_class_name=org.apache.cassandra.cache.OHCProvider; row_cache_keys_to_save=2147483647; row_cache_save_period=0; row_cache_size_in_mb=0; rpc_address=10.0.0.50; rpc_interface=null; rpc_interface_prefer_ipv6=false; rpc_keepalive=true; rpc_listen_backlog=50; rpc_max_threads=2147483647; rpc_min_threads=16; rpc_port=9160; rpc_recv_buff_size_in_bytes=null; rpc_send_buff_size_in_bytes=null; rpc_server_type=sync; saved_caches_directory=/var/lib/cassandra/saved_caches; seed_provider=org.apache.cassandra.locator.SimpleSeedProvider{seeds=10.0.0.254,10.0.0.38}; server_encryption_options=; slow_query_log_timeout_in_ms=500; snapshot_before_compaction=false; ssl_storage_port=7001; sstable_preemptive_open_interval_in_mb=50; start_native_transport=true; start_rpc=true; storage_port=7000; stream_throughput_outbound_megabits_per_sec=200; streaming_keep_alive_period_in_secs=300; streaming_socket_timeout_in_ms=86400000; thrift_framed_transport_size_in_mb=15; thrift_max_message_length_in_mb=16; thrift_prepared_statements_cache_size_mb=null; tombstone_failure_threshold=100000; tombstone_warn_threshold=1000; tracetype_query_ttl=86400; tracetype_repair_ttl=604800; transparent_data_encryption_options=org.apache.cassandra.config.TransparentDataEncryptionOptions@2a32de6c; trickle_fsync=false; trickle_fsync_interval_in_kb=10240; truncate_request_timeout_in_ms=60000; unlogged_batch_across_partitions_warn_threshold=10; user_defined_function_fail_timeout=1500; user_defined_function_warn_timeout=500; user_function_timeout_policy=die; windows_timer_interval=1; 
write_request_timeout_in_ms=2000] INFO [main] 2025-07-28 03:26:04,504 DatabaseDescriptor.java:367 - DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap INFO [main] 2025-07-28 03:26:04,504 DatabaseDescriptor.java:425 - Global memtable on-heap threshold is enabled at 502MB INFO [main] 2025-07-28 03:26:04,504 DatabaseDescriptor.java:429 - Global memtable off-heap threshold is enabled at 502MB INFO [main] 2025-07-28 03:26:04,532 RateBasedBackPressure.java:123 - Initialized back-pressure with high ratio: 0.9, factor: 5, flow: FAST, window size: 2000. INFO [main] 2025-07-28 03:26:04,533 DatabaseDescriptor.java:729 - Back-pressure is disabled with strategy org.apache.cassandra.net.RateBasedBackPressure{high_ratio=0.9, factor=5, flow=FAST}. INFO [main] 2025-07-28 03:26:04,724 JMXServerUtils.java:246 - Configured JMX server at: service:jmx:rmi://0.0.0.0/jndi/rmi://0.0.0.0:7201/jmxrmi INFO [main] 2025-07-28 03:26:04,759 CassandraDaemon.java:473 - Hostname: cn-jenkins-deploy-platform-ansible-os-3721-3. INFO [main] 2025-07-28 03:26:04,759 CassandraDaemon.java:480 - JVM vendor/version: OpenJDK 64-Bit Server VM/1.8.0_322 INFO [main] 2025-07-28 03:26:04,760 CassandraDaemon.java:481 - Heap size: 984.000MiB/1.961GiB INFO [main] 2025-07-28 03:26:04,761 CassandraDaemon.java:486 - Code Cache Non-heap memory: init = 2555904(2496K) used = 4448704(4344K) committed = 4521984(4416K) max = 251658240(245760K) INFO [main] 2025-07-28 03:26:04,761 CassandraDaemon.java:486 - Metaspace Non-heap memory: init = 0(0K) used = 19000768(18555K) committed = 19398656(18944K) max = -1(-1K) INFO [main] 2025-07-28 03:26:04,761 CassandraDaemon.java:486 - Compressed Class Space Non-heap memory: init = 0(0K) used = 2209688(2157K) committed = 2359296(2304K) max = 1073741824(1048576K) INFO [main] 2025-07-28 03:26:04,762 CassandraDaemon.java:486 - Par Eden Space Heap memory: init = 335544320(327680K) used = 93992528(91789K) committed = 335544320(327680K) max = 335544320(327680K) INFO [main] 2025-07-28 03:26:04,762 CassandraDaemon.java:486 - Par Survivor Space Heap memory: init = 41943040(40960K) used = 0(0K) committed = 41943040(40960K) max = 41943040(40960K) INFO [main] 2025-07-28 03:26:04,763 CassandraDaemon.java:486 - CMS Old Gen Heap memory: init = 654311424(638976K) used = 0(0K) committed = 654311424(638976K) max = 1728053248(1687552K) INFO [main] 2025-07-28 03:26:04,764 CassandraDaemon.java:488 - Classpath: 
/opt/cassandra/conf:/opt/cassandra/build/classes/main:/opt/cassandra/build/classes/thrift:/opt/cassandra/lib/airline-0.6.jar:/opt/cassandra/lib/antlr-runtime-3.5.2.jar:/opt/cassandra/lib/apache-cassandra-3.11.3.jar:/opt/cassandra/lib/apache-cassandra-thrift-3.11.3.jar:/opt/cassandra/lib/asm-5.0.4.jar:/opt/cassandra/lib/caffeine-2.2.6.jar:/opt/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar:/opt/cassandra/lib/commons-cli-1.1.jar:/opt/cassandra/lib/commons-codec-1.9.jar:/opt/cassandra/lib/commons-lang3-3.1.jar:/opt/cassandra/lib/commons-math3-3.2.jar:/opt/cassandra/lib/compress-lzf-0.8.4.jar:/opt/cassandra/lib/concurrentlinkedhashmap-lru-1.4.jar:/opt/cassandra/lib/concurrent-trees-2.4.0.jar:/opt/cassandra/lib/disruptor-3.0.1.jar:/opt/cassandra/lib/ecj-4.4.2.jar:/opt/cassandra/lib/guava-18.0.jar:/opt/cassandra/lib/HdrHistogram-2.1.9.jar:/opt/cassandra/lib/high-scale-lib-1.0.6.jar:/opt/cassandra/lib/hppc-0.5.4.jar:/opt/cassandra/lib/jackson-core-asl-1.9.13.jar:/opt/cassandra/lib/jackson-mapper-asl-1.9.13.jar:/opt/cassandra/lib/jamm-0.3.0.jar:/opt/cassandra/lib/javax.inject.jar:/opt/cassandra/lib/jbcrypt-0.3m.jar:/opt/cassandra/lib/jcl-over-slf4j-1.7.7.jar:/opt/cassandra/lib/jctools-core-1.2.1.jar:/opt/cassandra/lib/jflex-1.6.0.jar:/opt/cassandra/lib/jna-4.2.2.jar:/opt/cassandra/lib/joda-time-2.4.jar:/opt/cassandra/lib/json-simple-1.1.jar:/opt/cassandra/lib/jstackjunit-0.0.1.jar:/opt/cassandra/lib/libthrift-0.13.0.jar:/opt/cassandra/lib/log4j-over-slf4j-1.7.7.jar:/opt/cassandra/lib/logback-classic-1.2.9.jar:/opt/cassandra/lib/logback-core-1.2.9.jar:/opt/cassandra/lib/lz4-1.3.0.jar:/opt/cassandra/lib/metrics-core-3.1.5.jar:/opt/cassandra/lib/metrics-jvm-3.1.5.jar:/opt/cassandra/lib/metrics-logback-3.1.5.jar:/opt/cassandra/lib/netty-all-4.1.39.Final.jar:/opt/cassandra/lib/ohc-core-0.4.4.jar:/opt/cassandra/lib/ohc-core-j8-0.4.4.jar:/opt/cassandra/lib/reporter-config3-3.0.3.jar:/opt/cassandra/lib/reporter-config-base-3.0.3.jar:/opt/cassandra/lib/sigar-1.6.4.jar:/opt/cassandra/lib/slf4j-api-1.7.7.jar:/opt/cassandra/lib/snakeyaml-1.11.jar:/opt/cassandra/lib/snappy-java-1.1.1.7.jar:/opt/cassandra/lib/snowball-stemmer-1.3.0.581.1.jar:/opt/cassandra/lib/ST4-4.0.8.jar:/opt/cassandra/lib/stream-2.5.2.jar:/opt/cassandra/lib/thrift-server-0.3.7.jar:/opt/cassandra/lib/jsr223/*/*.jar:/opt/cassandra/lib/jamm-0.3.0.jar INFO [main] 2025-07-28 03:26:04,765 CassandraDaemon.java:490 - JVM Arguments: [-Xloggc:/opt/cassandra/logs/gc.log, -ea, -XX:+UseThreadPriorities, -XX:ThreadPriorityPolicy=42, -XX:+HeapDumpOnOutOfMemoryError, -Xss256k, -XX:StringTableSize=1000003, -XX:+AlwaysPreTouch, -XX:-UseBiasedLocking, -XX:+UseTLAB, -XX:+ResizeTLAB, -XX:+UseNUMA, -XX:+PerfDisableSharedMem, -Djava.net.preferIPv4Stack=true, -Xms1g, -Xmx2g, -XX:+UseParNewGC, -XX:+UseConcMarkSweepGC, -XX:+CMSParallelRemarkEnabled, -XX:SurvivorRatio=8, -XX:MaxTenuringThreshold=1, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:CMSWaitDuration=10000, -XX:+CMSParallelInitialMarkEnabled, -XX:+CMSEdenChunksRecordAlways, -XX:+CMSClassUnloadingEnabled, -XX:+PrintGCDetails, -XX:+PrintGCDateStamps, -XX:+PrintHeapAtGC, -XX:+PrintTenuringDistribution, -XX:+PrintGCApplicationStoppedTime, -XX:+PrintPromotionFailure, -XX:+UseGCLogFileRotation, -XX:NumberOfGCLogFiles=10, -XX:GCLogFileSize=10M, -Xmn400M, -XX:+UseCondCardMark, -XX:CompileCommandFile=/opt/cassandra/conf/hotspot_compiler, -javaagent:/opt/cassandra/lib/jamm-0.3.0.jar, -Dcassandra.jmx.remote.port=7199, -Dcom.sun.management.jmxremote.rmi.port=7199, 
-Dcom.sun.management.jmxremote.authenticate=true, -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password, -Djava.library.path=/opt/cassandra/lib/sigar-bin, -Dcassandra.rpc_port=9161, -Dcassandra.native_transport_port=9041, -Dcassandra.ssl_storage_port=7013, -Dcassandra.storage_port=7012, -Dcassandra.jmx.local.port=7201, -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access, -Dcassandra.jmx.remote.port=7201, -Dcom.sun.management.jmxremote.rmi.port=7201, -Dcassandra.libjemalloc=/usr/lib64/libjemalloc.so.1, -XX:OnOutOfMemoryError=kill -9 %p, -Dlogback.configurationFile=logback.xml, -Dcassandra.logdir=/opt/cassandra/logs, -Dcassandra.storagedir=/opt/cassandra/data, -Dcassandra-foreground=yes] WARN [main] 2025-07-28 03:26:04,815 NativeLibrary.java:187 - Unable to lock JVM memory (ENOMEM). This can result in part of the JVM being swapped out, especially with mmapped I/O enabled. Increase RLIMIT_MEMLOCK or run Cassandra as root. INFO [main] 2025-07-28 03:26:04,815 StartupChecks.java:140 - jemalloc seems to be preloaded from /usr/lib64/libjemalloc.so.1 INFO [main] 2025-07-28 03:26:04,815 StartupChecks.java:176 - JMX is enabled to receive remote connections on port: 7201 INFO [main] 2025-07-28 03:26:04,816 SigarLibrary.java:44 - Initializing SIGAR library INFO [main] 2025-07-28 03:26:04,826 SigarLibrary.java:180 - Checked OS settings and found them configured for optimal performance. WARN [main] 2025-07-28 03:26:04,827 StartupChecks.java:311 - Maximum number of memory map areas per process (vm.max_map_count) 128960 is too low, recommended value: 1048575, you can change it with sysctl. WARN [main] 2025-07-28 03:26:04,837 StartupChecks.java:332 - Directory /var/lib/cassandra/commitlog doesn't exist WARN [main] 2025-07-28 03:26:04,838 StartupChecks.java:332 - Directory /var/lib/cassandra/saved_caches doesn't exist WARN [main] 2025-07-28 03:26:04,839 StartupChecks.java:332 - Directory /opt/cassandra/data/hints doesn't exist INFO [main] 2025-07-28 03:26:04,902 QueryProcessor.java:116 - Initialized prepared statement caches with 10 MB (native) and 10 MB (Thrift) INFO [main] 2025-07-28 03:26:05,403 ColumnFamilyStore.java:411 - Initializing system.IndexInfo INFO [main] 2025-07-28 03:26:06,353 ColumnFamilyStore.java:411 - Initializing system.batches INFO [main] 2025-07-28 03:26:06,367 ColumnFamilyStore.java:411 - Initializing system.paxos INFO [main] 2025-07-28 03:26:06,404 ColumnFamilyStore.java:411 - Initializing system.local INFO [main] 2025-07-28 03:26:06,416 ColumnFamilyStore.java:411 - Initializing system.peers INFO [main] 2025-07-28 03:26:06,447 ColumnFamilyStore.java:411 - Initializing system.peer_events INFO [main] 2025-07-28 03:26:06,474 ColumnFamilyStore.java:411 - Initializing system.range_xfers INFO [main] 2025-07-28 03:26:06,482 ColumnFamilyStore.java:411 - Initializing system.compaction_history INFO [main] 2025-07-28 03:26:06,485 ColumnFamilyStore.java:411 - Initializing system.sstable_activity INFO [main] 2025-07-28 03:26:06,502 ColumnFamilyStore.java:411 - Initializing system.size_estimates INFO [main] 2025-07-28 03:26:06,521 ColumnFamilyStore.java:411 - Initializing system.available_ranges INFO [main] 2025-07-28 03:26:06,533 ColumnFamilyStore.java:411 - Initializing system.transferred_ranges INFO [main] 2025-07-28 03:26:06,559 ColumnFamilyStore.java:411 - Initializing system.views_builds_in_progress INFO [main] 2025-07-28 03:26:06,565 ColumnFamilyStore.java:411 - Initializing system.built_views INFO [main] 2025-07-28 03:26:06,581 
ColumnFamilyStore.java:411 - Initializing system.hints INFO [main] 2025-07-28 03:26:06,599 ColumnFamilyStore.java:411 - Initializing system.batchlog INFO [main] 2025-07-28 03:26:06,614 ColumnFamilyStore.java:411 - Initializing system.prepared_statements INFO [main] 2025-07-28 03:26:06,627 ColumnFamilyStore.java:411 - Initializing system.schema_keyspaces INFO [main] 2025-07-28 03:26:06,643 ColumnFamilyStore.java:411 - Initializing system.schema_columnfamilies INFO [main] 2025-07-28 03:26:06,655 ColumnFamilyStore.java:411 - Initializing system.schema_columns INFO [main] 2025-07-28 03:26:06,660 ColumnFamilyStore.java:411 - Initializing system.schema_triggers INFO [main] 2025-07-28 03:26:06,687 ColumnFamilyStore.java:411 - Initializing system.schema_usertypes INFO [main] 2025-07-28 03:26:06,695 ColumnFamilyStore.java:411 - Initializing system.schema_functions INFO [main] 2025-07-28 03:26:06,716 ColumnFamilyStore.java:411 - Initializing system.schema_aggregates INFO [main] 2025-07-28 03:26:06,730 ViewManager.java:137 - Not submitting build tasks for views in keyspace system as storage service is not initialized INFO [main] 2025-07-28 03:26:06,948 ApproximateTime.java:44 - Scheduling approximate time-check task with a precision of 10 milliseconds INFO [main] 2025-07-28 03:26:07,020 ColumnFamilyStore.java:411 - Initializing system_schema.keyspaces INFO [main] 2025-07-28 03:26:07,023 ColumnFamilyStore.java:411 - Initializing system_schema.tables INFO [main] 2025-07-28 03:26:07,026 ColumnFamilyStore.java:411 - Initializing system_schema.columns INFO [main] 2025-07-28 03:26:07,029 ColumnFamilyStore.java:411 - Initializing system_schema.triggers INFO [main] 2025-07-28 03:26:07,032 ColumnFamilyStore.java:411 - Initializing system_schema.dropped_columns INFO [main] 2025-07-28 03:26:07,035 ColumnFamilyStore.java:411 - Initializing system_schema.views INFO [main] 2025-07-28 03:26:07,038 ColumnFamilyStore.java:411 - Initializing system_schema.types INFO [main] 2025-07-28 03:26:07,044 ColumnFamilyStore.java:411 - Initializing system_schema.functions INFO [main] 2025-07-28 03:26:07,047 ColumnFamilyStore.java:411 - Initializing system_schema.aggregates INFO [main] 2025-07-28 03:26:07,050 ColumnFamilyStore.java:411 - Initializing system_schema.indexes INFO [main] 2025-07-28 03:26:07,051 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_schema as storage service is not initialized INFO [MemtableFlushWriter:1] 2025-07-28 03:26:08,201 CacheService.java:112 - Initializing key cache with capacity of 49 MBs. INFO [MemtableFlushWriter:1] 2025-07-28 03:26:08,208 CacheService.java:134 - Initializing row cache with capacity of 0 MBs INFO [MemtableFlushWriter:1] 2025-07-28 03:26:08,223 CacheService.java:163 - Initializing counter cache with capacity of 24 MBs INFO [MemtableFlushWriter:1] 2025-07-28 03:26:08,224 CacheService.java:174 - Scheduling counter cache save to every 7200 seconds (going to save all keys). 
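Stepping back to the entrypoint trace: before handing control to Cassandra, the script derived this node's cluster identity purely from the node list. find_my_ip_and_order_for_node_list matched each entry of CONFIGDB_NODES against the local addresses parsed from /proc/net/fib_trie (",10.0.0.50,10.20.0.14,127.0.0.1,172.17.0.1,") and found 10.0.0.50 at position 3. The exported topology then reduces to a few one-liners, condensed here from the trace with the traced values as comments:

CONFIGDB_NODES=10.0.0.254,10.0.0.38,10.0.0.50
my_ip=10.0.0.50                                                       # matched a local address, order 3
export CASSANDRA_COUNT=$(echo $CONFIGDB_NODES | tr ',' ' ' | wc -w)   # 3
export CASSANDRA_CONNECT_POINTS=$(echo $CONFIGDB_NODES | sed 's/,/", "/g')
export CASSANDRA_SEEDS=$(echo $CONFIGDB_NODES | cut -d ',' -f 1,2)    # 10.0.0.254,10.0.0.38
export CASSANDRA_LISTEN_ADDRESS=$my_ip
export CASSANDRA_RPC_ADDRESS=$my_ip

Taking the first two list entries as seeds is why the configuration dump above shows seeds=10.0.0.254,10.0.0.38, and why this node (10.0.0.50, not a seed) goes through the full JOINING/bootstrap sequence that follows.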
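The JVM and cassandra.yaml munging traced before startup is worth condensing as well. JVM_EXTRA_OPTS arrived as '-Xms1g -Xmx2g'; for each of Xmx and Xms the entrypoint extracts the flag with sed, deletes it from JVM_EXTRA_OPTS, and patches it into /etc/cassandra/jvm.options in place, then re-appends the remapped Contrail ports as -D properties. Tuning values go into cassandra.yaml through the change_variable helper, a one-line in-place sed. A sketch of both, consistent with the traced commands (not the authoritative entrypoint source):

# fold -Xmx/-Xms from JVM_EXTRA_OPTS into jvm.options
for yaml in Xmx Xms; do
    opt=$(echo $JVM_EXTRA_OPTS | sed -n "s/.*\(-${yaml}[0-9]*[mMgG]\).*/\1/p")
    if [[ -n "$opt" ]]; then
        JVM_EXTRA_OPTS=$(echo $JVM_EXTRA_OPTS | sed "s/-${yaml}[0-9]*[mMgG]//g")
        sed -i "s/^[#]*-${yaml}.*/${opt}/g" /etc/cassandra/jvm.options
    fi
done
# remap the Cassandra ports via system properties (values from the trace)
export JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Dcassandra.rpc_port=9161 -Dcassandra.native_transport_port=9041 -Dcassandra.ssl_storage_port=7013 -Dcassandra.storage_port=7012 -Dcassandra.jmx.local.port=7201"

# rewrite 'name: <anything>' (commented out or not) to 'name: <value>' in cassandra.yaml
change_variable() {
    local VARIABLE_NAME=$1
    local VARIABLE_VALUE=$2
    sed -i "s/.*\(${VARIABLE_NAME}\):.*\([0-9a-z]\)/\1: ${VARIABLE_VALUE}/g" /etc/cassandra/cassandra.yaml
}
change_variable memtable_flush_writers 4   # from CASSANDRA_CONFIG_MEMTABLE_FLUSH_WRITER
change_variable concurrent_reads 64        # from CASSANDRA_CONFIG_CONCURRECT_READS

Because the leading .* in change_variable's pattern also swallows a leading '#', the helper activates settings that ship commented out in the stock cassandra.yaml. Note that the port overrides live at the -D level: the configuration dump above still shows the yaml defaults (rpc_port=9160, native_transport_port=9042, storage_port=7000, ssl_storage_port=7001), yet the Messaging Service line below binds 10.0.0.50:7012, because the -Dcassandra.* properties take precedence when DatabaseDescriptor resolves each port.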
INFO [CompactionExecutor:4] 2025-07-28 03:26:08,599 BufferPool.java:230 - Global buffer pool is enabled, when pool is exhausted (max is 502.000MiB) it will allocate on heap INFO [main] 2025-07-28 03:26:08,736 StorageService.java:600 - Populating token metadata from system tables INFO [main] 2025-07-28 03:26:08,874 StorageService.java:607 - Token metadata: INFO [main] 2025-07-28 03:26:08,970 AutoSavingCache.java:174 - Completed loading (7 ms; 5 keys) KeyCache cache INFO [main] 2025-07-28 03:26:08,996 CommitLog.java:152 - No commitlog files found; skipping replay INFO [main] 2025-07-28 03:26:08,999 StorageService.java:600 - Populating token metadata from system tables INFO [main] 2025-07-28 03:26:09,028 StorageService.java:607 - Token metadata: INFO [main] 2025-07-28 03:26:09,179 QueryProcessor.java:163 - Preloaded 0 prepared statements INFO [main] 2025-07-28 03:26:09,180 StorageService.java:618 - Cassandra version: 3.11.3 INFO [main] 2025-07-28 03:26:09,180 StorageService.java:619 - Thrift API version: 20.1.0 INFO [main] 2025-07-28 03:26:09,180 StorageService.java:620 - CQL supported versions: 3.4.4 (default: 3.4.4) INFO [main] 2025-07-28 03:26:09,180 StorageService.java:622 - Native protocol supported versions: 3/v3, 4/v4, 5/v5-beta (default: 4/v4) INFO [main] 2025-07-28 03:26:09,234 IndexSummaryManager.java:85 - Initializing index summary manager with a memory pool size of 49 MB and a resize interval of 60 minutes INFO [main] 2025-07-28 03:26:09,248 MessagingService.java:761 - Starting Messaging Service on /10.0.0.50:7012 (ens3) WARN [main] 2025-07-28 03:26:09,260 SystemKeyspace.java:1087 - No host ID found, created 2e98bb2f-043c-4467-b430-af4c57dc391f (Note: This should happen exactly once per node). INFO [main] 2025-07-28 03:26:09,282 OutboundTcpConnection.java:108 - OutboundTcpConnection using coalescing strategy DISABLED INFO [HANDSHAKE-/10.0.0.38] 2025-07-28 03:26:09,806 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.38 INFO [HANDSHAKE-/10.0.0.254] 2025-07-28 03:26:12,234 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [main] 2025-07-28 03:26:13,337 StorageService.java:704 - Loading persisted ring state INFO [main] 2025-07-28 03:26:13,339 StorageService.java:822 - Starting up server gossip INFO [main] 2025-07-28 03:26:13,454 StorageService.java:1446 - JOINING: waiting for ring information INFO [HANDSHAKE-/10.0.0.38] 2025-07-28 03:26:14,397 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.38 INFO [GossipStage:1] 2025-07-28 03:26:14,833 Gossiper.java:1055 - Node /10.0.0.38 is now part of the cluster INFO [RequestResponseStage-3] 2025-07-28 03:26:14,858 Gossiper.java:1019 - InetAddress /10.0.0.38 is now UP INFO [GossipStage:1] 2025-07-28 03:26:14,902 TokenMetadata.java:479 - Updating topology for /10.0.0.38 INFO [GossipStage:1] 2025-07-28 03:26:14,906 TokenMetadata.java:479 - Updating topology for /10.0.0.38 INFO [GossipStage:1] 2025-07-28 03:26:14,907 Gossiper.java:1055 - Node /10.0.0.254 is now part of the cluster INFO [RequestResponseStage-1] 2025-07-28 03:26:15,029 Gossiper.java:1019 - InetAddress /10.0.0.254 is now UP WARN [GossipTasks:1] 2025-07-28 03:26:15,391 FailureDetector.java:288 - Not marking nodes down due to local pause of 6476367843 > 5000000000 INFO [InternalResponseStage:1] 2025-07-28 03:26:15,450 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_traces as storage service is not initialized INFO [InternalResponseStage:1] 2025-07-28 03:26:15,453 ColumnFamilyStore.java:411 - 
Initializing system_traces.events INFO [InternalResponseStage:1] 2025-07-28 03:26:15,483 ColumnFamilyStore.java:411 - Initializing system_traces.sessions INFO [InternalResponseStage:1] 2025-07-28 03:26:15,498 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_distributed as storage service is not initialized INFO [InternalResponseStage:1] 2025-07-28 03:26:15,500 ColumnFamilyStore.java:411 - Initializing system_distributed.parent_repair_history INFO [InternalResponseStage:1] 2025-07-28 03:26:15,514 ColumnFamilyStore.java:411 - Initializing system_distributed.repair_history INFO [InternalResponseStage:1] 2025-07-28 03:26:15,527 ColumnFamilyStore.java:411 - Initializing system_distributed.view_build_status INFO [InternalResponseStage:1] 2025-07-28 03:26:15,540 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_auth as storage service is not initialized INFO [InternalResponseStage:1] 2025-07-28 03:26:15,543 ColumnFamilyStore.java:411 - Initializing system_auth.resource_role_permissons_index INFO [InternalResponseStage:1] 2025-07-28 03:26:15,548 ColumnFamilyStore.java:411 - Initializing system_auth.role_members INFO [InternalResponseStage:1] 2025-07-28 03:26:15,553 ColumnFamilyStore.java:411 - Initializing system_auth.role_permissions INFO [InternalResponseStage:1] 2025-07-28 03:26:15,558 ColumnFamilyStore.java:411 - Initializing system_auth.roles INFO [HANDSHAKE-/10.0.0.254] 2025-07-28 03:26:15,855 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [main] 2025-07-28 03:26:16,455 StorageService.java:1446 - JOINING: waiting for schema information to complete INFO [main] 2025-07-28 03:26:16,455 StorageService.java:1446 - JOINING: schema complete, ready to bootstrap INFO [main] 2025-07-28 03:26:16,455 StorageService.java:1446 - JOINING: waiting for pending range calculation INFO [main] 2025-07-28 03:26:16,455 StorageService.java:1446 - JOINING: calculation complete, ready to bootstrap INFO [main] 2025-07-28 03:26:16,455 StorageService.java:1446 - JOINING: getting bootstrap token INFO [main] 2025-07-28 03:26:16,456 BootStrapper.java:228 - Generated random tokens. 
tokens are [3786302826473953501, 4857258975989499342, -4911115718907464650, -1971888663367908275, 3744759543826131968, -6093643207440103900, -4824772140655974986, 6471054812357356918, 7007569559130105989, -7766022006523977394, -6969629972329926030, -1254387902593060048, 1411235739575737690, 7370969852664019061, 2444085925299315974, -6929012703585167619, -1643222037640540153, 5439219323035444873, -8037203723118959724, 8284751172444480408, 6179580559803332561, -559392713952076881, 5940599037870099527, 4665422533965503386, 1042138937001491230, -2916601582181545152, 4186640610630161813, 6722409091413538948, 535941629301441418, -1662748444723675473, 3755470537584880532, 3945455023211674443, 7385671382705886023, 2381564279289781744, 4227517263936377555, -4653752267069769972, -365826050076182421, -812806129597117502, 6459882043229147139, -7353472714553930932, -3606307060006458316, -6027708398243781071, 667393384147493941, 7574173626998417857, -1972174101119645831, 8140283416914118716, -1925661586438069432, -2023748287068683670, -1229117009641248572, -2696685909180791093, -877474934831956303, -5687354886177616292, 1919427721997803555, -1704950326075845103, -1724648715566955749, 8358570761387794486, 1913521320316858104, 241926002559059756, -4852309027068322481, 8777379120317682950, -6270305903558953508, 4423767341367107788, -4256389241114343033, -4055937597323668064, 4919751416119088358, 505379440756434311, 4273202683497345307, -8416957245458401660, -5827650378183270321, -5372530165020556462, -2692194994014028422, 8624856687410000887, 6770214819514785211, -5784455771446157623, -5210193617040853275, -3700480989097221934, -6323351481564877153, -320739005259029147, -5181157497562584997, 1018252379330645036, -7763409604455204244, -3446146630428782582, 7727435549394231920, 365735554183131115, 3951076507248169918, 1254995961740541945, -4218786059376382924, -8413219867607062388, -7775213835849037542, 8391882920300990204, -1527624085087877247, 960132320832538170, -2259425180410730019, 3912896609358536163, 2496560710910618790, 1709218601870821954, -7027354157113960904, 6556396441052104223, -4982326805443189012, 4645896446823523033, -3392424875259294882, 399687393457724647, -3289483984720082104, -9209493793677948309, 7719334317432639809, -2552779243043164430, -1507469800264225160, -8960629229659906959, 2716804655074178526, 6872081010053488420, -5081268279667868368, 5096518587011015996, -6459226180190519469, -3359657660317421745, -9123254725552933484, 2897576284600578469, -3318939086461224642, -6238943281196697636, -8919928198078773815, -6851261749776260358, 1212781779752525448, 4527068799903069390, 1704083041531617641, -659342551044704524, 3701634071881664963, -8472574024828784977, 2005141132770636610, 4372034995014001253, 5135875794716201216, 5557782093068930333, -2815932866029178131, 1888263638540583300, -130410953423778163, 6517682254400032867, 2191916691039117659, 1450118132642119597, -5279543092472999180, -5250611827371964823, 9200154311067471572, -8635147340527500623, -7085459639108374051, -9062627310822920712, -8629067226881227573, -6446964624536102026, 2410629024662865989, -535762115613921219, 7902708547255762331, -5157779787753289577, -6168221134234487510, 1090493515053872305, 7203820963418924082, 5225626432199972366, 6115538007942460244, -4595301690812263138, 6610620424756779096, 6212906586357892115, 593548155341334486, -3702466725823049445, 3226582879050745817, -7948375464335163151, 4824250416306278645, 6184758551463816248, -6591075565578059901, 5793330723252354411, -5441215933853749340, 
-3325614151842663307, -6461562771333988029, 8605659139189194802, -3175591521489954516, 7847987170643208037, -8186060992190690042, 6558773865283677727, -231803798144267717, 6360300596091637265, -5749391922471945888, 7785771995268476696, -3596116201466672469, -770215040134975560, -2234333237513671557, 6230525780463754063, 4501017568922931648, -8152249972967978141, 502698026452528991, 7573428784835756787, -7270107186243807595, -8280525337995136185, -4526941262238704254, 3315532110140459972, -510910087254753439, -7160055369993875163, -1826405385745933081, -3860699299997884832, -3986288824469222434, -621792107584922568, 9024110872461193099, -2723562230518411936, -5297703001551665099, 6468086662800457831, -8338868244163117666, -843293569133872293, 9057039013451754849, 2675317605673443357, -6281294004720820746, 295117102160851850, -9134368698648634065, -2503791581066408732, -5681007466147537770, 2041690238026567541, 3270617067595269758, -8071381674583935827, 3946262155760059211, 7715403505668774423, -2392829484583779218, 6122730406526679823, 3581978536259962432, -3805554363451250709, 3263367071367890785, -8625799300681780277, -2992696521413847796, -2276924656730732033, 8660543932133194023, 57197738738538241, 2721107665991433849, 3443118234998612346, 3443434499818255334, 996103336765671845, -4622488568236642634, -6780575479542546437, 8470001151263934815, -3453977274181672436, -1442088112180322282, -5108773415058286072, -8300224226130886036, 4077223241032668680, -9067737260499881546, -5457095508158044220, 3100088067683838916, 2472370499725457155, -739436224228108260, -3920131825799301091, 4822616357642974328, -748266760337627662, 4262309266160486184, 5928847484488120464, 8896679102280189456, -3224200396737394251, 4667237002741525135, -197169610020533818, 8772004359071770513, 2624800465067736355, -1777839741936392502, 7564095461451115341, -4836542922759497935, -891362738057411070, 8736546192580417078, 1722160933436317089] INFO [main] 2025-07-28 03:26:16,513 StorageService.java:1446 - JOINING: sleeping 30000 ms for pending range setup INFO [MigrationStage:1] 2025-07-28 03:26:35,230 ViewManager.java:137 - Not submitting build tasks for views in keyspace reaper_db as storage service is not initialized INFO [MigrationStage:1] 2025-07-28 03:26:40,592 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration INFO [MigrationStage:1] 2025-07-28 03:26:40,946 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration_leader INFO [MigrationStage:1] 2025-07-28 03:26:42,420 ColumnFamilyStore.java:411 - Initializing reaper_db.running_reapers INFO [MigrationStage:1] 2025-07-28 03:26:43,010 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_unit_v1 INFO [MigrationStage:1] 2025-07-28 03:26:43,954 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_by_cluster_and_keyspace INFO [MigrationStage:1] 2025-07-28 03:26:44,921 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster INFO [MigrationStage:1] 2025-07-28 03:26:46,206 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_v1 INFO [main] 2025-07-28 03:26:46,514 StorageService.java:1446 - JOINING: Starting to bootstrap... 
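The 256 values just generated line up with num_tokens=256 in the configuration dump: with vnodes, a joining node picks num_tokens random ring positions rather than one, so ownership is spread across many small ranges. The reaper_db keyspace being initialized mid-join comes from Cassandra Reaper, which this deployment enables (CASSANDRA_REAPER_ENABLED=true, app/admin ports 8071/8072). Once the node is up, the token list can be read back over the remapped, password-protected JMX port, for example (assuming the reaper JMX credentials from the trace are the ones installed in /etc/cassandra/jmxremote.password):

nodetool -h 10.0.0.50 -p 7201 -u reaperUser -pw reaperPass info -T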
INFO [main] 2025-07-28 03:26:46,646 StreamResultFuture.java:90 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c] Executing streaming plan for Bootstrap INFO [StreamConnectionEstablisher:1] 2025-07-28 03:26:46,650 StreamSession.java:266 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c] Starting streaming to /10.0.0.38 INFO [StreamConnectionEstablisher:1] 2025-07-28 03:26:46,654 StreamCoordinator.java:264 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c, ID#0] Beginning stream session with /10.0.0.38 INFO [STREAM-IN-/10.0.0.38:7012] 2025-07-28 03:26:46,813 StreamResultFuture.java:173 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c ID#0] Prepare completed. Receiving 2 files(0.173KiB), sending 0 files(0.000KiB) INFO [StreamConnectionEstablisher:2] 2025-07-28 03:26:46,816 StreamSession.java:266 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c] Starting streaming to /10.0.0.254 INFO [StreamConnectionEstablisher:2] 2025-07-28 03:26:46,819 StreamCoordinator.java:264 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c, ID#0] Beginning stream session with /10.0.0.254 INFO [StreamReceiveTask:1] 2025-07-28 03:26:46,914 StreamResultFuture.java:187 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c] Session with /10.0.0.38 is complete INFO [MigrationStage:1] 2025-07-28 03:26:46,949 ColumnFamilyStore.java:411 - Initializing reaper_db.cluster INFO [STREAM-IN-/10.0.0.254:7012] 2025-07-28 03:26:47,004 StreamResultFuture.java:187 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c] Session with /10.0.0.254 is complete INFO [STREAM-IN-/10.0.0.254:7012] 2025-07-28 03:26:47,016 StreamResultFuture.java:219 - [Stream #b0c51b40-6b62-11f0-a203-4fe9959db69c] All sessions completed INFO [main] 2025-07-28 03:26:47,020 StorageService.java:1446 - JOINING: Finish joining ring INFO [STREAM-IN-/10.0.0.254:7012] 2025-07-28 03:26:47,023 StorageService.java:1505 - Bootstrap completed! 
for the tokens [3786302826473953501, 4857258975989499342, -4911115718907464650, -1971888663367908275, 3744759543826131968, -6093643207440103900, -4824772140655974986, 6471054812357356918, 7007569559130105989, -7766022006523977394, -6969629972329926030, -1254387902593060048, 1411235739575737690, 7370969852664019061, 2444085925299315974, -6929012703585167619, -1643222037640540153, 5439219323035444873, -8037203723118959724, 8284751172444480408, 6179580559803332561, -559392713952076881, 5940599037870099527, 4665422533965503386, 1042138937001491230, -2916601582181545152, 4186640610630161813, 6722409091413538948, 535941629301441418, -1662748444723675473, 3755470537584880532, 3945455023211674443, 7385671382705886023, 2381564279289781744, 4227517263936377555, -4653752267069769972, -365826050076182421, -812806129597117502, 6459882043229147139, -7353472714553930932, -3606307060006458316, -6027708398243781071, 667393384147493941, 7574173626998417857, -1972174101119645831, 8140283416914118716, -1925661586438069432, -2023748287068683670, -1229117009641248572, -2696685909180791093, -877474934831956303, -5687354886177616292, 1919427721997803555, -1704950326075845103, -1724648715566955749, 8358570761387794486, 1913521320316858104, 241926002559059756, -4852309027068322481, 8777379120317682950, -6270305903558953508, 4423767341367107788, -4256389241114343033, -4055937597323668064, 4919751416119088358, 505379440756434311, 4273202683497345307, -8416957245458401660, -5827650378183270321, -5372530165020556462, -2692194994014028422, 8624856687410000887, 6770214819514785211, -5784455771446157623, -5210193617040853275, -3700480989097221934, -6323351481564877153, -320739005259029147, -5181157497562584997, 1018252379330645036, -7763409604455204244, -3446146630428782582, 7727435549394231920, 365735554183131115, 3951076507248169918, 1254995961740541945, -4218786059376382924, -8413219867607062388, -7775213835849037542, 8391882920300990204, -1527624085087877247, 960132320832538170, -2259425180410730019, 3912896609358536163, 2496560710910618790, 1709218601870821954, -7027354157113960904, 6556396441052104223, -4982326805443189012, 4645896446823523033, -3392424875259294882, 399687393457724647, -3289483984720082104, -9209493793677948309, 7719334317432639809, -2552779243043164430, -1507469800264225160, -8960629229659906959, 2716804655074178526, 6872081010053488420, -5081268279667868368, 5096518587011015996, -6459226180190519469, -3359657660317421745, -9123254725552933484, 2897576284600578469, -3318939086461224642, -6238943281196697636, -8919928198078773815, -6851261749776260358, 1212781779752525448, 4527068799903069390, 1704083041531617641, -659342551044704524, 3701634071881664963, -8472574024828784977, 2005141132770636610, 4372034995014001253, 5135875794716201216, 5557782093068930333, -2815932866029178131, 1888263638540583300, -130410953423778163, 6517682254400032867, 2191916691039117659, 1450118132642119597, -5279543092472999180, -5250611827371964823, 9200154311067471572, -8635147340527500623, -7085459639108374051, -9062627310822920712, -8629067226881227573, -6446964624536102026, 2410629024662865989, -535762115613921219, 7902708547255762331, -5157779787753289577, -6168221134234487510, 1090493515053872305, 7203820963418924082, 5225626432199972366, 6115538007942460244, -4595301690812263138, 6610620424756779096, 6212906586357892115, 593548155341334486, -3702466725823049445, 3226582879050745817, -7948375464335163151, 4824250416306278645, 6184758551463816248, -6591075565578059901, 5793330723252354411, -5441215933853749340, 
-3325614151842663307, -6461562771333988029, 8605659139189194802, -3175591521489954516, 7847987170643208037, -8186060992190690042, 6558773865283677727, -231803798144267717, 6360300596091637265, -5749391922471945888, 7785771995268476696, -3596116201466672469, -770215040134975560, -2234333237513671557, 6230525780463754063, 4501017568922931648, -8152249972967978141, 502698026452528991, 7573428784835756787, -7270107186243807595, -8280525337995136185, -4526941262238704254, 3315532110140459972, -510910087254753439, -7160055369993875163, -1826405385745933081, -3860699299997884832, -3986288824469222434, -621792107584922568, 9024110872461193099, -2723562230518411936, -5297703001551665099, 6468086662800457831, -8338868244163117666, -843293569133872293, 9057039013451754849, 2675317605673443357, -6281294004720820746, 295117102160851850, -9134368698648634065, -2503791581066408732, -5681007466147537770, 2041690238026567541, 3270617067595269758, -8071381674583935827, 3946262155760059211, 7715403505668774423, -2392829484583779218, 6122730406526679823, 3581978536259962432, -3805554363451250709, 3263367071367890785, -8625799300681780277, -2992696521413847796, -2276924656730732033, 8660543932133194023, 57197738738538241, 2721107665991433849, 3443118234998612346, 3443434499818255334, 996103336765671845, -4622488568236642634, -6780575479542546437, 8470001151263934815, -3453977274181672436, -1442088112180322282, -5108773415058286072, -8300224226130886036, 4077223241032668680, -9067737260499881546, -5457095508158044220, 3100088067683838916, 2472370499725457155, -739436224228108260, -3920131825799301091, 4822616357642974328, -748266760337627662, 4262309266160486184, 5928847484488120464, 8896679102280189456, -3224200396737394251, 4667237002741525135, -197169610020533818, 8772004359071770513, 2624800465067736355, -1777839741936392502, 7564095461451115341, -4836542922759497935, -891362738057411070, 8736546192580417078, 1722160933436317089]
INFO [main] 2025-07-28 03:26:47,058 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='running_reapers')
INFO [main] 2025-07-28 03:26:47,061 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_unit_v1')
INFO [main] 2025-07-28 03:26:47,061 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='schema_migration_leader')
INFO [main] 2025-07-28 03:26:47,061 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='schema_migration')
INFO [main] 2025-07-28 03:26:47,061 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_run_by_cluster')
INFO [main] 2025-07-28 03:26:47,061 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='cluster')
INFO [main] 2025-07-28 03:26:47,061 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_schedule_v1')
INFO [main] 2025-07-28 03:26:47,061 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_schedule_by_cluster_and_keyspace')
INFO [main] 2025-07-28 03:26:47,115 Gossiper.java:1692 - Waiting for gossip to settle...
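The bracketed list above is this node's set of virtual-node tokens, logged once while joining and again with "Bootstrap completed!". Once the node is up, ring membership and token ownership can be confirmed from any member; a sketch under the same assumption that nodetool is available inside the container:

    # every node (10.0.0.254, 10.0.0.38, 10.0.0.50) should report state UN (Up/Normal)
    nodetool status
    # one line per vnode token, mapped to the endpoint that owns it
    nodetool ring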
INFO [MigrationStage:1] 2025-07-28 03:26:48,064 ColumnFamilyStore.java:411 - Initializing reaper_db.snapshot
INFO [MigrationStage:1] 2025-07-28 03:26:48,877 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v1
INFO [MigrationStage:1] 2025-07-28 03:26:50,034 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run
INFO [MigrationStage:1] 2025-07-28 03:26:51,053 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_unit
INFO [MigrationStage:1] 2025-07-28 03:26:52,034 ColumnFamilyStore.java:411 - Initializing reaper_db.leader
INFO [main] 2025-07-28 03:26:55,116 Gossiper.java:1723 - No gossip backlog; proceeding
INFO [main] 2025-07-28 03:26:55,290 NativeTransportService.java:70 - Netty using native Epoll event loop
INFO [main] 2025-07-28 03:26:55,345 Server.java:155 - Using Netty Version: [netty-buffer=netty-buffer-4.1.39.Final.88c2a4c (repository: dirty), netty-codec=netty-codec-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-dns=netty-codec-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-haproxy=netty-codec-haproxy-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http=netty-codec-http-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http2=netty-codec-http2-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-memcache=netty-codec-memcache-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-mqtt=netty-codec-mqtt-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-redis=netty-codec-redis-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-smtp=netty-codec-smtp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-socks=netty-codec-socks-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-stomp=netty-codec-stomp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-xml=netty-codec-xml-4.1.39.Final.88c2a4c (repository: dirty), netty-common=netty-common-4.1.39.Final.88c2a4c (repository: dirty), netty-handler=netty-handler-4.1.39.Final.88c2a4c (repository: dirty), netty-handler-proxy=netty-handler-proxy-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver=netty-resolver-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver-dns=netty-resolver-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-tcnative=netty-tcnative-2.0.25.Final.c46c351, netty-transport=netty-transport-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-epoll=netty-transport-native-epoll-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-kqueue=netty-transport-native-kqueue-4.1.39.Final.88c2a4cab5 (repository: dirty), netty-transport-native-unix-common=netty-transport-native-unix-common-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-rxtx=netty-transport-rxtx-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-sctp=netty-transport-sctp-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-udt=netty-transport-udt-4.1.39.Final.88c2a4c (repository: dirty)]
INFO [main] 2025-07-28 03:26:55,346 Server.java:156 - Starting listening for CQL clients on /10.0.0.50:9041 (unencrypted)...
INFO [main] 2025-07-28 03:26:55,403 ThriftServer.java:116 - Binding thrift service to /10.0.0.50:9161
INFO [Thread-4] 2025-07-28 03:26:55,408 ThriftServer.java:133 - Listening for thrift clients...
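With gossip settled, the node opens its client-facing listeners: native-protocol CQL on 10.0.0.50:9041 (unencrypted) and legacy Thrift on 10.0.0.50:9161, the non-default ports this deployment configures. A quick liveness probe against the CQL listener, assuming cqlsh is installed in the container (a hypothetical check, not part of this log):

    # should return the system keyspaces plus reaper_db once the schema settles
    cqlsh 10.0.0.50 9041 -e "DESCRIBE KEYSPACES;"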
INFO [MigrationStage:1] 2025-07-28 03:26:56,160 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v2 INFO [MigrationStage:1] 2025-07-28 03:26:56,915 ColumnFamilyStore.java:411 - Initializing reaper_db.node_operations INFO [HANDSHAKE-/10.0.0.50] 2025-07-28 03:26:59,928 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.50 INFO [Native-Transport-Requests-2] 2025-07-28 03:27:00,051 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@9159f41[cfId=b8d66230-6b62-11f0-a203-4fe9959db69c,ksName=reaper_db,cfName=diagnostic_event_subscription,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster description export_file_logger export_http_endpoint export_sse events nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[cluster, export_http_endpoint, events, id, export_sse, nodes, export_file_logger, description],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:00,183 ColumnFamilyStore.java:411 - Initializing reaper_db.diagnostic_event_subscription INFO [MigrationStage:1] 2025-07-28 03:27:03,106 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v3 INFO [Native-Transport-Requests-2] 2025-07-28 03:27:04,089 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@212937e7[cfId=bb3e8890-6b62-11f0-a203-4fe9959db69c,ksName=reaper_db,cfName=repair_run_by_cluster_v2,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimeUUIDType)),partitionColumns=[[] | [repair_run_state]],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, repair_run_state, id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:04,256 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster_v2 WARN [Native-Transport-Requests-2] 2025-07-28 03:27:04,992 TimeFcts.java:99 - The function 'dateof' is deprecated. Use the function 'toTimestamp' instead. 
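The CFMetaData dump above pins down the DDL behind repair_run_by_cluster_v2: partition key cluster_name (UTF8Type), clustering column id (reversed TimeUUIDType, so newest runs sort first), and one regular column, repair_run_state. A hedged reconstruction fed through cqlsh; the text type for repair_run_state is an assumption, since the dump records regular-column names but not their types:

    # sketch of the CREATE TABLE issued at 03:27:04 (repair_run_state type assumed)
    cqlsh 10.0.0.50 9041 <<'EOF'
    CREATE TABLE IF NOT EXISTS reaper_db.repair_run_by_cluster_v2 (
        cluster_name text,
        id timeuuid,
        repair_run_state text,
        PRIMARY KEY (cluster_name, id)
    ) WITH CLUSTERING ORDER BY (id DESC)
       AND compaction = {'class': 'LeveledCompactionStrategy'}
       AND bloom_filter_fp_chance = 0.1
       AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'};
    EOF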
INFO [MigrationStage:1] 2025-07-28 03:27:06,072 ColumnFamilyStore.java:411 - Initializing reaper_db.running_repairs INFO [Native-Transport-Requests-1] 2025-07-28 03:27:07,096 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@2f305b25[cfId=bd095d80-6b62-11f0-a203-4fe9959db69c,ksName=reaper_db,cfName=percent_repaired_by_schedule,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [keyspace_name percent_repaired table_name ts]],partitionKeyColumns=[cluster_name, repair_schedule_id, time_bucket],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[time_bucket, node, ts, keyspace_name, percent_repaired, repair_schedule_id, table_name, cluster_name],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:07,275 ColumnFamilyStore.java:411 - Initializing reaper_db.percent_repaired_by_schedule INFO [Native-Transport-Requests-1] 2025-07-28 03:27:10,016 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@6f14c026[cfId=b2cd2040-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@5837669[cfId=b2cd2040-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, 
bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-07-28 03:27:13,061 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@6f14c026[cfId=b2cd2040-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@600d9e84[cfId=b2cd2040-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 
'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-07-28 03:27:13,891 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@22dfd856[cfId=c1163330-6b62-11f0-a203-4fe9959db69c,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:13,986 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_uuid_table INFO [Native-Transport-Requests-3] 2025-07-28 03:27:15,116 MigrationManager.java:454 - Update table 'reaper_db/cluster' From org.apache.cassandra.config.CFMetaData@18db236c[cfId=b0f022e0-6b62-11f0-87bc-23d5ed887d69,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_contact partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, last_contact, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] To 
org.apache.cassandra.config.CFMetaData@57cbf14c[cfId=b0f022e0-6b62-11f0-87bc-23d5ed887d69,ksName=reaper_db,cfName=cluster,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_contact partitioner properties state seed_hosts]],partitionKeyColumns=[name],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[properties, state, name, last_contact, partitioner, seed_hosts],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-07-28 03:27:15,118 MigrationManager.java:454 - Update table 'reaper_db/running_repairs' From org.apache.cassandra.config.CFMetaData@794763cd[cfId=bc56fd70-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=running_repairs,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [reaper_instance_host reaper_instance_id segment_id]],partitionKeyColumns=[repair_id],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, repair_id, node, segment_id, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@2bc21139[cfId=bc56fd70-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=running_repairs,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [reaper_instance_host reaper_instance_id segment_id]],partitionKeyColumns=[repair_id],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, repair_id, node, segment_id, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-4] 2025-07-28 03:27:15,118 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From 
org.apache.cassandra.config.CFMetaData@6f14c026[cfId=b2cd2040-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6535644f[cfId=b2cd2040-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-07-28 03:27:15,120 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_by_cluster_and_keyspace' From 
org.apache.cassandra.config.CFMetaData@7896d14a[cfId=af2db260-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_schedule_by_cluster_and_keyspace,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name, keyspace_name],clusteringColumns=[repair_schedule_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster_name, repair_schedule_id, keyspace_name],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@31f49407[cfId=af2db260-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_schedule_by_cluster_and_keyspace,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[cluster_name, keyspace_name],clusteringColumns=[repair_schedule_id],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[cluster_name, repair_schedule_id, keyspace_name],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-5] 2025-07-28 03:27:15,120 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_v1' From org.apache.cassandra.config.CFMetaData@5ffac729[cfId=b07b19f0-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity last_run next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, 
segment_count, days_between, percent_unrepaired_threshold, id, last_run, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@69f0f7f0[cfId=b07b19f0-6b62-11f0-bf49-65ab5c561006,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity last_run next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, last_run, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-6] 2025-07-28 03:27:15,120 MigrationManager.java:454 - Update table 'reaper_db/repair_run_by_unit' From org.apache.cassandra.config.CFMetaData@7be74596[cfId=b368c400-6b62-11f0-87bc-23d5ed887d69,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | []],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@66335c4[cfId=b368c400-6b62-11f0-87bc-23d5ed887d69,ksName=reaper_db,cfName=repair_run_by_unit,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'ALL'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[] | 
[]],partitionKeyColumns=[repair_unit_id],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[repair_unit_id, id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-7] 2025-07-28 03:27:15,129 MigrationManager.java:454 - Update table 'reaper_db/node_metrics_v1' From org.apache.cassandra.config.CFMetaData@42c0500e[cfId=b21c94f0-6b62-11f0-87bc-23d5ed887d69,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@4452528e[cfId=b21c94f0-6b62-11f0-87bc-23d5ed887d69,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy, options={min_threshold=4, max_threshold=32, compaction_window_size=2, compaction_window_unit=MINUTES, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:18,141 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.service_instance_table INFO [MigrationStage:1] 2025-07-28 03:27:22,968 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_fq_name_table INFO [Native-Transport-Requests-1] 2025-07-28 03:27:24,829 MigrationManager.java:376 - Create new table: 
org.apache.cassandra.config.CFMetaData@5e65541d[cfId=c79b34d0-6b62-11f0-a203-4fe9959db69c,ksName=svc_monitor_keyspace,cfName=pool_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:24,921 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.pool_table INFO [Native-Transport-Requests-2] 2025-07-28 03:27:25,965 MigrationManager.java:454 - Update table 'config_db_uuid/obj_fq_name_table' From org.apache.cassandra.config.CFMetaData@2c95b21c[cfId=c6694480-6b62-11f0-bf49-65ab5c561006,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3ebcc13f[cfId=c6694480-6b62-11f0-bf49-65ab5c561006,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:28,026 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_shared_table INFO [MigrationStage:1] 2025-07-28 03:27:31,122 ColumnFamilyStore.java:411 - 
Initializing svc_monitor_keyspace.loadbalancer_table INFO [Native-Transport-Requests-1] 2025-07-28 03:27:32,880 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@583d0597[cfId=cc67b100-6b62-11f0-a203-4fe9959db69c,ksName=useragent,cfName=useragent_keyval_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:33,006 ColumnFamilyStore.java:411 - Initializing useragent.useragent_keyval_table INFO [Native-Transport-Requests-2] 2025-07-28 03:27:33,990 MigrationManager.java:454 - Update table 'svc_monitor_keyspace/loadbalancer_table' From org.apache.cassandra.config.CFMetaData@5fcf3055[cfId=cb483740-6b62-11f0-87bc-23d5ed887d69,ksName=svc_monitor_keyspace,cfName=loadbalancer_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@59c27f80[cfId=cb483740-6b62-11f0-87bc-23d5ed887d69,ksName=svc_monitor_keyspace,cfName=loadbalancer_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO 
[MigrationStage:1] 2025-07-28 03:27:36,139 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.healthmonitor_table INFO [Native-Transport-Requests-1] 2025-07-28 03:27:38,923 MigrationManager.java:454 - Update table 'config_db_uuid/obj_shared_table' From org.apache.cassandra.config.CFMetaData@12dd16a0[cfId=c9322880-6b62-11f0-bf49-65ab5c561006,ksName=config_db_uuid,cfName=obj_shared_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@2ed8a6c0[cfId=c9322880-6b62-11f0-bf49-65ab5c561006,ksName=config_db_uuid,cfName=obj_shared_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-07-28 03:27:42,090 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@6ce5a982[cfId=d1e506a0-6b62-11f0-a203-4fe9959db69c,ksName=to_bgp_keyspace,cfName=route_target_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:42,336 ColumnFamilyStore.java:411 - Initializing 
to_bgp_keyspace.route_target_table INFO [MigrationStage:1] 2025-07-28 03:27:44,010 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_ip_address_table INFO [Native-Transport-Requests-5] 2025-07-28 03:27:45,064 MigrationManager.java:454 - Update table 'to_bgp_keyspace/service_chain_ip_address_table' From org.apache.cassandra.config.CFMetaData@69f9b838[cfId=d2fa4730-6b62-11f0-87bc-23d5ed887d69,ksName=to_bgp_keyspace,cfName=service_chain_ip_address_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@2e0841e4[cfId=d2fa4730-6b62-11f0-87bc-23d5ed887d69,ksName=to_bgp_keyspace,cfName=service_chain_ip_address_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:27:46,708 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_table INFO [Native-Transport-Requests-1] 2025-07-28 03:27:47,987 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@da5655f[cfId=d568d630-6b62-11f0-a203-4fe9959db69c,ksName=to_bgp_keyspace,cfName=service_chain_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | 
[value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]
INFO [MigrationStage:1] 2025-07-28 03:27:48,101 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_uuid_table
+ curl http://10.0.0.254:8071/webui/login.html
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100  1940  100  1940    0     0  14290      0 --:--:-- --:--:-- --:--:-- 14264
100  1940  100  1940    0     0  14284      0 --:--:-- --:--:-- --:--:-- 14264
+ export CASSANDRA_REAPER_JMX_KEY
+ run_service cassandra-reaper
+ [[ 10.0.0.254 == \1\0\.\0\.\0\.\5\0 ]]
+ echo 'Reaper started successfully'
Reaper started successfully
+ [[ -n 1999 ]]
+ [[ -n 1999 ]]
+ local owner_opts=1999:1999
+ mkdir -p /etc/contrail /var/lib/contrail
+ chown 1999:1999 /etc/contrail /var/lib/contrail
+ find /etc/contrail -uid 0 -exec chown 1999:1999 '{}' +
+ chmod 755 /etc/contrail
+ do_run_service cassandra-reaper
+ [[ -n 1999 ]]
+ [[ -n 1999 ]]
+ mkdir -p /var/crashes
+ chmod 777 /var/crashes
++ id -un 1999
+ local user_name=contrail
+ export HOME=/home/contrail
+ HOME=/home/contrail
+ mkdir -p /home/contrail
+ chown -R 1999:1999 /home/contrail
+ exec setpriv --reuid 1999 --regid 1999 --clear-groups --no-new-privs cassandra-reaper
Looking for reaper under /usr
WARN [2025-07-28 03:28:06,970] [main] i.c.ReaperApplication - Reaper is ready to get things done!
INFO [Native-Transport-Requests-1] 2025-07-28 03:29:08,113 MigrationManager.java:331 - Create new Keyspace: KeyspaceMetadata{name=dm_keyspace, params=KeyspaceParams{durable_writes=true, replication=ReplicationParams{class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}}, tables=[], views=[], functions=[], types=[]}
INFO [MigrationStage:1] 2025-07-28 03:29:10,311 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_vn_ip_table
INFO [Native-Transport-Requests-2] 2025-07-28 03:29:11,947 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@5f20f36c[cfId=07741db0-6b63-11f0-a203-4fe9959db69c,ksName=dm_keyspace,cfName=dm_pr_asn_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]]
INFO [MigrationStage:1] 2025-07-28 03:29:12,063 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_asn_table
INFO [MigrationStage:1] 2025-07-28 03:29:14,226 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_ni_ipv6_ll_table
INFO [Native-Transport-Requests-1] 2025-07-28 03:29:15,000 MigrationManager.java:454 - Update table 'dm_keyspace/dm_ni_ipv6_ll_table' From
org.apache.cassandra.config.CFMetaData@1de8de9f[cfId=08b5ec80-6b63-11f0-87bc-23d5ed887d69,ksName=dm_keyspace,cfName=dm_ni_ipv6_ll_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@5209289[cfId=08b5ec80-6b63-11f0-87bc-23d5ed887d69,ksName=dm_keyspace,cfName=dm_ni_ipv6_ll_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@66216785, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-07-28 03:29:16,207 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pnf_resource_table INFO [HANDSHAKE-/10.0.0.38] 2025-07-28 03:34:25,428 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:25,732 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-07-28 03:34:25,941 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-07-28 03:34:25,986 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,014 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,039 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,100 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,169 Validator.java:281 - [repair 
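The keyspace and table entries above pin down the schema this service writes: dm_keyspace uses SimpleStrategy with replication_factor=3 (a replica on each of 10.0.0.254, 10.0.0.38 and 10.0.0.50), and its column families are Thrift-style dense tables whose key, column1 and value are all BytesType. A minimal CQL sketch of the same definitions, assuming cqlsh can reach this node on the CQL port 9041 configured earlier in the trace; flags=[DENSE] corresponds to COMPACT STORAGE and BytesType to blob:

cqlsh 10.0.0.50 9041 <<'EOF'
-- SimpleStrategy ignores DC/rack topology; RF=3 on this three-node ring
-- places every partition on all three controllers.
CREATE KEYSPACE IF NOT EXISTS dm_keyspace
  WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}
  AND durable_writes = true;

-- Dense column family, as logged for dm_keyspace.dm_pr_asn_table.
CREATE TABLE IF NOT EXISTS dm_keyspace.dm_pr_asn_table (
    key blob,
    column1 blob,
    value blob,
    PRIMARY KEY (key, column1)
) WITH COMPACT STORAGE
  AND gc_grace_seconds = 864000
  AND compaction = {'class': 'SizeTieredCompactionStrategy',
                    'min_threshold': 4, 'max_threshold': 32};
EOF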
#c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,186 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,251 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,332 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,357 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,446 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,468 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,535 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,599 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,621 Validator.java:281 - [repair #c2618270-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-07-28 03:34:26,639 ActiveRepairService.java:452 - [repair #c2559b90-6b63-11f0-bf49-65ab5c561006] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-07-28 03:34:27,785 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-07-28 03:34:27,829 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-07-28 03:34:27,861 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-07-28 03:34:27,924 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-07-28 03:34:27,948 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-07-28 03:34:27,977 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-07-28 03:34:27,990 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,007 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] 
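The "Not a global repair, will not do anticompaction" lines are expected here: these sessions repair explicit token subranges, and Cassandra only anticompacts (rewrites SSTables to separate repaired from unrepaired data) after repairs that cover a node's full range set. With stock nodetool the two modes look like the sketch below; Reaper drives its subrange repairs over JMX rather than through nodetool:

# full repair: compares merkle trees and streams differences, no anticompaction bookkeeping
nodetool repair --full reaper_db

# incremental repair (the nodetool default on the 2.2-3.x line): on success,
# anticompaction marks repaired SSTables so later runs can skip them
nodetool repair reaper_db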
Sending completed merkle tree to /10.0.0.254 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,061 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,083 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,099 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,113 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,142 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,164 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,192 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,209 Validator.java:281 - [repair #c3a748e0-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-07-28 03:34:28,223 ActiveRepairService.java:452 - [repair #c3a57420-6b63-11f0-bf49-65ab5c561006] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,726 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,752 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,777 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,795 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,807 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,826 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,844 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,863 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,889 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to 
/10.0.0.38 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,911 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,930 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-07-28 03:34:35,988 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-07-28 03:34:36,014 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:36,049 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-07-28 03:34:36,075 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-07-28 03:34:36,097 Validator.java:281 - [repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-07-28 03:34:36,114 ActiveRepairService.java:452 - [repair #c859fb80-6b63-11f0-87bc-23d5ed887d69] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,114 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,140 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,163 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,210 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,243 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,262 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,287 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,299 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,316 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,337 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_reapers INFO 
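Every session above carries a UUID tag such as repair #c85ce1b0-6b63-11f0-87bc-23d5ed887d69, which is the practical handle for untangling the interleaved Validator output. A small sketch against the console log this container writes (path as set at the top of this trace):

LOG=/var/log/contrail/config-database-cassandra/console.log
# every event for one session, in order: merkle trees sent, then the anticompaction verdict
grep 'repair #c85ce1b0' "$LOG"
# how many merkle trees this node shipped out in total
grep -c 'Sending completed merkle tree' "$LOG"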
[AntiEntropyStage:1] 2025-07-28 03:34:38,352 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,411 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,435 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,462 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,477 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,488 Validator.java:281 - [repair #c9b63020-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-07-28 03:34:38,509 ActiveRepairService.java:452 - [repair #c9b39810-6b63-11f0-87bc-23d5ed887d69] Not a global repair, will not do anticompaction INFO [Repair-Task-2] 2025-07-28 03:34:45,648 RepairRunnable.java:139 - Starting repair command #1 (ce5ab1f0-6b63-11f0-a203-4fe9959db69c), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 6, pull repair: false) INFO [Repair-Task-2] 2025-07-28 03:34:45,681 RepairSession.java:228 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] new session: will sync /10.0.0.50, /10.0.0.38, /10.0.0.254 on range [(-843293569133872293,-841972093742267167], (7215189810485907768,7225881696546112953], (4498991980371293601,4501017568922931648], (-2142723840115235299,-2138268126819375847], (3570131855988479216,3581978536259962432], (-1826405385745933081,-1777839741936392502]] for reaper_db.[running_reapers, diagnostic_event_subscription, repair_unit_v1, repair_run_by_cluster, percent_repaired_by_schedule, snapshot, repair_run, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, schema_migration, repair_run_by_unit, repair_schedule_v1, cluster, schema_migration_leader, leader, running_repairs] INFO [RepairJobTask:3] 2025-07-28 03:34:45,843 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for running_reapers (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:45,844 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,854 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,855 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,862 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.254 INFO 
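Repair command #1 above runs with parallelism: dc_parallel, incremental: false and six explicit token ranges, i.e. Reaper's subrange scheduling issued over JMX. A rough nodetool equivalent for the first logged range, where the -st/-et pair bounds the repair to one start/end token:

# full, datacenter-parallel repair of a single token range of reaper_db,
# mirroring the logged options (job threads: 1, incremental: false)
nodetool repair --full -dcpar -j 1 \
  -st -843293569133872293 -et -841972093742267167 reaper_db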
[AntiEntropyStage:1] 2025-07-28 03:34:45,863 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,867 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:4] 2025-07-28 03:34:45,869 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:1] 2025-07-28 03:34:45,871 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:5] 2025-07-28 03:34:45,871 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:2] 2025-07-28 03:34:45,871 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-07-28 03:34:45,871 RepairJob.java:257 - Validating /10.0.0.38 INFO [RepairJobTask:1] 2025-07-28 03:34:45,872 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] running_reapers is fully synced INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,875 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,875 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,885 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,886 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,888 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:1] 2025-07-28 03:34:45,892 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:5] 2025-07-28 03:34:45,892 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-07-28 03:34:45,893 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:5] 2025-07-28 03:34:45,894 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] diagnostic_event_subscription is fully synced INFO [RepairJobTask:4] 2025-07-28 03:34:45,894 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:4] 2025-07-28 03:34:45,895 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,900 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,900 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,904 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO 
[AntiEntropyStage:1] 2025-07-28 03:34:45,904 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,916 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:2] 2025-07-28 03:34:45,919 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:6] 2025-07-28 03:34:45,919 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-07-28 03:34:45,924 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-07-28 03:34:45,924 RepairJob.java:257 - Validating /10.0.0.38 INFO [RepairJobTask:3] 2025-07-28 03:34:45,924 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,927 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,928 RepairJob.java:270 - Validating /10.0.0.254 INFO [RepairJobTask:6] 2025-07-28 03:34:45,929 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] repair_unit_v1 is fully synced INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,932 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,932 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,939 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.50 INFO [RepairJobTask:3] 2025-07-28 03:34:45,942 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-07-28 03:34:45,942 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-07-28 03:34:45,943 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-07-28 03:34:45,944 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] repair_run_by_cluster is fully synced INFO [RepairJobTask:2] 2025-07-28 03:34:45,956 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-07-28 03:34:45,956 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,961 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,961 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,965 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 
03:34:45,966 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,970 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50 INFO [RepairJobTask:1] 2025-07-28 03:34:45,971 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:3] 2025-07-28 03:34:45,971 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-07-28 03:34:45,971 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:3] 2025-07-28 03:34:45,971 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:3] 2025-07-28 03:34:45,973 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for snapshot (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:45,973 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,976 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,976 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,978 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,979 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:45,981 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.50 INFO [RepairJobTask:5] 2025-07-28 03:34:45,981 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:2] 2025-07-28 03:34:45,982 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:7] 2025-07-28 03:34:45,982 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot INFO [RepairJobTask:4] 2025-07-28 03:34:45,983 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] snapshot is fully synced INFO [RepairJobTask:7] 2025-07-28 03:34:46,034 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-07-28 03:34:46,034 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,037 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,037 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,041 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,041 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,043 
RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.50 INFO [RepairJobTask:2] 2025-07-28 03:34:46,044 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:3] 2025-07-28 03:34:46,043 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run INFO [RepairJobTask:7] 2025-07-28 03:34:46,044 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:3] 2025-07-28 03:34:46,044 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] repair_run is fully synced INFO [RepairJobTask:3] 2025-07-28 03:34:46,047 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:46,047 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,049 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,050 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,052 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,052 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,053 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50 INFO [RepairJobTask:5] 2025-07-28 03:34:46,054 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-07-28 03:34:46,054 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:1] 2025-07-28 03:34:46,054 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-07-28 03:34:46,054 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:6] 2025-07-28 03:34:46,058 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-07-28 03:34:46,058 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,061 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,061 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,065 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,065 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,068 
RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50 INFO [RepairJobTask:5] 2025-07-28 03:34:46,070 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:1] 2025-07-28 03:34:46,070 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:6] 2025-07-28 03:34:46,070 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-07-28 03:34:46,076 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for schema_migration (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:1] 2025-07-28 03:34:46,076 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:2] 2025-07-28 03:34:46,080 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,085 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,085 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,092 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,092 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,094 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.50 INFO [RepairJobTask:7] 2025-07-28 03:34:46,094 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:3] 2025-07-28 03:34:46,094 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:4] 2025-07-28 03:34:46,094 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:3] 2025-07-28 03:34:46,094 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] schema_migration is fully synced INFO [RepairJobTask:3] 2025-07-28 03:34:46,100 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:46,100 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,104 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,104 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,108 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,108 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 
2025-07-28 03:34:46,110 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:3] 2025-07-28 03:34:46,111 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-07-28 03:34:46,110 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-07-28 03:34:46,111 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-07-28 03:34:46,112 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] repair_run_by_unit is fully synced INFO [RepairJobTask:6] 2025-07-28 03:34:46,114 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:6] 2025-07-28 03:34:46,116 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,121 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,121 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,124 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,124 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,127 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:4] 2025-07-28 03:34:46,127 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:7] 2025-07-28 03:34:46,128 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:2] 2025-07-28 03:34:46,128 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:2] 2025-07-28 03:34:46,128 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] repair_schedule_v1 is fully synced INFO [RepairJobTask:2] 2025-07-28 03:34:46,137 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-07-28 03:34:46,138 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,145 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,145 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,150 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,150 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,153 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] 
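With three endpoints, each table yields three pairwise SyncTask verdicts (.38/.254, .38/.50, .254/.50) and then a single "fully synced" line, so a session over the sixteen reaper_db tables should declare sixteen of them. A quick, hypothetical pipeline over this console log to verify that for session ce5f6ce0:

LOG=/var/log/contrail/config-database-cassandra/console.log
# print each table the session declared fully synced; expect all 16 from the session header
grep 'repair #ce5f6ce0' "$LOG" | awk '/is fully synced/ {print $(NF-3)}' | sort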
Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:4] 2025-07-28 03:34:46,154 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:2] 2025-07-28 03:34:46,154 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:7] 2025-07-28 03:34:46,155 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:7] 2025-07-28 03:34:46,155 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] cluster is fully synced INFO [RepairJobTask:7] 2025-07-28 03:34:46,157 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for schema_migration_leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-07-28 03:34:46,158 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,167 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,167 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,172 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,172 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,175 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:5] 2025-07-28 03:34:46,175 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-07-28 03:34:46,176 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-07-28 03:34:46,176 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-07-28 03:34:46,176 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] schema_migration_leader is fully synced INFO [RepairJobTask:3] 2025-07-28 03:34:46,185 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:46,185 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,189 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,189 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,199 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,199 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,201 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:3] 2025-07-28 03:34:46,202 SyncTask.java:66 - [repair 
#ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:7] 2025-07-28 03:34:46,202 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:1] 2025-07-28 03:34:46,202 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:7] 2025-07-28 03:34:46,202 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] leader is fully synced INFO [RepairJobTask:7] 2025-07-28 03:34:46,255 RepairJob.java:234 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for running_repairs (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-07-28 03:34:46,255 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,269 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,270 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,276 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,276 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:46,279 RepairSession.java:180 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:2] 2025-07-28 03:34:46,280 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:3] 2025-07-28 03:34:46,280 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:7] 2025-07-28 03:34:46,280 SyncTask.java:66 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:2] 2025-07-28 03:34:46,283 RepairJob.java:143 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] running_repairs is fully synced INFO [RepairJobTask:2] 2025-07-28 03:34:46,284 RepairSession.java:270 - [repair #ce5f6ce0-6b63-11f0-a203-4fe9959db69c] Session completed successfully INFO [RepairJobTask:2] 2025-07-28 03:34:46,285 RepairRunnable.java:261 - Repair session ce5f6ce0-6b63-11f0-a203-4fe9959db69c for range [(-843293569133872293,-841972093742267167], (7215189810485907768,7225881696546112953], (4498991980371293601,4501017568922931648], (-2142723840115235299,-2138268126819375847], (3570131855988479216,3581978536259962432], (-1826405385745933081,-1777839741936392502]] finished INFO [RepairJobTask:2] 2025-07-28 03:34:46,288 ActiveRepairService.java:452 - [repair #ce5ab1f0-6b63-11f0-a203-4fe9959db69c] Not a global repair, will not do anticompaction INFO [InternalResponseStage:5] 2025-07-28 03:34:46,299 RepairRunnable.java:343 - Repair command #1 finished in 0 seconds INFO [Repair-Task-3] 2025-07-28 03:34:47,959 RepairRunnable.java:139 - Starting repair command #2 (cfbb7a70-6b63-11f0-a203-4fe9959db69c), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, 
repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 3, pull repair: false) INFO [Repair-Task-3] 2025-07-28 03:34:48,001 RepairSession.java:228 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] new session: will sync /10.0.0.50, /10.0.0.38, /10.0.0.254 on range [(3581978536259962432,3628660809270646951], (5307180798266452298,5309638273345100524], (6115538007942460244,6122730406526679823]] for reaper_db.[running_reapers, diagnostic_event_subscription, repair_unit_v1, repair_run_by_cluster, percent_repaired_by_schedule, snapshot, repair_run, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, schema_migration, repair_run_by_unit, repair_schedule_v1, cluster, schema_migration_leader, leader, running_repairs] INFO [RepairJobTask:3] 2025-07-28 03:34:48,044 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for running_reapers (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:48,046 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,049 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,050 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,057 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,058 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,062 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.50 INFO [RepairJobTask:1] 2025-07-28 03:34:48,069 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:2] 2025-07-28 03:34:48,069 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:3] 2025-07-28 03:34:48,069 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers INFO [RepairJobTask:1] 2025-07-28 03:34:48,069 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] running_reapers is fully synced INFO [RepairJobTask:3] 2025-07-28 03:34:48,072 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:48,072 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,075 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,075 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,078 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,078 RepairJob.java:270 - Validating /10.0.0.50 INFO 
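Once a repair command finishes ("Repair command #1 finished in 0 seconds" above), Reaper persists run state in the same reaper_db tables it has been repairing. A hedged peek through cqlsh, with the same host/port assumptions as the schema sketch earlier; only the table names are known from this log, so the queries stay at SELECT *:

# inspect Reaper's bookkeeping directly in its storage keyspace
cqlsh 10.0.0.50 9041 -e "SELECT * FROM reaper_db.repair_run LIMIT 3;"
cqlsh 10.0.0.50 9041 -e "SELECT * FROM reaper_db.running_repairs LIMIT 3;"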
[AntiEntropyStage:1] 2025-07-28 03:34:48,081 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.50 INFO [RepairJobTask:2] 2025-07-28 03:34:48,083 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-07-28 03:34:48,084 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-07-28 03:34:48,085 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-07-28 03:34:48,085 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] diagnostic_event_subscription is fully synced INFO [RepairJobTask:3] 2025-07-28 03:34:48,086 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:48,086 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,090 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,090 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,093 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,094 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,097 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.50 INFO [RepairJobTask:4] 2025-07-28 03:34:48,099 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-07-28 03:34:48,099 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:3] 2025-07-28 03:34:48,099 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-07-28 03:34:48,100 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] repair_unit_v1 is fully synced INFO [RepairJobTask:3] 2025-07-28 03:34:48,101 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:3] 2025-07-28 03:34:48,101 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,108 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,108 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,110 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,110 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 
03:34:48,112 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.50
INFO [RepairJobTask:3] 2025-07-28 03:34:48,117 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:3] 2025-07-28 03:34:48,117 RepairJob.java:257 - Validating /10.0.0.38
INFO [RepairJobTask:2] 2025-07-28 03:34:48,118 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster
INFO [RepairJobTask:4] 2025-07-28 03:34:48,118 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster
INFO [RepairJobTask:3] 2025-07-28 03:34:48,118 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_cluster
INFO [RepairJobTask:1] 2025-07-28 03:34:48,119 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] repair_run_by_cluster is fully synced
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,120 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,120 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,123 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,123 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,125 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:34:48,126 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:3] 2025-07-28 03:34:48,126 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:4] 2025-07-28 03:34:48,126 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:3] 2025-07-28 03:34:48,126 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] percent_repaired_by_schedule is fully synced
INFO [RepairJobTask:5] 2025-07-28 03:34:48,131 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for snapshot (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:5] 2025-07-28 03:34:48,131 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,133 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,133 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,136 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,136 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,138 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:34:48,140 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for snapshot
INFO [RepairJobTask:4] 2025-07-28 03:34:48,140 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot
INFO [RepairJobTask:5] 2025-07-28 03:34:48,140 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for snapshot
INFO [RepairJobTask:4] 2025-07-28 03:34:48,140 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] snapshot is fully synced
INFO [RepairJobTask:4] 2025-07-28 03:34:48,200 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-07-28 03:34:48,200 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,202 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,204 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,208 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,208 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,213 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:34:48,215 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run
INFO [RepairJobTask:4] 2025-07-28 03:34:48,215 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run
INFO [RepairJobTask:4] 2025-07-28 03:34:48,215 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run
INFO [RepairJobTask:5] 2025-07-28 03:34:48,216 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] repair_run is fully synced
INFO [RepairJobTask:5] 2025-07-28 03:34:48,228 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:5] 2025-07-28 03:34:48,228 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,231 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,231 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,236 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,236 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,238 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50
INFO [RepairJobTask:5] 2025-07-28 03:34:48,244 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:2] 2025-07-28 03:34:48,244 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:4] 2025-07-28 03:34:48,244 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:4] 2025-07-28 03:34:48,244 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:5] 2025-07-28 03:34:48,244 RepairJob.java:257 - Validating /10.0.0.38
INFO [RepairJobTask:3] 2025-07-28 03:34:48,245 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] repair_run_by_cluster_v2 is fully synced
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,260 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,260 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,267 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,268 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,293 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:34:48,298 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:2] 2025-07-28 03:34:48,298 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:2] 2025-07-28 03:34:48,298 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:4] 2025-07-28 03:34:48,299 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for schema_migration (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-07-28 03:34:48,299 RepairJob.java:257 - Validating /10.0.0.38
INFO [RepairJobTask:2] 2025-07-28 03:34:48,299 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] repair_schedule_by_cluster_and_keyspace is fully synced
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,302 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,302 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,304 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,304 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,307 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.50
INFO [RepairJobTask:4] 2025-07-28 03:34:48,308 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration
INFO [RepairJobTask:4] 2025-07-28 03:34:48,308 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for schema_migration
INFO [RepairJobTask:4] 2025-07-28 03:34:48,308 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration
INFO [RepairJobTask:4] 2025-07-28 03:34:48,308 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] schema_migration is fully synced
INFO [RepairJobTask:3] 2025-07-28 03:34:48,312 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:3] 2025-07-28 03:34:48,312 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,318 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,318 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,323 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,324 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,325 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:34:48,325 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit
INFO [RepairJobTask:6] 2025-07-28 03:34:48,326 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_unit
INFO [RepairJobTask:1] 2025-07-28 03:34:48,328 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_unit
INFO [RepairJobTask:6] 2025-07-28 03:34:48,328 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] repair_run_by_unit is fully synced
INFO [RepairJobTask:1] 2025-07-28 03:34:48,330 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-07-28 03:34:48,330 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,333 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,333 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,335 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,335 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,337 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:34:48,338 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_v1
INFO [RepairJobTask:2] 2025-07-28 03:34:48,338 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_schedule_v1
INFO [RepairJobTask:2] 2025-07-28 03:34:48,338 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1
INFO [RepairJobTask:2] 2025-07-28 03:34:48,338 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] repair_schedule_v1 is fully synced
INFO [RepairJobTask:1] 2025-07-28 03:34:48,342 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-07-28 03:34:48,342 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,344 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,344 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,346 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,346 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,348 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.50
INFO [RepairJobTask:1] 2025-07-28 03:34:48,349 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for cluster
INFO [RepairJobTask:3] 2025-07-28 03:34:48,349 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for cluster
INFO [RepairJobTask:2] 2025-07-28 03:34:48,349 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster
INFO [RepairJobTask:3] 2025-07-28 03:34:48,349 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] cluster is fully synced
INFO [RepairJobTask:3] 2025-07-28 03:34:48,351 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for schema_migration_leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:3] 2025-07-28 03:34:48,351 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,353 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,354 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,359 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,359 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,361 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.50
INFO [RepairJobTask:3] 2025-07-28 03:34:48,361 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for schema_migration_leader
INFO [RepairJobTask:6] 2025-07-28 03:34:48,361 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader
INFO [RepairJobTask:1] 2025-07-28 03:34:48,362 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration_leader
INFO [RepairJobTask:6] 2025-07-28 03:34:48,362 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] schema_migration_leader is fully synced
INFO [RepairJobTask:6] 2025-07-28 03:34:48,363 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:6] 2025-07-28 03:34:48,363 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,366 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,366 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,369 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,369 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,372 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.50
INFO [RepairJobTask:4] 2025-07-28 03:34:48,372 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for leader
INFO [RepairJobTask:5] 2025-07-28 03:34:48,372 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for leader
INFO [RepairJobTask:7] 2025-07-28 03:34:48,373 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader
INFO [RepairJobTask:1] 2025-07-28 03:34:48,373 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] leader is fully synced
INFO [RepairJobTask:1] 2025-07-28 03:34:48,415 RepairJob.java:234 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for running_repairs (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-07-28 03:34:48,415 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,420 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,420 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,422 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,422 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:48,425 RepairSession.java:180 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.50
INFO [RepairJobTask:1] 2025-07-28 03:34:48,426 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-07-28 03:34:48,426 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-07-28 03:34:48,426 SyncTask.java:66 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs
INFO [RepairJobTask:1] 2025-07-28 03:34:48,426 RepairJob.java:143 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] running_repairs is fully synced
INFO [RepairJobTask:1] 2025-07-28 03:34:48,427 RepairSession.java:270 - [repair #cfc1bc00-6b63-11f0-a203-4fe9959db69c] Session completed successfully
INFO [RepairJobTask:1] 2025-07-28 03:34:48,427 RepairRunnable.java:261 - Repair session cfc1bc00-6b63-11f0-a203-4fe9959db69c for range [(3581978536259962432,3628660809270646951], (5307180798266452298,5309638273345100524], (6115538007942460244,6122730406526679823]] finished
INFO [RepairJobTask:1] 2025-07-28 03:34:48,428 ActiveRepairService.java:452 - [repair #cfbb7a70-6b63-11f0-a203-4fe9959db69c] Not a global repair, will not do anticompaction
INFO [InternalResponseStage:5] 2025-07-28 03:34:48,431 RepairRunnable.java:343 - Repair command #2 finished in 0 seconds
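NOTE: Every table in the session above follows the same anti-entropy cycle: the coordinator requests merkle trees from the three replicas (RepairJob.java:234), each replica is validated in turn (RepairJob.java:257/270), the trees are compared pairwise (SyncTask.java:66; three replicas yield three pairs), and the table is declared "fully synced" once all pairs match, so no streaming is needed. A minimal sketch of re-running one of the finished subranges by hand, assuming nodetool is available inside this container; the token bounds come from the session above, -st/-et limit the repair to a single token range, -full requests a non-incremental repair to match the session options, and 7201 is the CASSANDRA_JMX_LOCAL_PORT configured for this deployment:

  # Hypothetical manual re-run of one subrange repaired above (sketch only)
  nodetool -p 7201 repair -full -st 3581978536259962432 -et 3628660809270646951 reaper_db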
INFO [Repair-Task-4] 2025-07-28 03:34:55,769 RepairRunnable.java:139 - Starting repair command #3 (d4633090-6b63-11f0-a203-4fe9959db69c), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 4, pull repair: false)
INFO [Repair-Task-4] 2025-07-28 03:34:55,785 RepairSession.java:228 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] new session: will sync /10.0.0.50, /10.0.0.38, /10.0.0.254 on range [(2496560710910618790,2510270563682333576], (7448023030971066699,7477051100875918427], (3722763813389325815,3744759543826131968], (7573455707205395038,7574173626998417857]] for reaper_db.[running_reapers, diagnostic_event_subscription, repair_unit_v1, repair_run_by_cluster, percent_repaired_by_schedule, snapshot, repair_run, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, schema_migration, repair_run_by_unit, repair_schedule_v1, cluster, schema_migration_leader, leader, running_repairs]
INFO [RepairJobTask:2] 2025-07-28 03:34:55,843 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for running_reapers (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:2] 2025-07-28 03:34:55,844 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,849 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,849 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,853 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,853 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,857 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.50
INFO [RepairJobTask:3] 2025-07-28 03:34:55,858 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for running_reapers
INFO [RepairJobTask:2] 2025-07-28 03:34:55,858 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers
INFO [RepairJobTask:4] 2025-07-28 03:34:55,858 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_reapers
INFO [RepairJobTask:2] 2025-07-28 03:34:55,859 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] running_reapers is fully synced
INFO [RepairJobTask:2] 2025-07-28 03:34:55,862 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:2] 2025-07-28 03:34:55,862 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,864 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,865 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,866 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,867 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,868 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.50
INFO [RepairJobTask:1] 2025-07-28 03:34:55,869 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-07-28 03:34:55,869 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-07-28 03:34:55,870 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-07-28 03:34:55,870 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] diagnostic_event_subscription is fully synced
INFO [RepairJobTask:4] 2025-07-28 03:34:55,873 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-07-28 03:34:55,873 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,879 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,879 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,885 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,886 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,900 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:34:55,901 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1
INFO [RepairJobTask:5] 2025-07-28 03:34:55,902 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_unit_v1
INFO [RepairJobTask:3] 2025-07-28 03:34:55,902 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_unit_v1
INFO [RepairJobTask:4] 2025-07-28 03:34:55,902 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] repair_unit_v1 is fully synced
INFO [RepairJobTask:2] 2025-07-28 03:34:55,904 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:2] 2025-07-28 03:34:55,904 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,910 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,910 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,914 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,915 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,925 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:34:55,927 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster
INFO [RepairJobTask:4] 2025-07-28 03:34:55,927 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_cluster
INFO [RepairJobTask:3] 2025-07-28 03:34:55,926 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster
INFO [RepairJobTask:5] 2025-07-28 03:34:55,928 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] repair_run_by_cluster is fully synced
INFO [RepairJobTask:5] 2025-07-28 03:34:55,933 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:5] 2025-07-28 03:34:55,933 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,935 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,936 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,942 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,942 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,945 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50
INFO [RepairJobTask:4] 2025-07-28 03:34:55,946 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:2] 2025-07-28 03:34:55,946 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:5] 2025-07-28 03:34:55,945 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:2] 2025-07-28 03:34:55,948 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] percent_repaired_by_schedule is fully synced
INFO [RepairJobTask:2] 2025-07-28 03:34:55,953 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for snapshot (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:2] 2025-07-28 03:34:55,953 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,955 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,956 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,960 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,960 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:55,963 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.50
INFO [RepairJobTask:3] 2025-07-28 03:34:55,964 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot
INFO [RepairJobTask:2] 2025-07-28 03:34:55,964 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for snapshot
INFO [RepairJobTask:4] 2025-07-28 03:34:55,964 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for snapshot
INFO [RepairJobTask:4] 2025-07-28 03:34:55,965 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] snapshot is fully synced
INFO [RepairJobTask:4] 2025-07-28 03:34:56,014 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-07-28 03:34:56,014 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,016 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,016 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,022 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,022 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,024 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.50
INFO [RepairJobTask:3] 2025-07-28 03:34:56,024 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run
INFO [RepairJobTask:2] 2025-07-28 03:34:56,024 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run
INFO [RepairJobTask:4] 2025-07-28 03:34:56,024 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run
INFO [RepairJobTask:2] 2025-07-28 03:34:56,024 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] repair_run is fully synced
INFO [RepairJobTask:2] 2025-07-28 03:34:56,035 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:2] 2025-07-28 03:34:56,036 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,040 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,040 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,043 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,044 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,049 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50
INFO [RepairJobTask:6] 2025-07-28 03:34:56,051 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-07-28 03:34:56,051 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:7] 2025-07-28 03:34:56,051 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-07-28 03:34:56,051 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] repair_run_by_cluster_v2 is fully synced
INFO [RepairJobTask:1] 2025-07-28 03:34:56,061 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-07-28 03:34:56,062 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,064 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,066 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,071 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,071 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,075 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50
INFO [RepairJobTask:3] 2025-07-28 03:34:56,075 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:4] 2025-07-28 03:34:56,075 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:2] 2025-07-28 03:34:56,076 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:6] 2025-07-28 03:34:56,076 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] repair_schedule_by_cluster_and_keyspace is fully synced
INFO [RepairJobTask:6] 2025-07-28 03:34:56,078 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for schema_migration (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:6] 2025-07-28 03:34:56,078 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,080 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,080 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,091 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,092 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,098 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.50
INFO [RepairJobTask:5] 2025-07-28 03:34:56,099 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration
INFO [RepairJobTask:1] 2025-07-28 03:34:56,099 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration
INFO [RepairJobTask:7] 2025-07-28 03:34:56,100 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for schema_migration
INFO [RepairJobTask:1] 2025-07-28 03:34:56,100 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] schema_migration is fully synced
INFO [RepairJobTask:1] 2025-07-28 03:34:56,108 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-07-28 03:34:56,108 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,110 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,111 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,114 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,115 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,118 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.50
INFO [RepairJobTask:1] 2025-07-28 03:34:56,119 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_unit
INFO [RepairJobTask:7] 2025-07-28 03:34:56,119 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_unit
INFO [RepairJobTask:3] 2025-07-28 03:34:56,120 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit
INFO [RepairJobTask:7] 2025-07-28 03:34:56,121 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] repair_run_by_unit is fully synced
INFO [RepairJobTask:7] 2025-07-28 03:34:56,133 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:7] 2025-07-28 03:34:56,134 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,137 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,137 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,165 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,166 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,169 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.50
INFO [RepairJobTask:7] 2025-07-28 03:34:56,169 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_v1
INFO [RepairJobTask:4] 2025-07-28 03:34:56,169 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1
INFO [RepairJobTask:6] 2025-07-28 03:34:56,169 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_schedule_v1
INFO [RepairJobTask:4] 2025-07-28 03:34:56,169 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] repair_schedule_v1 is fully synced
INFO [RepairJobTask:4] 2025-07-28 03:34:56,172 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-07-28 03:34:56,172 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,175 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,175 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,177 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,177 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,180 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.50
INFO [RepairJobTask:7] 2025-07-28 03:34:56,180 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for cluster
INFO [RepairJobTask:5] 2025-07-28 03:34:56,180 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for cluster
INFO [RepairJobTask:3] 2025-07-28 03:34:56,180 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster
INFO [RepairJobTask:5] 2025-07-28 03:34:56,181 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] cluster is fully synced
INFO [RepairJobTask:5] 2025-07-28 03:34:56,183 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for schema_migration_leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:5] 2025-07-28 03:34:56,183 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,185 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,185 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,188 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,188 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,189 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.50
INFO [RepairJobTask:5] 2025-07-28 03:34:56,190 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration_leader
INFO [RepairJobTask:7] 2025-07-28 03:34:56,190 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader
INFO [RepairJobTask:3] 2025-07-28 03:34:56,191 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for schema_migration_leader
INFO [RepairJobTask:7] 2025-07-28 03:34:56,191 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] schema_migration_leader is fully synced
INFO [RepairJobTask:7] 2025-07-28 03:34:56,193 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:7] 2025-07-28 03:34:56,193 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,195 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,195 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,200 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,200 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,202 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.50
INFO [RepairJobTask:7] 2025-07-28 03:34:56,202 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for leader
INFO [RepairJobTask:1] 2025-07-28 03:34:56,202 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for leader
INFO [RepairJobTask:6] 2025-07-28 03:34:56,202 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader
INFO [RepairJobTask:1] 2025-07-28 03:34:56,202 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] leader is fully synced
INFO [RepairJobTask:7] 2025-07-28 03:34:56,262 RepairJob.java:234 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for running_repairs (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:7] 2025-07-28 03:34:56,262 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,265 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,266 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,272 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,272 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:34:56,275 RepairSession.java:180 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.50
INFO [RepairJobTask:3] 2025-07-28 03:34:56,275 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs
INFO [RepairJobTask:7] 2025-07-28 03:34:56,275 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_repairs
INFO [RepairJobTask:6] 2025-07-28 03:34:56,275 SyncTask.java:66 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for running_repairs
INFO [RepairJobTask:6] 2025-07-28 03:34:56,276 RepairJob.java:143 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] running_repairs is fully synced
INFO [RepairJobTask:6] 2025-07-28 03:34:56,277 RepairSession.java:270 - [repair #d465a190-6b63-11f0-a203-4fe9959db69c] Session completed successfully
INFO [RepairJobTask:6] 2025-07-28 03:34:56,278 RepairRunnable.java:261 - Repair session d465a190-6b63-11f0-a203-4fe9959db69c for range [(2496560710910618790,2510270563682333576], (7448023030971066699,7477051100875918427], (3722763813389325815,3744759543826131968], (7573455707205395038,7574173626998417857]] finished
INFO [RepairJobTask:6] 2025-07-28 03:34:56,282 ActiveRepairService.java:452 - [repair #d4633090-6b63-11f0-a203-4fe9959db69c] Not a global repair, will not do anticompaction
INFO [RepairJobTask:6] 2025-07-28 03:34:56,288 RepairRunnable.java:343 - Repair command #3 finished in 0 seconds
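NOTE: Several repair sessions interleave in this log and are distinguished only by their UUIDs; the Validator.java:281 lines that follow belong to session d5bd9070-6b63-11f0-bf49-65ab5c561006, where this node (10.0.0.50) is a replica sending completed merkle trees back to a coordinator on 10.0.0.254 rather than coordinating itself. A minimal sketch for tracing one session end-to-end, using the console log path this container writes to:

  # Sketch: follow a single repair session by its UUID
  grep 'd465a190-6b63-11f0-a203-4fe9959db69c' /var/log/contrail/config-database-cassandra/console.log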
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,126 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,144 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster_v2
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,179 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.diagnostic_event_subscription
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,201 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_cluster
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,223 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.snapshot
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,242 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.cluster
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,255 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_run_by_unit
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,273 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.leader
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,322 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_repairs
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,335 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_unit_v1
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,348 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration_leader
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,369 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_by_cluster_and_keyspace
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,392 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.schema_migration
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,405 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.repair_schedule_v1
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,426 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.running_reapers
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,439 Validator.java:281 - [repair #d5bd9070-6b63-11f0-bf49-65ab5c561006] Sending completed merkle tree to /10.0.0.254 for reaper_db.percent_repaired_by_schedule
INFO [AntiEntropyStage:1] 2025-07-28 03:34:58,452 ActiveRepairService.java:452 - [repair #d5bbe2c0-6b63-11f0-bf49-65ab5c561006] Not a global repair, will not do anticompaction
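NOTE: "Not a global repair, will not do anticompaction" is expected here: these sessions repair explicit token subranges (see "# of ranges" in each command's options), and Cassandra only performs anticompaction after global repairs, so ActiveRepairService skips it. A quick sketch, under the assumption that this log is complete, for confirming that every repair command started in it also finished:

  # Sketch: compare started vs. finished repair commands in this log
  LOG=/var/log/contrail/config-database-cassandra/console.log
  echo "started=$(grep -c 'Starting repair command' "$LOG") finished=$(grep -c 'RepairRunnable.*finished in' "$LOG")"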
INFO [Repair-Task-5] 2025-07-28 03:35:05,850 RepairRunnable.java:139 - Starting repair command #4 (da654690-6b63-11f0-a203-4fe9959db69c), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false)
INFO [Repair-Task-5] 2025-07-28 03:35:05,918 RepairSession.java:228 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] new session: will sync /10.0.0.50, /10.0.0.38, /10.0.0.254 on range [(-2349803900571038453,-2282172611986764806]] for reaper_db.[running_reapers, diagnostic_event_subscription, repair_unit_v1, repair_run_by_cluster, percent_repaired_by_schedule, snapshot, repair_run, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, schema_migration, repair_run_by_unit, repair_schedule_v1, cluster, schema_migration_leader, leader, running_repairs]
INFO [RepairJobTask:2] 2025-07-28 03:35:05,979 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for running_reapers (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:2] 2025-07-28 03:35:05,980 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:05,987 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:05,987 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:05,995 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:05,995 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,000 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_reapers from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:35:06,001 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for running_reapers
INFO [RepairJobTask:4] 2025-07-28 03:35:06,002 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_reapers
INFO [RepairJobTask:3] 2025-07-28 03:35:06,041 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:3] 2025-07-28 03:35:06,041 RepairJob.java:257 - Validating /10.0.0.38
INFO [RepairJobTask:5] 2025-07-28 03:35:06,039 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_reapers
INFO [RepairJobTask:1] 2025-07-28 03:35:06,047 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] running_reapers is fully synced
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,059 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,060 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,063 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,063 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,070 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for diagnostic_event_subscription from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:35:06,071 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:5] 2025-07-28 03:35:06,071 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:6] 2025-07-28 03:35:06,072 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for diagnostic_event_subscription
INFO [RepairJobTask:1] 2025-07-28 03:35:06,072 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] diagnostic_event_subscription is fully synced
INFO [RepairJobTask:1] 2025-07-28 03:35:06,080 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-07-28 03:35:06,082 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,086 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,086 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,088 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,091 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,098 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_unit_v1 from /10.0.0.50
INFO [RepairJobTask:2] 2025-07-28 03:35:06,100 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_unit_v1
INFO [RepairJobTask:5] 2025-07-28 03:35:06,100 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_unit_v1
INFO [RepairJobTask:3] 2025-07-28 03:35:06,100 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_unit_v1
INFO [RepairJobTask:5] 2025-07-28 03:35:06,100 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] repair_unit_v1 is fully synced
INFO [RepairJobTask:5] 2025-07-28 03:35:06,113 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:5] 2025-07-28 03:35:06,114 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,116 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,118 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,121 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,121 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,127 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster from /10.0.0.50
INFO [RepairJobTask:4] 2025-07-28 03:35:06,127 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster
INFO [RepairJobTask:3] 2025-07-28 03:35:06,127 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_cluster
INFO [RepairJobTask:3] 2025-07-28 03:35:06,127 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster
INFO [RepairJobTask:3] 2025-07-28 03:35:06,128 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] repair_run_by_cluster is fully synced
INFO [RepairJobTask:4] 2025-07-28 03:35:06,134 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-07-28 03:35:06,135 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,136 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,137 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,152 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,155 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,172 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for percent_repaired_by_schedule from /10.0.0.50
INFO [RepairJobTask:4] 2025-07-28 03:35:06,175 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:3] 2025-07-28 03:35:06,175 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:3] 2025-07-28 03:35:06,175 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for percent_repaired_by_schedule
INFO [RepairJobTask:3] 2025-07-28 03:35:06,175 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] percent_repaired_by_schedule is fully synced
INFO [RepairJobTask:3] 2025-07-28 03:35:06,185 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for snapshot (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:3] 2025-07-28 03:35:06,186 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,196 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,196 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,199 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,199 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,207 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for snapshot from /10.0.0.50
INFO [RepairJobTask:3] 2025-07-28 03:35:06,208 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for snapshot
INFO [RepairJobTask:5] 2025-07-28 03:35:06,208 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for snapshot
INFO [RepairJobTask:4] 2025-07-28 03:35:06,209 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for snapshot
INFO [RepairJobTask:6] 2025-07-28 03:35:06,209 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] snapshot is fully synced
INFO [RepairJobTask:4] 2025-07-28 03:35:06,296 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:4] 2025-07-28 03:35:06,296 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,306 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,306 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,312 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,312 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,315 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run from /10.0.0.50
INFO [RepairJobTask:4] 2025-07-28 03:35:06,315 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run
INFO [RepairJobTask:5] 2025-07-28 03:35:06,315 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run
INFO [RepairJobTask:1] 2025-07-28 03:35:06,315 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run
INFO [RepairJobTask:4] 2025-07-28 03:35:06,316 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] repair_run is fully synced
INFO [RepairJobTask:6] 2025-07-28 03:35:06,332 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:6] 2025-07-28 03:35:06,332 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,335 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,335 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,343 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,343 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,344 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.50
INFO [RepairJobTask:5] 2025-07-28 03:35:06,349 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-07-28 03:35:06,349 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:6] 2025-07-28 03:35:06,349 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster_v2
INFO [RepairJobTask:1] 2025-07-28 03:35:06,349 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] repair_run_by_cluster_v2 is fully synced
INFO [RepairJobTask:1] 2025-07-28 03:35:06,357 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:1] 2025-07-28 03:35:06,357 RepairJob.java:257 - Validating /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,368 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,368 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,375 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,375 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,384 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.50
INFO [RepairJobTask:6] 2025-07-28 03:35:06,394 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:5] 2025-07-28 03:35:06,394 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:4] 2025-07-28 03:35:06,397 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_schedule_by_cluster_and_keyspace
INFO [RepairJobTask:5] 2025-07-28 03:35:06,398 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for schema_migration (to [/10.0.0.38, /10.0.0.254, /10.0.0.50])
INFO [RepairJobTask:5] 2025-07-28 03:35:06,413 RepairJob.java:257 - Validating /10.0.0.38
INFO [RepairJobTask:6] 2025-07-28 03:35:06,413 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] repair_schedule_by_cluster_and_keyspace is fully synced
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,419 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.38
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,419 RepairJob.java:270 - Validating /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,421 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.254
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,421 RepairJob.java:270 - Validating /10.0.0.50
INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,429 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration from /10.0.0.50
INFO [RepairJobTask:6] 2025-07-28 03:35:06,430 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration
INFO [RepairJobTask:5] 2025-07-28 03:35:06,432 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration
INFO [RepairJobTask:1] 2025-07-28 03:35:06,431 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints
/10.0.0.38 and /10.0.0.50 are consistent for schema_migration INFO [RepairJobTask:5] 2025-07-28 03:35:06,434 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] schema_migration is fully synced INFO [RepairJobTask:5] 2025-07-28 03:35:06,442 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-07-28 03:35:06,443 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,452 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,452 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,456 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,456 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,457 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_run_by_unit from /10.0.0.50 INFO [RepairJobTask:3] 2025-07-28 03:35:06,459 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:2] 2025-07-28 03:35:06,459 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:7] 2025-07-28 03:35:06,465 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-07-28 03:35:06,465 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] repair_run_by_unit is fully synced INFO [RepairJobTask:5] 2025-07-28 03:35:06,476 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-07-28 03:35:06,476 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,481 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,481 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,485 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,485 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,503 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for repair_schedule_v1 from /10.0.0.50 INFO [RepairJobTask:6] 2025-07-28 03:35:06,503 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-07-28 03:35:06,503 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:1] 2025-07-28 03:35:06,504 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for repair_schedule_v1 INFO 
[RepairJobTask:5] 2025-07-28 03:35:06,504 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] repair_schedule_v1 is fully synced INFO [RepairJobTask:5] 2025-07-28 03:35:06,518 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:5] 2025-07-28 03:35:06,518 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,523 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,524 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,526 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,526 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,529 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for cluster from /10.0.0.50 INFO [RepairJobTask:5] 2025-07-28 03:35:06,529 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:7] 2025-07-28 03:35:06,529 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:6] 2025-07-28 03:35:06,529 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for cluster INFO [RepairJobTask:7] 2025-07-28 03:35:06,530 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] cluster is fully synced INFO [RepairJobTask:7] 2025-07-28 03:35:06,547 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for schema_migration_leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:7] 2025-07-28 03:35:06,547 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,550 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,550 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,561 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,561 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,575 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for schema_migration_leader from /10.0.0.50 INFO [RepairJobTask:6] 2025-07-28 03:35:06,575 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-07-28 03:35:06,575 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for schema_migration_leader INFO [RepairJobTask:7] 2025-07-28 03:35:06,576 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-07-28 03:35:06,576 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] 
schema_migration_leader is fully synced INFO [RepairJobTask:2] 2025-07-28 03:35:06,585 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-07-28 03:35:06,585 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,590 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,590 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,594 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,594 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,597 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for leader from /10.0.0.50 INFO [RepairJobTask:5] 2025-07-28 03:35:06,598 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:4] 2025-07-28 03:35:06,598 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for leader INFO [RepairJobTask:7] 2025-07-28 03:35:06,603 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:2] 2025-07-28 03:35:06,603 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] leader is fully synced INFO [RepairJobTask:2] 2025-07-28 03:35:06,657 RepairJob.java:234 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Requesting merkle trees for running_repairs (to [/10.0.0.38, /10.0.0.254, /10.0.0.50]) INFO [RepairJobTask:2] 2025-07-28 03:35:06,657 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,670 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,670 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,674 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,674 RepairJob.java:270 - Validating /10.0.0.50 INFO [AntiEntropyStage:1] 2025-07-28 03:35:06,676 RepairSession.java:180 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Received merkle tree for running_repairs from /10.0.0.50 INFO [RepairJobTask:4] 2025-07-28 03:35:06,677 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:3] 2025-07-28 03:35:06,677 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.254 and /10.0.0.50 are consistent for running_repairs INFO [RepairJobTask:2] 2025-07-28 03:35:06,677 SyncTask.java:66 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:3] 2025-07-28 03:35:06,677 RepairJob.java:143 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] running_repairs is fully synced INFO [RepairJobTask:3] 2025-07-28 03:35:06,679 RepairSession.java:270 - [repair #da6fcde0-6b63-11f0-a203-4fe9959db69c] Session completed successfully 
INFO [RepairJobTask:3] 2025-07-28 03:35:06,679 RepairRunnable.java:261 - Repair session da6fcde0-6b63-11f0-a203-4fe9959db69c for range [(-2349803900571038453,-2282172611986764806]] finished INFO [RepairJobTask:3] 2025-07-28 03:35:06,680 ActiveRepairService.java:452 - [repair #da654690-6b63-11f0-a203-4fe9959db69c] Not a global repair, will not do anticompaction INFO [RepairJobTask:3] 2025-07-28 03:35:06,687 RepairRunnable.java:343 - Repair command #4 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,361 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,394 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,420 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,437 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,479 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,522 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,541 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,560 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,574 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,635 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,657 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,737 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,754 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,780 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,801 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to /10.0.0.38 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,814 Validator.java:281 - [repair #dbd2b940-6b63-11f0-87bc-23d5ed887d69] Sending completed merkle tree to 
/10.0.0.38 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-07-28 03:35:08,831 ActiveRepairService.java:452 - [repair #dbd180c0-6b63-11f0-87bc-23d5ed887d69] Not a global repair, will not do anticompaction