Infinispan distributed cache not syncing


I've configured a distributed cache and I'm trying to write values into it. Writes work, but the data does not stay in sync: when I decrement a value, only one node picks up the change while the other one still has the original value. The decrement is a plain read-modify-write, roughly as sketched below.
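
To be clear about what "decrement" means here, this is a minimal sketch rather than my exact code: the cache name, key, and value type are placeholders, and the manager is the underlying DefaultCacheManager from the configuration below.

 import org.infinispan.Cache;
 import org.infinispan.manager.EmbeddedCacheManager;

 // Illustrative read-modify-write against the clustered cache.
 // "blocked" and "client-42" are placeholder names.
 void decrement(EmbeddedCacheManager cacheManager) {
     Cache<String, Long> cache = cacheManager.getCache("blocked");

     // Cache extends ConcurrentMap, so compute() performs the decrement as a
     // single conditional write; with DIST_SYNC and numOwners = 2 I would
     // expect both owners to hold the new value once the call returns.
     cache.compute("client-42", (key, current) ->
             current == null ? null : current - 1);
 }

Here is my cacheManager configuration: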

 public CacheManager cacheManager() {
    // Set up a clustered cache manager.
    GlobalConfigurationBuilder builder = GlobalConfigurationBuilder.defaultClusteredBuilder();

    builder.serialization().marshaller(new JavaSerializationMarshaller())
            .whiteList().addRegexps("io.github.bucket4j.*", "java.util.*");

    builder.transport()
            .defaultTransport()
            .clusterName("rate-limiting-cluster")
            .addProperty("configurationFile", "default-jgroups-tcp.xml");

    org.infinispan.configuration.cache.Configuration configuration = new ConfigurationBuilder()
            .clustering()
            .cacheMode(CacheMode.DIST_SYNC)
            .hash()
            .numOwners(2)
            .numSegments(256)
            .l1().enable()
            .build();

    DefaultCacheManager cacheManager = new DefaultCacheManager(builder.build(), configuration);
    cacheManager.defineConfiguration(RateLimitingType.CACHE_BLOCKED.getValue(), configuration);
    return new SpringEmbeddedCacheManager(cacheManager);
}
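
As a sanity check for discovery, I assume the runtime JGroups view can be inspected with something along these lines (a sketch, the printouts are only illustrative): if KUBE_PING works, both pods should show up in getMembers().

 import org.infinispan.manager.DefaultCacheManager;

 // Diagnostic sketch: if the two pods formed a cluster, getMembers()
 // should list two addresses and the cluster name should match
 // "rate-limiting-cluster".
 void logClusterView(DefaultCacheManager cacheManager) {
     System.out.println("Cluster name: " + cacheManager.getClusterName());
     System.out.println("Local address: " + cacheManager.getAddress());
     System.out.println("Members: " + cacheManager.getMembers());
 }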

And here is my JGroups configuration:

<config xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns="urn:org:jgroups"
    xsi:schemaLocation="urn:org:jgroups http://www.jgroups.org/schema/jgroups.xsd">
<TCP bind_port="${TCP_PORT:7800}"
     external_addr="${JGROUPS_EXTERNAL_ADDR:match-interface:global}"
     bind_addr="match-interface:global"

     recv_buf_size="${tcp.recv_buf_size:5M}"
     send_buf_size="${tcp.send_buf_size:1M}"
     max_bundle_size="64K"
     sock_conn_timeout="300"

     thread_naming_pattern="cl"
     thread_pool.min_threads="0"
     thread_pool.max_threads="500"
     thread_pool.keep_alive_time="30000"/>
<org.jgroups.protocols.kubernetes.KUBE_PING
        port_range="0"
        namespace="${KUBE_NAMESPACE:production}"
        labels="${KUBE_LABEL:cluster=rate-limiting-cluster}"
        dump_requests="true"
/>
<MERGE3 min_interval="10000"
        max_interval="30000"/>
<FD_SOCK/>
<FD_ALL timeout="9000" interval="3000"/>
<VERIFY_SUSPECT timeout="1500"/>
<BARRIER/>
<pbcast.NAKACK2 use_mcast_xmit="false"
                discard_delivered_msgs="true"/>
<UNICAST3/>
<pbcast.STABLE desired_avg_gossip="50000"
               max_bytes="4M"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
            view_bundling="true"/>
<UFC max_credits="2M"
     min_threshold="0.4"/>
<MFC max_credits="2M"
     min_threshold="0.4"/>
<FRAG2 frag_size="60K"/>
<!--RSVP resend_interval="2000" timeout="10000"/-->
<pbcast.STATE_TRANSFER/>
</config>
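
And to confirm the transport actually picked up this stack, I assume the effective settings can be read back from the global configuration roughly like this (sketch only):

 import org.infinispan.configuration.global.GlobalConfiguration;
 import org.infinispan.manager.DefaultCacheManager;

 // Diagnostic sketch: the cluster name and the "configurationFile"
 // property should reflect the values set in cacheManager() above.
 void logTransportConfig(DefaultCacheManager cacheManager) {
     GlobalConfiguration global = cacheManager.getCacheManagerConfiguration();
     System.out.println("Cluster name: " + global.transport().clusterName());
     System.out.println("Transport properties: " + global.transport().properties());
 }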

Any help would be appreciated :) Thanks in advance.
