Friday 16 February 2018

LRU Cache with Constant Time Complexity

Below is the code implementation for an LRU cache with constant time complexity. We use a doubly linked list to maintain the access order of the cache entries, and lookups are served from an in-memory LinkedHashMap that points into the list. The doubly linked list keeps the least recently used node at the left end and the most recently used node at the right end:

lruEntry -> A -> B -> C -> D -> mruEntry


package com.test.lrucache;

public class CacheEntry<K, V> {

    private CacheEntry<K, V> next;
    private CacheEntry<K, V> previous;
    private K key;
    private V value;

    public CacheEntry(CacheEntry<K, V> previous, CacheEntry<K, V> next, K key, V value) {
        this.next = next;
        this.previous = previous;
        this.key = key;
        this.value = value;
    }

    public CacheEntry<K, V> getNext() {
        return next;
    }

    public void setNext(CacheEntry<K, V> next) {
        this.next = next;
    }

    public CacheEntry<K, V> getPrevious() {
        return previous;
    }

    public void setPrevious(CacheEntry<K, V> previous) {
        this.previous = previous;
    }

    public K getKey() {
        return key;
    }

    public void setKey(K key) {
        this.key = key;
    }

    public V getValue() {
        return value;
    }

    public void setValue(V value) {
        this.value = value;
    }
}



package com.test.lrucache;

import java.util.LinkedHashMap;
import java.util.Map;

public class LRUCache<K, V> {

    private final int maxSize;
    private final Map<K, CacheEntry<K, V>> listPointerMap;
    private CacheEntry<K, V> lruEntry;
    private CacheEntry<K, V> mruEntry;

    public LRUCache(int maxSize) {
        this.maxSize = maxSize;
        listPointerMap = new LinkedHashMap<>();
        // both ends of the list start out empty
        lruEntry = null;
        mruEntry = null;
    }

    // note: re-putting an existing key is not handled in this implementation
    public void putCacheEntry(K key, V value) {

        if (isFull()) {
            // evict the leftmost (least recently used) node
            listPointerMap.remove(lruEntry.getKey());
            lruEntry = lruEntry.getNext();
            lruEntry.setPrevious(null);
        }

        // append the new entry at the MRU (right) end
        CacheEntry<K, V> newEntry = new CacheEntry<>(mruEntry, null, key, value);
        if (mruEntry != null) {
            mruEntry.setNext(newEntry);
        }
        mruEntry = newEntry;
        // the very first entry inserted is both LRU and MRU
        if (lruEntry == null) {
            lruEntry = newEntry;
        }
        listPointerMap.put(key, newEntry);
    }

    public V getCacheEntry(K key) {

        CacheEntry<K, V> entry = listPointerMap.get(key);
        // no entry found
        if (entry == null) {
            return null;
        }

        // already the most recently used entry, so do nothing
        if (entry == mruEntry) {
            return entry.getValue();
        }

        CacheEntry<K, V> nextEntry = entry.getNext();
        CacheEntry<K, V> previousEntry = entry.getPrevious();

        if (entry == lruEntry) {
            // first (least recently used) entry
            nextEntry.setPrevious(null);
            lruEntry = nextEntry;
        } else {
            // somewhere in between: unlink it
            previousEntry.setNext(nextEntry);
            nextEntry.setPrevious(previousEntry);
        }

        // finally move our entry to the MRU end
        entry.setPrevious(mruEntry);
        entry.setNext(null);
        mruEntry.setNext(entry);
        mruEntry = entry;

        return entry.getValue();
    }

    private boolean isFull() {
        return listPointerMap.size() == maxSize;
    }
}

package com.test.lrucache;

public class MainClass {

    public static void main(String[] args) {

        LRUCache<String, String> cache = new LRUCache<>(3);

        cache.putCacheEntry("1", "One");
        cache.putCacheEntry("2", "Two");
        cache.putCacheEntry("3", "Three");

        System.out.println(cache.getCacheEntry("1"));
        System.out.println(cache.getCacheEntry("2"));

        cache.putCacheEntry("4", "Four");
        System.out.println(cache.getCacheEntry("3"));
        System.out.println(cache.getCacheEntry("4"));
    }
}
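Since entries "1" and "2" are touched after insertion, "3" becomes the least recently used entry and is evicted when "4" is added. Running MainClass should therefore print:

One
Two
null
Four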

Thursday 15 February 2018

LFU Cache

In this blogpost we will see how we can implement an LFU (least frequently used) cache in Java.

package com.test.lfucache;

public class CacheEntry<T> {

    private T data;
    private int frequency;

    public CacheEntry() {
    }

    public CacheEntry(T data, int frequency) {
        this.data = data;
        this.frequency = frequency;
    }

    public T getData() {
        return data;
    }

    public void setData(T data) {
        this.data = data;
    }

    public int getFrequency() {
        return frequency;
    }

    public void setFrequency(int frequency) {
        this.frequency = frequency;
    }
}

The above class represents a cache entry. Each entry has a frequency associated with it, which will help us decide which object to evict.


package com.test.lfucache;

import java.util.LinkedHashMap;
import java.util.Map;

public class LFUCache<K, V> {

    private final int maxSize;
    private final Map<K, CacheEntry<V>> cacheMap;

    public LFUCache(int maxSize) {
        this.maxSize = maxSize;
        cacheMap = new LinkedHashMap<>();
    }

    public void putCacheEntry(K key, V value) {
        if (isFull()) {
            K cacheKey = evictCache();
            cacheMap.remove(cacheKey);
        }
        CacheEntry<V> entry = new CacheEntry<>();
        entry.setData(value);
        entry.setFrequency(0);
        cacheMap.put(key, entry);
    }

    public V getCacheEntry(K key) {
        if (cacheMap.containsKey(key)) {
            CacheEntry<V> entry = cacheMap.get(key);
            // every hit bumps the entry's frequency
            entry.setFrequency(entry.getFrequency() + 1);
            return entry.getData();
        }
        return null;
    }

    private boolean isFull() {
        return cacheMap.size() == maxSize;
    }

    // linear scan for the entry with the lowest frequency
    private K evictCache() {
        int minFrequency = Integer.MAX_VALUE;
        K key = null;
        for (Map.Entry<K, CacheEntry<V>> entry : cacheMap.entrySet()) {
            if (entry.getValue().getFrequency() < minFrequency) {
                minFrequency = entry.getValue().getFrequency();
                key = entry.getKey();
            }
        }
        return key;
    }
}

The above class is the main cache class. We use a LinkedHashMap to store the key and cache entry; a ConcurrentHashMap could be used instead if thread safety is required. Finally, the main class looks like this:

package com.test.lfucache;

public class MainClass {

    public static void main(String[] args) {
        LFUCache<String, String> cache = new LFUCache<>(3);
        cache.putCacheEntry("1", "One");
        cache.putCacheEntry("2", "Two");
        cache.putCacheEntry("3", "Three");
        System.out.println(cache.getCacheEntry("1"));
        System.out.println(cache.getCacheEntry("2"));
        cache.putCacheEntry("4", "Four");
        System.out.println(cache.getCacheEntry("3"));
        System.out.println(cache.getCacheEntry("4"));
    }
}

Output:

One
Two
null
Four

The above LFU cache has O(n) runtime complexity for eviction, so insertion into a full cache is O(n) in the worst case. We can improve the performance with the help of another map and linked lists, as sketched below.
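The following is a minimal sketch of that O(1) idea, not code from this post: keep a per-key frequency map plus a map from each frequency to the keys at that frequency, and track the minimum frequency. A LinkedHashSet per bucket keeps insertion order, so ties evict the oldest key. The class name is invented for the example.

package com.test.lfucache;

import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Map;

public class ConstantTimeLFUCache<K, V> {

    private final int maxSize;
    private final Map<K, V> values = new HashMap<>();
    private final Map<K, Integer> frequencies = new HashMap<>();
    // frequency -> keys currently at that frequency, in insertion order
    private final Map<Integer, LinkedHashSet<K>> buckets = new HashMap<>();
    private int minFrequency = 0;

    public ConstantTimeLFUCache(int maxSize) {
        this.maxSize = maxSize;
    }

    public V getCacheEntry(K key) {
        if (!values.containsKey(key)) {
            return null;
        }
        touch(key);
        return values.get(key);
    }

    public void putCacheEntry(K key, V value) {
        if (values.containsKey(key)) {
            values.put(key, value);
            touch(key);
            return;
        }
        if (values.size() == maxSize) {
            // evict the oldest key in the lowest-frequency bucket
            K evicted = buckets.get(minFrequency).iterator().next();
            buckets.get(minFrequency).remove(evicted);
            values.remove(evicted);
            frequencies.remove(evicted);
        }
        values.put(key, value);
        frequencies.put(key, 0);
        buckets.computeIfAbsent(0, f -> new LinkedHashSet<>()).add(key);
        minFrequency = 0;
    }

    // move the key from its current frequency bucket to the next one
    private void touch(K key) {
        int frequency = frequencies.get(key);
        buckets.get(frequency).remove(key);
        if (frequency == minFrequency && buckets.get(frequency).isEmpty()) {
            minFrequency = frequency + 1;
        }
        frequencies.put(key, frequency + 1);
        buckets.computeIfAbsent(frequency + 1, f -> new LinkedHashSet<>()).add(key);
    }
}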


Monday 24 April 2017

Microservices Communication Pattern

Context

Synchronous HTTP is a bad choice, even an anti-pattern, for communication between microservices. Synchronous REST is acceptable for public APIs, but internal communication between microservices should be based on asynchronous message-passing. If we have to call other services in order to be able to serve a response to a request from a public client, the overall response time for the public client will be bad, and our service will not be as resilient as it could be, because it is coupled in time to the service it depends on.

If a service needs to trigger some action in another service, do that outside of the request/response cycle.
The preferred choice is to use asynchronous communication. In this pattern the calling service simply publishes its request (or data) and continues with other work. It does not block waiting for a response after sending a request, which improves scalability. Problems in another service will not break this service: if other services are temporarily broken, the calling service might not be able to complete a process end to end, but the calling service is not broken itself.

Thus, using the asynchronous pattern, the services are more decoupled than with the synchronous pattern, which preserves the autonomy of each service.


Solution

Implement a microservice communication platform that is asynchronous, addresses scalability, is loosely coupled from the business logic and fits a cross-platform implementation of microservices. Let's see, with the help of a message broker (RabbitMQ), how we can address these issues.

Design



The high level design of the platform contains the following components:

Platform Component

The platform component covers dynamically creating the exchanges and queues, routing messages from exchanges to queues, and adding and updating the routing keys for queues.

Message Channel:

This component takes a few arguments, such as broker host, port, username and password, and establishes a connection with the RabbitMQ cluster. The message channel library (jar) should be a part of the event source, which is itself a microservice.

Event Framework:

The Event Framework component abstracts the broker, queue and routing key creation on the RabbitMQ cluster. It also supports an annotation library. Distributed events are annotated with @DistributedEvent; these events are published to the RabbitMQ cluster using an API exposed by this framework. The messages are published to a fanout exchange, which routes all incoming messages to a data exchange (topic/header exchange). The topic exchange routes messages to different queues based on routing keys.

Exchange:

An exchange-to-exchange binding allows messages to be sent/routed from one exchange to another exchange. It works more or less the same way as an exchange-to-queue binding; the only significant difference on the surface is that both exchanges (source and destination) have to be specified.
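As an illustration, here is a minimal sketch of such a topology using the RabbitMQ Java client. The package, exchange names, queue name and routing keys are invented for the example; only the API calls themselves are real.

package com.test.messaging;

import java.nio.charset.StandardCharsets;

import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;

public class ExchangeBindingDemo {

    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost"); // assumption: broker runs locally

        try (Connection connection = factory.newConnection();
             Channel channel = connection.createChannel()) {

            // fanout entry point and topic data exchange, both durable
            channel.exchangeDeclare("events.inbound", "fanout", true);
            channel.exchangeDeclare("events.data", "topic", true);

            // exchange-to-exchange binding: destination first, then source
            channel.exchangeBind("events.data", "events.inbound", "");

            // queue-per-service, bound with its routing-key pattern
            channel.queueDeclare("user-service-queue", true, false, false, null);
            channel.queueBind("user-service-queue", "events.data", "user.event.*");

            // anything published to the fanout is routed on by the topic exchange
            channel.basicPublish("events.inbound", "user.event.created", null,
                    "{\"userId\":\"42\"}".getBytes(StandardCharsets.UTF_8));
        }
    }
}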

Two major advantages of using exchange-to-exchange bindings, based on experience and what I have found after doing some research, are:

Exchange-to-exchange bindings are much more flexible in terms of the topology you can design, promote decoupling and reduce binding churn
Exchange-to-exchange bindings are said to be very lightweight and as a result help to increase performance


Queue:

The platform uses a queue-per-service concept, with queues that are persistent by nature. Every time a new microservice is created it will have a consumer and a queue dedicated to it. The queue setup process will make sure that the routing keys are associated accordingly, so that the queue receives the intended messages. In a microservice cluster one of the instances will be the active consumer, giving centralized processing of all incoming messages.

Payload:
The message payload will be a JSON string. Every event is converted to a JSON string, and the receiver, upon receiving the message, deserializes it into the appropriate object; this also facilitates cross-platform interoperability.
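For example, with a JSON library such as Jackson (just one possible choice; the UserCreatedEvent type below is hypothetical), the producer and consumer sides could look like this:

package com.test.messaging;

import com.fasterxml.jackson.databind.ObjectMapper;

public class PayloadDemo {

    // hypothetical event type, used only for this example
    public static class UserCreatedEvent {
        public String userId;

        public UserCreatedEvent() {
        }

        public UserCreatedEvent(String userId) {
            this.userId = userId;
        }
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();

        // producer side: event object -> JSON string payload
        String payload = mapper.writeValueAsString(new UserCreatedEvent("42"));

        // consumer side: JSON string payload -> typed event object
        UserCreatedEvent event = mapper.readValue(payload, UserCreatedEvent.class);
        System.out.println(payload + " -> " + event.userId);
    }
}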

Routing Keys:
Routing keys are an important aspect of getting messages to the destination queues. We need to maintain a standard naming convention for routing keys, like domain.event.action (user.event.created, user.event.deleted). All the services will update their routing keys to receive the intended messages.


Platform Client (Producer):


The producer includes the event channel, event framework and event model. The event data (model) is transformed into JSON before sending it to the exchange with the appropriate routing key, as explained earlier.


Platform Client (Consumer):


On the consumer side we have a message handler framework which decides, based on message type, the action to be performed once a message has been received by the consumer.




Implementation (POC)

Connection Factory
The connection factory abstracts queue connection details. The host, port, etc. are all abstracted with default values; the defaults can be overwritten by properties specified in a property file. This shields the client from configuration details, so the client can concentrate purely on producing and consuming messages.

Create a file named channel-config.properties with the following contents:

connection.host=<host>
connection.port=<port>
connection.username=<user_name>
connection.password=<password>


Event

The distributed events generated at the source are annotated with @DistributedEvent and extend AbstractEvent.
An example event looks like this:

@DistributedEvent
public class TestEvent extends AbstractEvent {

    public TestEvent() {
    }
}

Publisher

The framework provides a uniform API to publish events. To publish any distributed event we instantiate an EventPublisher and invoke the publishEvent method with the event object.

To publish an event, we just need the following code snippet:

EventPublisher<Event> publisher = new EventPublisher<>();
publisher.publishEvent(new TestEvent());

Consumer

Every consumer class has to be annotated with @DistributedConsumer, extend EventConsumer and override the consumeMessage method. The @DistributedConsumer annotation should also include a name attribute, which is the routing key for the queue. The application on startup scans through all the consumers and starts listening to the queues.

To start the consumers we have to include the following code snippet:

EventConsumerStarter.loadContext();

A sample consumer looks like

@DistributedConsumer
public class TestConsumer extends EventConsumer {

    @Override
    public void consumeMessage(Object object) {
        // implementation
    }
}

Spring Framework Integration

The framework provides seamless integration with Spring events. Spring's event publishing API is exposed through ApplicationEventPublisher. The framework implements that interface, and if an event is annotated with @DistributedEvent it publishes the event to the distributed message broker as well. If the event listened for by an event listener has a @DistributedEvent annotation, the framework subscribes to the corresponding queue to listen for events.
If the Producer Java class below is a source of events, then to use Spring events we write the following code snippet:
@Component
public class Producer {

    @Autowired
    private EventPublisher eventPublisher;

    public void createTask() {
        eventPublisher.publishEvent(new TaskAssignedEvent());
    }
}

References

1. Routing Topologies for Performance and Scalability with RabbitMQ [http://spring.io/blog/2011/04/01/routing-topologies-for-performance-and-scalability-with-rabbitmq/]
2. RabbitMQ – Best Practices For Designing Exchanges, Queues And Bindings? [https://derickbailey.com/2015/09/02/rabbitmq-best-practices-for-designing-exchanges-queues-and-bindings/]
3. RabbitMQ Tutorials [https://www.rabbitmq.com/getstarted.html]
4. Code examples [https://github.com/badalb/messaging-platform/tree/master/messaging-platform]

Tuesday 25 October 2016

Spring Cloud Microservice : Architectural Overview


This blogpost explains a common microservice architecture using Spring Cloud. We will primarily focus on the different components Spring Cloud uses for implementing a microservice architecture with Spring Boot. The code examples will be covered in a separate blogpost.

Spring Cloud Microservice Architecture

Client: The clients are any application clients interested in consuming the exposed microservices.

API Gateway: The API Gateway can handle a request by invoking multiple microservices and aggregating the results. It can translate between web protocols such as HTTP and WebSocket and web-unfriendly protocols that are used internally. Zuul is a JVM-based router and server-side load balancer from Netflix; Spring Cloud has a nice integration with an embedded Zuul proxy, which can accept requests from clients, route them to different servers and aggregate the results.
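A minimal sketch of an embedded Zuul proxy is shown below; the class name, route and service name are placeholders, not part of the original post:

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.netflix.zuul.EnableZuulProxy;

@EnableZuulProxy
@SpringBootApplication
public class GatewayApplication {

    public static void main(String[] args) {
        SpringApplication.run(GatewayApplication.class, args);
    }
}

// application.properties (hypothetical route): requests to /users/**
// are forwarded to the service registered as user-service
//   zuul.routes.users.path=/users/**
//   zuul.routes.users.serviceId=user-service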

Config Service:  Spring Cloud Config provides server and client-side support for externalised configuration in a distributed system. With the Config Server we have a central place to manage external properties for applications across all environments. The concepts on both client and server map identically to the Spring Environment and PropertySource abstractions, so they fit very well with Spring applications, but can be used with any application running in any language. As an application moves through the deployment pipeline from dev to test and into production we can manage the configuration between those environments and be certain that applications have everything they need to run when they migrate.

Service Registration and Discovery (Netflix Eureka): The microservice style of architecture is not so much about building individual services as it is about making the interactions between services reliable and failure-tolerant. A service registry is a phone book for your microservices. Each service registers itself with the service registry and tells the registry where it lives (host, port, node name) and perhaps other service-specific metadata, things that other services can use to make informed decisions about it.

There are several popular options for service registries. Netflix built and then open-sourced their own service registry, Eureka. Another newer, but increasingly popular, option is Consul. Spring Cloud supports both Eureka and Consul.
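For illustration, a minimal Eureka server could look like this sketch (class name, service name and port are assumptions):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.netflix.eureka.server.EnableEurekaServer;

@EnableEurekaServer
@SpringBootApplication
public class RegistryApplication {

    public static void main(String[] args) {
        SpringApplication.run(RegistryApplication.class, args);
    }
}

// a client application would annotate its application class with
// @EnableDiscoveryClient and configure, for example:
//   spring.application.name=user-service
//   eureka.client.serviceUrl.defaultZone=http://localhost:8761/eureka/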

Circuit Breaker (Hystrix): The circuit breaker is a design pattern in modern software development. It is used to detect failures and encapsulates the logic of preventing a failure from recurring constantly (during maintenance, temporary external system failure or unexpected system difficulties).

Netflix’s Hystrix library provides an implementation of the Circuit Breaker pattern: when we apply a circuit breaker to a method, Hystrix watches for failing calls to that method, and if failures build up to a threshold, Hystrix opens the circuit so that subsequent calls automatically fail. While the circuit is open, Hystrix redirects calls to the method, and they’re passed on to our specified fallback method.
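A small sketch of that fallback behaviour follows; the service and method names are invented, and the application class would also need @EnableCircuitBreaker:

import org.springframework.stereotype.Service;

import com.netflix.hystrix.contrib.javanica.annotation.HystrixCommand;

@Service
public class GreetingService {

    // Hystrix watches calls to this method; once failures cross the
    // threshold it opens the circuit and routes calls to the fallback
    @HystrixCommand(fallbackMethod = "defaultGreeting")
    public String greet(String name) {
        throw new RuntimeException("downstream service unavailable");
    }

    public String defaultGreeting(String name) {
        return "Hello " + name + " (fallback)";
    }
}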

Ribbon: Ribbon is a client-side IPC library that is battle-tested in the cloud. It provides the following features:
  • Load balancing
  • Fault tolerance
  • Multiple protocol (HTTP, TCP, UDP) support in an asynchronous and reactive model
  • Caching and batching

We can build a microservice application that uses Netflix Ribbon and Spring Cloud Netflix to provide client-side load balancing in calls to another microservice, as sketched below.
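As a sketch, the usual Spring Cloud Netflix approach is a @LoadBalanced RestTemplate, where Ribbon resolves a logical service name to a concrete instance (user-service below is a placeholder):

import org.springframework.cloud.client.loadbalancer.LoadBalanced;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.client.RestTemplate;

@Configuration
public class RibbonClientConfiguration {

    // Ribbon intercepts calls made through this RestTemplate and
    // load-balances them across the instances of the named service
    @Bean
    @LoadBalanced
    public RestTemplate restTemplate() {
        return new RestTemplate();
    }
}

// usage: the host part is the logical service name, not a real host
//   String user = restTemplate.getForObject("http://user-service/users/42", String.class);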

We will see some code examples using Spring Boot to explore and integrate all these components in subsequent posts.

[Code Example is available here :- https://github.com/badalb/spring-cloud-microservices]

Spring Cloud Config : Externalise your configuration and avoid server restart !!! code example

As described in the previous post, Spring Cloud Config provides server and client-side support for externalised configuration in a distributed system: the Config Server gives us a central place to manage external properties for applications across all environments.

In the example below we will see how we can externalise and manage configuration from github repository and file system.

Spring Cloud Config Server

The server provides an HTTP, resource-based API for external configuration. We can create/start a config server with the @EnableConfigServer annotation on the Spring Boot application startup class.

Code:

@EnableConfigServer
@SpringBootApplication
public class ConfigServiceApplication {
    
    public static void main(String[] args) {
        SpringApplication.run(ConfigServiceApplication.class, args);
    }
}

application.properties file:
server.port=8888
# to manage config files from the local file system, uncomment the property below
#spring.cloud.config.server.git.uri=file://<your_local_system_directory>

# to manage config files from a GitHub repository, uncomment the property below
# along with the username and password
#spring.cloud.config.server.git.uri=<github_url>
spring.cloud.config.server.git.searchPaths=<some-name>
#spring.cloud.config.server.git.username=
#spring.cloud.config.server.git.password=

Finally, include the following Maven dependency:
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-config</artifactId>
    <version>1.2.0.RELEASE</version>
</dependency>

Let's assume that the spring.cloud.config.server.git.searchPaths attribute value is sample-config and we are supporting the default, dev and production environments, so your GitHub repo/local file system will have three different files:

sample-config.properties
sample-config-dev.properties
sample-config-production.properties

Now start the application and access 

http://localhost:8888/sample-config/default

output: {"name":"sample-config","profiles":["defaut"],"label":"master","version":"f57f675e23c02a9e2f8422b01f07302d41d9774f","propertySources":[{"name":"<github-default-url>","source":{"message":"some message for default"}}]}

http://localhost:8888/sample-config/dev

{"name":"sample-config","profiles":["dev"],"label":"master","version":"f57f675e23c02a9e2f8422b01f07302d41d9774f","propertySources":[{"name":"<github-default-url>","source":{"message":"some message for dev"}}, ........... ]}

http://localhost:8888/sample-config/production

{"name":"sample-config","profiles":["production"],"label":"master","version":"f57f675e23c02a9e2f8422b01f07302d41d9774f","propertySources":[{"name":"<github-default-url>","source":{"message":"some message for production"}}, ........ ]}

We are assuming here that we have only one property, with key message, in the property files.

So our Spring Cloud Config Server is now ready to serve environment-specific configuration properties to clients.

We can also do a health check with http://localhost:8888/sample-config/health

Spring Cloud Config Client

A Spring Boot application can take immediate advantage of the Spring Cloud Config Server. Let's see how.

@SpringBootApplication
public class ConfigClientApplication {

    public static void main(String[] args) {
        SpringApplication.run(ConfigClientApplication.class, args);
    }
}

// @RefreshScope lets this bean pick up refreshed config values
@RefreshScope
@RestController
class MessageRestController {

    @Value("${message:Hello default}")
    private String message;

    @RequestMapping("/message")
    String getMessage() {
        return this.message;
    }
}

So we have a REST endpoint, /message, which returns the value of the key message from the property files.

application.properties file

server.port=8080

# this must match the application name used in the config server
spring.application.name=sample-config

# URL for config server
spring.cloud.config.uri=http://localhost:8888

# Active profile
spring.profiles.active=dev

Finally, add the dependency below to pom.xml:

<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-config</artifactId>
    <version>1.2.0.RELEASE</version>
</dependency>

Let's start the server and access http://localhost:8080/message; the output will be "some message for dev".

If we change the spring.profiles.active value, the output changes accordingly.

@RefreshScope

If we change the value of any key (the value of any property file attribute for any environment) we don't even need to restart the Config Server or the Config Client; just send an HTTP POST request to http://localhost:8080/refresh and the updated values will be reflected automatically.

So we can use the Spring Cloud Config Server to:

1. Externalise the configuration properties (property files for applications) to either the file system or a GitHub repository.
2. Get support for different profiles, like dev and production, seamlessly.
3. Change any configuration attribute value at runtime without restarting the server.