diff --git a/.travis.yml b/.travis.yml
index 06007d3..7475684 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,7 @@
 language: go
 
 go:
-  - "1.17.x"
-  - "1.16.x"
+  - tip
 
 git:
   depth: 1
@@ -20,4 +19,4 @@ script:
   - go test -cover -race -count=1 -timeout=30s -run .
   - go test -covermode=count -coverprofile=coverage.out -timeout=90s -run .
   - if test ! -z "$COVERALLS_TOKEN"; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
-  - cd bench; go test -run=Bench.* -bench=. -benchmem; cd ..
\ No newline at end of file
+  - cd bench; go test -run=Bench.* -bench=. -benchmem; cd ..
diff --git a/Readme.md b/README.md
similarity index 100%
rename from Readme.md
rename to README.md
diff --git a/bench/bench_test.go b/bench/bench_test.go
index b4e9d5e..547462e 100644
--- a/bench/bench_test.go
+++ b/bench/bench_test.go
@@ -9,7 +9,7 @@ import (
 )
 
 func BenchmarkCacheSetWithoutTTL(b *testing.B) {
-	cache := ttlcache.NewCache()
+	cache := ttlcache.NewCache[string, string]()
 	defer cache.Close()
 
 	for n := 0; n < b.N; n++ {
@@ -18,7 +18,7 @@ func BenchmarkCacheSetWithoutTTL(b *testing.B) {
 }
 
 func BenchmarkCacheSetWithGlobalTTL(b *testing.B) {
-	cache := ttlcache.NewCache()
+	cache := ttlcache.NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(50 * time.Millisecond))
@@ -28,7 +28,7 @@ func BenchmarkCacheSetWithGlobalTTL(b *testing.B) {
 }
 
 func BenchmarkCacheSetWithTTL(b *testing.B) {
-	cache := ttlcache.NewCache()
+	cache := ttlcache.NewCache[string, string]()
 	defer cache.Close()
 
 	for n := 0; n < b.N; n++ {
diff --git a/cache.go b/cache.go
index 4b2f820..537f3b0 100644
--- a/cache.go
+++ b/cache.go
@@ -1,83 +1,94 @@
 package ttlcache
 
 import (
+	"fmt"
 	"sync"
 	"time"
 
 	"golang.org/x/sync/singleflight"
 )
 
-// CheckExpireCallback is used as a callback for an external check on item expiration
-type CheckExpireCallback func(key string, value interface{}) bool
+// CheckExpireCallback is used as a callback for an external check on item
+// expiration.
+type CheckExpireCallback[K comparable, V any] func(key K, value V) bool
 
-// ExpireCallback is used as a callback on item expiration or when notifying of an item new to the cache
-// Note that ExpireReasonCallback will be the successor of this function in the next major release.
-type ExpireCallback func(key string, value interface{})
+// ExpireCallback is used as a callback on item expiration or when notifying
+// of an item new to the cache.
+// Note that ExpireReasonCallback will be the successor of this function in
+// the next major release.
+type ExpireCallback[K comparable, V any] func(key K, value V)
 
-// ExpireReasonCallback is used as a callback on item expiration with extra information why the item expired.
-type ExpireReasonCallback func(key string, reason EvictionReason, value interface{})
+// ExpireReasonCallback is used as a callback on item expiration with extra
+// information why the item expired.
+type ExpireReasonCallback[K comparable, V any] func(key K, reason EvictionReason, value V)
 
-// LoaderFunction can be supplied to retrieve an item where a cache miss occurs. Supply an item specific ttl or Duration.Zero
-type LoaderFunction func(key string) (data interface{}, ttl time.Duration, err error)
+// LoaderFunction can be supplied to retrieve an item where a cache miss
+// occurs. Supply an item specific ttl or Duration.Zero
+type LoaderFunction[K comparable, V any] func(key K) (value V, ttl time.Duration, err error)
 
 // SimpleCache interface enables a quick-start. Interface for basic usage.
-type SimpleCache interface {
-	Get(key string) (interface{}, error)
-	GetWithTTL(key string) (interface{}, time.Duration, error)
-	Set(key string, data interface{}) error
+type SimpleCache[K comparable, V any] interface {
+	Get(key K) (V, error)
+	GetWithTTL(key K) (V, time.Duration, error)
+	Set(key K, value V) error
 	SetTTL(ttl time.Duration) error
-	SetWithTTL(key string, data interface{}, ttl time.Duration) error
-	Remove(key string) error
+	SetWithTTL(key K, value V, ttl time.Duration) error
+	Remove(key K) error
 	Close() error
 	Purge() error
 }
 
-// Cache is a synchronized map of items that can auto-expire once stale
-type Cache struct {
+// Cache is a synchronized map of items that can auto-expire once stale.
+type Cache[K comparable, V any] struct {
 	// mutex is shared for all operations that need to be safe
 	mutex sync.Mutex
 	// ttl is the global ttl for the cache, can be zero (is infinite)
 	ttl time.Duration
 	// actual item storage
-	items map[string]*item
+	items map[K]*item[K, V]
 	// lock used to avoid fetching a remote item multiple times
 	loaderLock           *singleflight.Group
-	expireCallback       ExpireCallback
-	expireReasonCallback ExpireReasonCallback
-	checkExpireCallback  CheckExpireCallback
-	newItemCallback      ExpireCallback
-	// the queue is used to have an ordered structure to use for expiration and cleanup.
-	priorityQueue          *priorityQueue
+	expireCallback       ExpireCallback[K, V]
+	expireReasonCallback ExpireReasonCallback[K, V]
+	checkExpireCallback  CheckExpireCallback[K, V]
+	newItemCallback      ExpireCallback[K, V]
+	// the queue is used to have an ordered structure to use for
+	// expiration and cleanup.
+	priorityQueue          *priorityQueue[K, V]
 	expirationNotification chan bool
-	// hasNotified is used to not schedule new expiration processing when an request is already pending.
+	// hasNotified is used to not schedule new expiration processing
+	// when a request is already pending.
 	hasNotified      bool
 	expirationTime   time.Time
 	skipTTLExtension bool
 	shutdownSignal   chan (chan struct{})
 	isShutDown       bool
-	loaderFunction   LoaderFunction
+	loaderFunction   LoaderFunction[K, V]
 	sizeLimit        int
 	metrics          Metrics
 }
 
-// EvictionReason is an enum that explains why an item was evicted
+// EvictionReason is an enum that explains why an item was evicted.
 type EvictionReason int
 
 const (
-	// Removed : explicitly removed from cache via API call
+	// Removed : explicitly removed from cache via API call.
 	Removed EvictionReason = iota
-	// EvictedSize : evicted due to exceeding the cache size
+	// EvictedSize : evicted due to exceeding the cache size.
 	EvictedSize
-	// Expired : the time to live is zero and therefore the item is removed
+	// Expired : the time to live is zero and therefore the item is
+	// removed.
 	Expired
-	// Closed : the cache was closed
+	// Closed : the cache was closed.
 	Closed
 )
 
 const (
-	// ErrClosed is raised when operating on a cache where Close() has already been called.
+	// ErrClosed is raised when operating on a cache where Close() has
+	// already been called.
 	ErrClosed = constError("cache already closed")
-	// ErrNotFound indicates that the requested key is not present in the cache
+	// ErrNotFound indicates that the requested key is not present
+	// in the cache.
 	ErrNotFound = constError("key not found")
 )
 
@@ -87,13 +98,14 @@ func (err constError) Error() string {
 	return string(err)
 }
 
-func (cache *Cache) getItem(key string) (*item, bool, bool) {
+func (cache *Cache[K, V]) getItem(key K) (*item[K, V], bool, bool) {
 	item, exists := cache.items[key]
 	if !exists || item.expired() {
 		return nil, false, false
 	}
 
-	// no need to change priority queue when skipTTLExtension is true or the item will not expire
+	// no need to change priority queue when skipTTLExtension is true or
+	// the item will not expire
 	if cache.skipTTLExtension || (item.ttl == 0 && cache.ttl == 0) {
 		return item, true, false
 	}
@@ -117,7 +129,7 @@ func (cache *Cache) getItem(key string) (*item, bool, bool) {
 	return item, exists, expirationNotification
 }
 
-func (cache *Cache) startExpirationProcessing() {
+func (cache *Cache[K, V]) startExpirationProcessing() {
 	timer := time.NewTimer(time.Hour)
 	for {
 		var sleepTime time.Duration
@@ -172,27 +184,26 @@ func (cache *Cache) startExpirationProcessing() {
 	}
 }
 
-func (cache *Cache) checkExpirationCallback(item *item, reason EvictionReason) {
+func (cache *Cache[K, V]) checkExpirationCallback(item *item[K, V], reason EvictionReason) {
 	if cache.expireCallback != nil {
-		go cache.expireCallback(item.key, item.data)
+		go cache.expireCallback(item.key, item.value)
 	}
 	if cache.expireReasonCallback != nil {
-		go cache.expireReasonCallback(item.key, reason, item.data)
+		go cache.expireReasonCallback(item.key, reason, item.value)
 	}
 }
 
-func (cache *Cache) removeItem(item *item, reason EvictionReason) {
+func (cache *Cache[K, V]) removeItem(item *item[K, V], reason EvictionReason) {
 	cache.metrics.Evicted++
 	cache.checkExpirationCallback(item, reason)
 	cache.priorityQueue.remove(item)
 	delete(cache.items, item.key)
 }
 
-func (cache *Cache) evictjob(reason EvictionReason) {
+func (cache *Cache[K, V]) evictjob(reason EvictionReason) {
 	// index will only be advanced if the current entry will not be evicted
 	i := 0
 	for item := cache.priorityQueue.items[i]; ; item = cache.priorityQueue.items[i] {
-
 		cache.removeItem(item, reason)
 		if cache.priorityQueue.Len() == 0 {
 			return
@@ -200,13 +211,12 @@ func (cache *Cache) evictjob(reason EvictionReason) {
 	}
 }
 
-func (cache *Cache) cleanjob() {
+func (cache *Cache[K, V]) cleanjob() {
 	// index will only be advanced if the current entry will not be evicted
 	i := 0
 	for item := cache.priorityQueue.items[i]; item.expired(); item = cache.priorityQueue.items[i] {
-
 		if cache.checkExpireCallback != nil {
-			if !cache.checkExpireCallback(item.key, item.data) {
+			if !cache.checkExpireCallback(item.key, item.value) {
 				item.touch()
 				cache.priorityQueue.update(item)
 				i++
@@ -224,9 +234,11 @@ func (cache *Cache) cleanjob() {
 	}
 }
 
-// Close calls Purge after stopping the goroutine that does ttl checking, for a clean shutdown.
-// The cache is no longer cleaning up after the first call to Close, repeated calls are safe and return ErrClosed.
-func (cache *Cache) Close() error {
+// Close calls Purge after stopping the goroutine that does ttl checking,
+// for a clean shutdown.
+// The cache is no longer cleaning up after the first call to Close, repeated
+// calls are safe and return ErrClosed.
+func (cache *Cache[K, V]) Close() error {
 	cache.mutex.Lock()
 	if !cache.isShutDown {
 		cache.isShutDown = true
@@ -244,12 +256,13 @@ func (cache *Cache) Close() error {
 }
 
 // Set is a thread-safe way to add new items to the map.
-func (cache *Cache) Set(key string, data interface{}) error {
-	return cache.SetWithTTL(key, data, ItemExpireWithGlobalTTL)
+func (cache *Cache[K, V]) Set(key K, value V) error {
+	return cache.SetWithTTL(key, value, ItemExpireWithGlobalTTL)
 }
 
-// SetWithTTL is a thread-safe way to add new items to the map with individual ttl.
-func (cache *Cache) SetWithTTL(key string, data interface{}, ttl time.Duration) error {
+// SetWithTTL is a thread-safe way to add new items to the map with
+// individual ttl.
+func (cache *Cache[K, V]) SetWithTTL(key K, value V, ttl time.Duration) error {
 	cache.mutex.Lock()
 	if cache.isShutDown {
 		cache.mutex.Unlock()
@@ -263,13 +276,13 @@ func (cache *Cache) SetWithTTL(key string, data interface{}, ttl time.Duration)
 	}
 
 	if exists {
-		item.data = data
+		item.value = value
 		item.ttl = ttl
 	} else {
 		if cache.sizeLimit != 0 && len(cache.items) >= cache.sizeLimit {
 			cache.removeItem(cache.priorityQueue.items[0], EvictedSize)
 		}
-		item = newItem(key, data, ttl)
+		item = newItem(key, value, ttl)
 		cache.items[key] = item
 	}
 	cache.metrics.Inserted++
@@ -290,7 +303,7 @@ func (cache *Cache) SetWithTTL(key string, data interface{}, ttl time.Duration)
 
 	cache.mutex.Unlock()
 	if !exists && cache.newItemCallback != nil {
-		cache.newItemCallback(key, data)
+		cache.newItemCallback(key, value)
 	}
 
 	// notify expiration only if the latest expire time is changed
@@ -300,41 +313,43 @@ func (cache *Cache) SetWithTTL(key string, data interface{}, ttl time.Duration)
 	return nil
 }
 
-// Get is a thread-safe way to lookup items
-// Every lookup, also touches the item, hence extending its life
-func (cache *Cache) Get(key string) (interface{}, error) {
+// Get is a thread-safe way to lookup items.
+// Every lookup, also touches the item, hence extending its life.
+func (cache *Cache[K, V]) Get(key K) (V, error) {
 	return cache.GetByLoader(key, nil)
 }
 
 // GetWithTTL has exactly the same behaviour as Get but also returns
-// the remaining TTL for a specific item at the moment its retrieved
-func (cache *Cache) GetWithTTL(key string) (interface{}, time.Duration, error) {
+// the remaining TTL for a specific item at the moment its retrieved.
+func (cache *Cache[K, V]) GetWithTTL(key K) (V, time.Duration, error) {
 	return cache.GetByLoaderWithTtl(key, nil)
 }
 
-// GetByLoader can take a per key loader function (i.e. to propagate context)
-func (cache *Cache) GetByLoader(key string, customLoaderFunction LoaderFunction) (interface{}, error) {
-	dataToReturn, _, err := cache.GetByLoaderWithTtl(key, customLoaderFunction)
+// GetByLoader can take a per key loader function (i.e. to propagate context).
+func (cache *Cache[K, V]) GetByLoader(key K, customLoaderFunction LoaderFunction[K, V]) (V, error) {
+	valueToReturn, _, err := cache.GetByLoaderWithTtl(key, customLoaderFunction)
 
-	return dataToReturn, err
+	return valueToReturn, err
 }
 
-// GetByLoaderWithTtl can take a per key loader function (i.e. to propagate context)
-func (cache *Cache) GetByLoaderWithTtl(key string, customLoaderFunction LoaderFunction) (interface{}, time.Duration, error) {
+// GetByLoaderWithTtl can take a per key loader function (i.e. to propagate
+// context).
+func (cache *Cache[K, V]) GetByLoaderWithTtl(key K, customLoaderFunction LoaderFunction[K, V]) (V, time.Duration, error) {
 	cache.mutex.Lock()
 	if cache.isShutDown {
 		cache.mutex.Unlock()
-		return nil, 0, ErrClosed
+		var empty V
+		return empty, 0, ErrClosed
 	}
 
 	cache.metrics.Hits++
 	item, exists, triggerExpirationNotification := cache.getItem(key)
 
-	var dataToReturn interface{}
+	var valueToReturn V
 	ttlToReturn := time.Duration(0)
 	if exists {
 		cache.metrics.Retrievals++
-		dataToReturn = item.data
+		valueToReturn = item.value
 		if !cache.skipTTLExtension {
 			ttlToReturn = item.ttl
 		} else {
@@ -361,23 +376,20 @@ func (cache *Cache) GetByLoaderWithTtl(key string, customLoaderFunction LoaderFu
 	}
 
 	if loaderFunction != nil && !exists {
-		type loaderResult struct {
-			data interface{}
-			ttl  time.Duration
-		}
-		ch := cache.loaderLock.DoChan(key, func() (interface{}, error) {
+		loaderKey := fmt.Sprint(key)
+		ch := cache.loaderLock.DoChan(loaderKey, func() (interface{}, error) {
 			// cache is not blocked during io
-			invokeData, ttl, err := cache.invokeLoader(key, loaderFunction)
-			lr := &loaderResult{
-				data: invokeData,
-				ttl:  ttl,
+			invokeValue, ttl, err := cache.invokeLoader(key, loaderFunction)
+			lr := &loaderResult[V]{
+				value: invokeValue,
+				ttl:   ttl,
 			}
 			return lr, err
 		})
 		cache.mutex.Unlock()
 		res := <-ch
-		dataToReturn = res.Val.(*loaderResult).data
-		ttlToReturn = res.Val.(*loaderResult).ttl
+		valueToReturn = res.Val.(*loaderResult[V]).value
+		ttlToReturn = res.Val.(*loaderResult[V]).ttl
 		err = res.Err
 	}
 
@@ -385,10 +397,10 @@ func (cache *Cache) GetByLoaderWithTtl(key string, customLoaderFunction LoaderFu
 		cache.notifyExpiration()
 	}
 
-	return dataToReturn, ttlToReturn, err
+	return valueToReturn, ttlToReturn, err
 }
 
-func (cache *Cache) notifyExpiration() {
+func (cache *Cache[K, V]) notifyExpiration() {
 	cache.mutex.Lock()
 	if cache.hasNotified {
 		cache.mutex.Unlock()
@@ -400,19 +412,21 @@ func (cache *Cache) notifyExpiration() {
 	cache.expirationNotification <- true
 }
 
-func (cache *Cache) invokeLoader(key string, loaderFunction LoaderFunction) (dataToReturn interface{}, ttl time.Duration, err error) {
-	dataToReturn, ttl, err = loaderFunction(key)
+func (cache *Cache[K, V]) invokeLoader(key K, loaderFunction LoaderFunction[K, V]) (valueToReturn V, ttl time.Duration, err error) {
+	valueToReturn, ttl, err = loaderFunction(key)
 	if err == nil {
-		err = cache.SetWithTTL(key, dataToReturn, ttl)
+		err = cache.SetWithTTL(key, valueToReturn, ttl)
 		if err != nil {
-			dataToReturn = nil
+			var empty V
+			valueToReturn = empty
 		}
 	}
-	return dataToReturn, ttl, err
+	return valueToReturn, ttl, err
 }
 
-// Remove removes an item from the cache if it exists, triggers expiration callback when set. Can return ErrNotFound if the entry was not present.
-func (cache *Cache) Remove(key string) error {
+// Remove removes an item from the cache if it exists, triggers expiration
+// callback when set. Can return ErrNotFound if the entry was not present.
+func (cache *Cache[K, V]) Remove(key K) error {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	if cache.isShutDown {
@@ -428,8 +442,9 @@ func (cache *Cache) Remove(key string) error {
 	return nil
 }
 
-// Count returns the number of items in the cache. Returns zero when the cache has been closed.
-func (cache *Cache) Count() int {
+// Count returns the number of items in the cache. Returns zero when the
+// cache has been closed.
+func (cache *Cache[K, V]) Count() int {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 
@@ -440,15 +455,16 @@ func (cache *Cache) Count() int {
 	return length
 }
 
-// GetKeys returns all keys of items in the cache. Returns nil when the cache has been closed.
-func (cache *Cache) GetKeys() []string {
+// GetKeys returns all keys of items in the cache. Returns nil when the cache
+// has been closed.
+func (cache *Cache[K, V]) GetKeys() []K {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 
 	if cache.isShutDown {
 		return nil
 	}
-	keys := make([]string, len(cache.items))
+	keys := make([]K, len(cache.items))
 	i := 0
 	for k := range cache.items {
 		keys[i] = k
@@ -457,26 +473,28 @@ func (cache *Cache) GetKeys() []string {
 	return keys
 }
 
-// GetItems returns a copy of all items in the cache. Returns nil when the cache has been closed.
-func (cache *Cache) GetItems() map[string]interface{} {
+// GetItems returns a copy of all items in the cache. Returns nil when
+// the cache has been closed.
+func (cache *Cache[K, V]) GetItems() map[K]V {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 
 	if cache.isShutDown {
 		return nil
 	}
-	items := make(map[string]interface{}, len(cache.items))
+	items := make(map[K]V, len(cache.items))
 	for k := range cache.items {
 		item, exists, _ := cache.getItem(k)
 		if exists {
-			items[k] = item.data
+			items[k] = item.value
 		}
 	}
 	return items
 }
 
-// SetTTL sets the global TTL value for items in the cache, which can be overridden at the item level.
-func (cache *Cache) SetTTL(ttl time.Duration) error {
+// SetTTL sets the global TTL value for items in the cache, which can be
+// overridden at the item level.
+func (cache *Cache[K, V]) SetTTL(ttl time.Duration) error {
 	cache.mutex.Lock()
 
 	if cache.isShutDown {
@@ -489,83 +507,89 @@ func (cache *Cache) SetTTL(ttl time.Duration) error {
 	return nil
 }
 
-// SetExpirationCallback sets a callback that will be called when an item expires
-func (cache *Cache) SetExpirationCallback(callback ExpireCallback) {
+// SetExpirationCallback sets a callback that will be called when an item
+// expires.
+func (cache *Cache[K, V]) SetExpirationCallback(callback ExpireCallback[K, V]) {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	cache.expireCallback = callback
 }
 
-// SetExpirationReasonCallback sets a callback that will be called when an item expires, includes reason of expiry
-func (cache *Cache) SetExpirationReasonCallback(callback ExpireReasonCallback) {
+// SetExpirationReasonCallback sets a callback that will be called when an
+// item expires, includes reason of expiry.
+func (cache *Cache[K, V]) SetExpirationReasonCallback(callback ExpireReasonCallback[K, V]) {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	cache.expireReasonCallback = callback
 }
 
-// SetCheckExpirationCallback sets a callback that will be called when an item is about to expire
-// in order to allow external code to decide whether the item expires or remains for another TTL cycle
-func (cache *Cache) SetCheckExpirationCallback(callback CheckExpireCallback) {
+// SetCheckExpirationCallback sets a callback that will be called when an
+// item is about to expire in order to allow external code to decide whether
+// the item expires or remains for another TTL cycle.
+func (cache *Cache[K, V]) SetCheckExpirationCallback(callback CheckExpireCallback[K, V]) {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	cache.checkExpireCallback = callback
 }
 
-// SetNewItemCallback sets a callback that will be called when a new item is added to the cache
-func (cache *Cache) SetNewItemCallback(callback ExpireCallback) {
+// SetNewItemCallback sets a callback that will be called when a new item
+// is added to the cache.
+func (cache *Cache[K, V]) SetNewItemCallback(callback ExpireCallback[K, V]) {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	cache.newItemCallback = callback
 }
 
-// SkipTTLExtensionOnHit allows the user to change the cache behaviour. When this flag is set to true it will
-// no longer extend TTL of items when they are retrieved using Get, or when their expiration condition is evaluated
+// SkipTTLExtensionOnHit allows the user to change the cache behaviour. When
+// this flag is set to true it will no longer extend TTL of items when they
+// are retrieved using Get, or when their expiration condition is evaluated
 // using SetCheckExpirationCallback.
-func (cache *Cache) SkipTTLExtensionOnHit(value bool) {
+func (cache *Cache[K, V]) SkipTTLExtensionOnHit(value bool) {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	cache.skipTTLExtension = value
 }
 
-// SetLoaderFunction allows you to set a function to retrieve cache misses. The signature matches that of the Get function.
-// Additional Get calls on the same key block while fetching is in progress (groupcache style).
-func (cache *Cache) SetLoaderFunction(loader LoaderFunction) {
+// SetLoaderFunction allows you to set a function to retrieve cache misses.
+// The signature matches that of the Get function.
+// Additional Get calls on the same key block while fetching is in progress
+// (groupcache style).
+func (cache *Cache[K, V]) SetLoaderFunction(loader LoaderFunction[K, V]) {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	cache.loaderFunction = loader
 }
 
-// Purge will remove all entries
-func (cache *Cache) Purge() error {
+// Purge will remove all entries.
+func (cache *Cache[K, V]) Purge() error {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	if cache.isShutDown {
 		return ErrClosed
 	}
 	cache.metrics.Evicted += int64(len(cache.items))
-	cache.items = make(map[string]*item)
-	cache.priorityQueue = newPriorityQueue()
+	cache.items = make(map[K]*item[K, V])
+	cache.priorityQueue = newPriorityQueue[K, V]()
 	return nil
 }
 
 // SetCacheSizeLimit sets a limit to the amount of cached items.
-// If a new item is getting cached, the closes item to being timed out will be replaced
+// If a new item is getting cached, the closest item to being timed out will
+// be replaced.
 // Set to 0 to turn off
-func (cache *Cache) SetCacheSizeLimit(limit int) {
+func (cache *Cache[K, V]) SetCacheSizeLimit(limit int) {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	cache.sizeLimit = limit
 }
 
-// NewCache is a helper to create instance of the Cache struct
-func NewCache() *Cache {
-
+// NewCache creates a new instance of the Cache type.
+func NewCache[K comparable, V any]() *Cache[K, V] {
 	shutdownChan := make(chan chan struct{})
-
-	cache := &Cache{
-		items:                  make(map[string]*item),
+	cache := &Cache[K, V]{
+		items:                  make(map[K]*item[K, V]),
 		loaderLock:             &singleflight.Group{},
-		priorityQueue:          newPriorityQueue(),
+		priorityQueue:          newPriorityQueue[K, V](),
 		expirationNotification: make(chan bool, 1),
 		expirationTime:         time.Now(),
 		shutdownSignal:         shutdownChan,
@@ -578,15 +602,17 @@ func NewCache() *Cache {
 	return cache
 }
 
-// GetMetrics exposes the metrics of the cache. This is a snapshot copy of the metrics.
-func (cache *Cache) GetMetrics() Metrics {
+// GetMetrics exposes the metrics of the cache. This is a snapshot copy
+// of the metrics.
+func (cache *Cache[K, V]) GetMetrics() Metrics {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	return cache.metrics
 }
 
-// Touch resets the TTL of the key when it exists, returns ErrNotFound if the key is not present.
-func (cache *Cache) Touch(key string) error {
+// Touch resets the TTL of the key when it exists, returns ErrNotFound if
+// the key is not present.
+func (cache *Cache[K, V]) Touch(key K) error {
 	cache.mutex.Lock()
 	defer cache.mutex.Unlock()
 	item, exists := cache.items[key]
@@ -603,3 +629,8 @@ func min(duration time.Duration, second time.Duration) time.Duration {
 	}
 	return second
 }
+
+type loaderResult[V any] struct {
+	value V
+	ttl   time.Duration
+}
diff --git a/cache_test.go b/cache_test.go
index 4d41af7..3e89988 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -23,7 +23,7 @@ func TestMain(m *testing.M) {
 // The SimpleCache interface enables quick-start.
 func TestCache_SimpleCache(t *testing.T) {
 	t.Parallel()
-	var cache SimpleCache = NewCache()
+	var cache SimpleCache[string, string] = NewCache[string, string]()
 
 	cache.SetTTL(time.Second)
 	cache.Set("k", "v")
@@ -37,22 +37,21 @@ func TestCache_SimpleCache(t *testing.T) {
 func TestCache_GetByLoaderRace(t *testing.T) {
 	t.Skip()
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	cache.SetTTL(time.Microsecond)
 	defer cache.Close()
 
 	loaderInvocations := uint64(0)
 	inFlight := uint64(0)
 
-	globalLoader := func(key string) (data interface{}, ttl time.Duration, err error) {
+	globalLoader := LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		atomic.AddUint64(&inFlight, 1)
 		atomic.AddUint64(&loaderInvocations, 1)
 		time.Sleep(time.Microsecond)
 		assert.Equal(t, uint64(1), inFlight)
 		defer atomic.AddUint64(&inFlight, ^uint64(0))
 		return "global", 0, nil
-
-	}
+	})
 	cache.SetLoaderFunction(globalLoader)
 
 	for i := 0; i < 1000; i++ {
@@ -78,17 +77,17 @@ func TestCache_GetByLoaderRace(t *testing.T) {
 // This is faciliated by supplying a loder function with Get's.
 func TestCache_GetByLoader(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
-	globalLoader := func(key string) (data interface{}, ttl time.Duration, err error) {
+	globalLoader := LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		return "global", 0, nil
-	}
+	})
 	cache.SetLoaderFunction(globalLoader)
 
-	localLoader := func(key string) (data interface{}, ttl time.Duration, err error) {
+	localLoader := LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		return "local", 0, nil
-	}
+	})
 
 	key, _ := cache.Get("test")
 	assert.Equal(t, "global", key)
@@ -113,19 +112,19 @@ func TestCache_GetByLoader(t *testing.T) {
 
 func TestCache_GetByLoaderWithTtl(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	globalTtl := time.Duration(time.Minute)
-	globalLoader := func(key string) (data interface{}, ttl time.Duration, err error) {
+	globalLoader := LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		return "global", globalTtl, nil
-	}
+	})
 	cache.SetLoaderFunction(globalLoader)
 
 	localTtl := time.Duration(time.Hour)
-	localLoader := func(key string) (data interface{}, ttl time.Duration, err error) {
+	localLoader := LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		return "local", localTtl, nil
-	}
+	})
 
 	key, ttl, _ := cache.GetWithTTL("test")
 	assert.Equal(t, "global", key)
@@ -151,14 +150,14 @@ func TestCache_GetByLoaderWithTtl(t *testing.T) {
 // Issue #38: Feature request: ability to know why an expiry has occurred
 func TestCache_textExpirationReasons(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 
 	var reason EvictionReason
 	var sync = make(chan struct{})
-	expirationReason := func(key string, evReason EvictionReason, value interface{}) {
+	expirationReason := ExpireReasonCallback[string, string](func(key string, evReason EvictionReason, value string) {
 		reason = evReason
 		sync <- struct{}{}
-	}
+	})
 	cache.SetExpirationReasonCallback(expirationReason)
 
 	cache.SetTTL(time.Millisecond)
@@ -187,7 +186,7 @@ func TestCache_textExpirationReasons(t *testing.T) {
 
 func TestCache_TestTouch(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	lock := sync.Mutex{}
@@ -197,11 +196,11 @@ func TestCache_TestTouch(t *testing.T) {
 	lock.Unlock()
 
 	cache.SkipTTLExtensionOnHit(true)
-	cache.SetExpirationCallback(func(key string, value interface{}) {
+	cache.SetExpirationCallback(ExpireCallback[string, string](func(key string, value string) {
 		lock.Lock()
 		defer lock.Unlock()
 		expired = true
-	})
+	}))
 
 	cache.SetWithTTL("key", "data", time.Millisecond*900)
 	<-time.After(time.Millisecond * 500)
@@ -232,7 +231,7 @@ func TestCache_TestTouch(t *testing.T) {
 // Issue #37: Cache metrics
 func TestCache_TestMetrics(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Second)
@@ -264,17 +263,17 @@ func TestCache_TestMetrics(t *testing.T) {
 // Issue #31: Test that a single fetch is executed with the loader function
 func TestCache_TestSingleFetch(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	var calls int32
 
-	loader := func(key string) (data interface{}, ttl time.Duration, err error) {
+	loader := LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		time.Sleep(time.Millisecond * 100)
 		atomic.AddInt32(&calls, 1)
 		return "data", 0, nil
 
-	}
+	})
 
 	cache.SetLoaderFunction(loader)
 	wg := sync.WaitGroup{}
@@ -294,14 +293,13 @@ func TestCache_TestSingleFetch(t *testing.T) {
 // Issue #30: Removal does not use expiration callback.
 func TestCache_TestRemovalTriggersCallback(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	var sync = make(chan struct{})
-	expiration := func(key string, data interface{}) {
-
+	expiration := ExpireCallback[string, string](func(key string, value string) {
 		sync <- struct{}{}
-	}
+	})
 	cache.SetExpirationCallback(expiration)
 
 	cache.Set("1", "barf")
@@ -313,18 +311,18 @@ func TestCache_TestRemovalTriggersCallback(t *testing.T) {
 // Issue #31: loader function
 func TestCache_TestLoaderFunction(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 
-	cache.SetLoaderFunction(func(key string) (data interface{}, ttl time.Duration, err error) {
-		return nil, 0, ErrNotFound
-	})
+	cache.SetLoaderFunction(LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
+		return "", 0, ErrNotFound
+	}))
 
 	_, err := cache.Get("1")
 	assert.Equal(t, ErrNotFound, err)
 
-	cache.SetLoaderFunction(func(key string) (data interface{}, ttl time.Duration, err error) {
+	cache.SetLoaderFunction(LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		return "1", 0, nil
-	})
+	}))
 
 	value, found := cache.Get("1")
 	assert.Equal(t, nil, found)
@@ -334,22 +332,22 @@ func TestCache_TestLoaderFunction(t *testing.T) {
 
 	value, found = cache.Get("1")
 	assert.Equal(t, ErrClosed, found)
-	assert.Equal(t, nil, value)
+	assert.Zero(t, value)
 }
 
 // Issue #31: edge case where cache is closed when loader function has completed
 func TestCache_TestLoaderFunctionDuringClose(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 
-	cache.SetLoaderFunction(func(key string) (data interface{}, ttl time.Duration, err error) {
+	cache.SetLoaderFunction(LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		cache.Close()
 		return "1", 0, nil
-	})
+	}))
 
 	value, found := cache.Get("1")
 	assert.Equal(t, ErrClosed, found)
-	assert.Equal(t, nil, value)
+	assert.Zero(t, value)
 
 	cache.Close()
 
@@ -358,12 +356,12 @@ func TestCache_TestLoaderFunctionDuringClose(t *testing.T) {
 // Cache sometimes returns key not found under parallel access with a loader function
 func TestCache_TestLoaderFunctionParallelKeyAccess(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 
-	cache.SetLoaderFunction(func(key string) (data interface{}, ttl time.Duration, err error) {
+	cache.SetLoaderFunction(LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		time.Sleep(time.Millisecond * 300)
 		return "1", 1 * time.Nanosecond, nil
-	})
+	}))
 
 	wg := sync.WaitGroup{}
 	errCount := uint64(0)
@@ -389,16 +387,16 @@ func TestCache_TestLoaderFunctionParallelKeyAccess(t *testing.T) {
 // Issue #28: call expirationCallback automatically on cache.Close()
 func TestCache_ExpirationOnClose(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, int]()
 
 	success := make(chan struct{})
 	defer close(success)
 
 	cache.SetTTL(time.Hour * 100)
-	cache.SetExpirationCallback(func(key string, value interface{}) {
+	cache.SetExpirationCallback(ExpireCallback[string, int](func(key string, value int) {
 		t.Logf("%s\t%v", key, value)
 		success <- struct{}{}
-	})
+	}))
 	cache.Set("1", 1)
 	cache.Set("2", 1)
 	cache.Set("3", 1)
@@ -421,12 +419,12 @@ func TestCache_ExpirationOnClose(t *testing.T) {
 
 func TestCache_ModifyAfterClose(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, int]()
 
 	cache.SetTTL(time.Hour * 100)
-	cache.SetExpirationCallback(func(key string, value interface{}) {
+	cache.SetExpirationCallback(ExpireCallback[string, int](func(key string, value int) {
 		t.Logf("%s\t%v", key, value)
-	})
+	}))
 	cache.Set("1", 1)
 	cache.Set("2", 1)
 	cache.Set("3", 1)
@@ -456,7 +454,7 @@ func TestCache_ModifyAfterClose(t *testing.T) {
 // that it can be called in a repeated way without problems.
 func TestCache_MultipleCloseCalls(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 
 	cache.SetTTL(time.Millisecond * 100)
 
@@ -479,7 +477,7 @@ func TestCache_MultipleCloseCalls(t *testing.T) {
 func TestCache_SkipTtlExtensionOnHit(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Millisecond * 100)
@@ -504,7 +502,7 @@ func TestCache_SkipTtlExtensionOnHit(t *testing.T) {
 func TestCache_ForRacesAcrossGoroutines(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, bool]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Minute * 1)
@@ -550,7 +548,7 @@ func TestCache_ForRacesAcrossGoroutines(t *testing.T) {
 }
 
 func TestCache_SkipTtlExtensionOnHit_ForRacesAcrossGoroutines(t *testing.T) {
-	cache := NewCache()
+	cache := NewCache[string, bool]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Minute * 1)
@@ -603,13 +601,12 @@ func TestCache_SetCheckExpirationCallback(t *testing.T) {
 	iterated := 0
 	ch := make(chan struct{})
 
-	cacheAD := NewCache()
+	cacheAD := NewCache[string, *int]()
 	defer cacheAD.Close()
 
 	cacheAD.SetTTL(time.Millisecond)
-	cacheAD.SetCheckExpirationCallback(func(key string, value interface{}) bool {
-		v := value.(*int)
-		t.Logf("key=%v, value=%d\n", key, *v)
+	cacheAD.SetCheckExpirationCallback(CheckExpireCallback[string, *int](func(key string, value *int) bool {
+		t.Logf("key=%v, value=%d\n", key, *value)
 		iterated++
 		if iterated == 1 {
 			// this is the breaking test case for issue #14
@@ -617,7 +614,7 @@ func TestCache_SetCheckExpirationCallback(t *testing.T) {
 		}
 		ch <- struct{}{}
 		return true
-	})
+	}))
 
 	i := 2
 	cacheAD.Set("a", &i)
@@ -637,15 +634,15 @@ func TestCache_SetExpirationCallback(t *testing.T) {
 	}
 
 	// Setup the TTL cache
-	cache := NewCache()
+	cache := NewCache[string, A]()
 	defer cache.Close()
 
 	ch := make(chan struct{}, 1024)
 	cache.SetTTL(time.Second * 1)
-	cache.SetExpirationCallback(func(key string, value interface{}) {
+	cache.SetExpirationCallback(ExpireCallback[string, A](func(key string, value A) {
 		t.Logf("This key(%s) has expired\n", key)
 		ch <- struct{}{}
-	})
+	}))
 	for i := 0; i < 1024; i++ {
 		cache.Set(fmt.Sprintf("item_%d", i), A{})
 		time.Sleep(time.Millisecond * 10)
@@ -667,7 +664,7 @@ func TestCache_SetExpirationCallback(t *testing.T) {
 func TestRemovalAndCountDoesNotPanic(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.Set("key", "value")
@@ -680,12 +677,12 @@ func TestRemovalAndCountDoesNotPanic(t *testing.T) {
 func TestRemovalWithTtlDoesNotPanic(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
-	cache.SetExpirationCallback(func(key string, value interface{}) {
+	cache.SetExpirationCallback(ExpireCallback[string, string](func(key string, value string) {
 		t.Logf("This key(%s) has expired\n", key)
-	})
+	}))
 
 	cache.SetWithTTL("keyWithTTL", "value", time.Duration(2*time.Second))
 	cache.Set("key", "value")
@@ -713,44 +710,44 @@ func TestRemovalWithTtlDoesNotPanic(t *testing.T) {
 func TestCacheIndividualExpirationBiggerThanGlobal(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(50 * time.Millisecond))
 	cache.SetWithTTL("key", "value", time.Duration(100*time.Millisecond))
 	<-time.After(150 * time.Millisecond)
-	data, exists := cache.Get("key")
+	value, exists := cache.Get("key")
 	assert.Equal(t, exists, ErrNotFound, "Expected item to not exist")
-	assert.Nil(t, data, "Expected item to be nil")
+	assert.Zero(t, value, "Expected item to be empty")
 }
 
 func TestCacheGlobalExpirationByGlobal(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.Set("key", "value")
 	<-time.After(50 * time.Millisecond)
-	data, exists := cache.Get("key")
+	value, exists := cache.Get("key")
 	assert.Equal(t, exists, nil, "Expected item to exist in cache")
-	assert.Equal(t, data.(string), "value", "Expected item to have 'value' in value")
+	assert.Equal(t, value, "value", "Expected item to have 'value' in value")
 
 	cache.SetTTL(time.Duration(50 * time.Millisecond))
-	data, exists = cache.Get("key")
+	value, exists = cache.Get("key")
 	assert.Equal(t, exists, nil, "Expected item to exist in cache")
-	assert.Equal(t, data.(string), "value", "Expected item to have 'value' in value")
+	assert.Equal(t, value, "value", "Expected item to have 'value' in value")
 
 	<-time.After(100 * time.Millisecond)
-	data, exists = cache.Get("key")
+	value, exists = cache.Get("key")
 	assert.Equal(t, exists, ErrNotFound, "Expected item to not exist")
-	assert.Nil(t, data, "Expected item to be nil")
+	assert.Zero(t, value, "Expected item to be empty")
 }
 
 func TestCacheGlobalExpiration(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(100 * time.Millisecond))
@@ -764,12 +761,12 @@ func TestCacheGlobalExpiration(t *testing.T) {
 func TestCacheMixedExpirations(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
-	cache.SetExpirationCallback(func(key string, value interface{}) {
+	cache.SetExpirationCallback(ExpireCallback[string, string](func(key string, value string) {
 		t.Logf("expired: %s", key)
-	})
+	}))
 	cache.Set("key_1", "value")
 	cache.SetTTL(time.Duration(100 * time.Millisecond))
 	cache.Set("key_2", "value")
@@ -780,7 +777,7 @@ func TestCacheMixedExpirations(t *testing.T) {
 func TestCacheIndividualExpiration(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetWithTTL("key", "value", time.Duration(100*time.Millisecond))
@@ -800,24 +797,24 @@ func TestCacheIndividualExpiration(t *testing.T) {
 func TestCacheGet(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
-	data, exists := cache.Get("hello")
+	value, exists := cache.Get("hello")
 	assert.Equal(t, exists, ErrNotFound, "Expected empty cache to return no data")
-	assert.Nil(t, data, "Expected data to be empty")
+	assert.Zero(t, value, "Expected data to be empty")
 
 	cache.Set("hello", "world")
-	data, exists = cache.Get("hello")
-	assert.NotNil(t, data, "Expected data to be not nil")
+	value, exists = cache.Get("hello")
+	assert.NotZero(t, value, "Expected data to be not empty")
 	assert.Equal(t, nil, exists, "Expected data to exist")
-	assert.Equal(t, "world", (data.(string)), "Expected data content to be 'world'")
+	assert.Equal(t, "world", value, "Expected data content to be 'world'")
 }
 
 func TestCacheGetKeys(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	keys := cache.GetKeys()
@@ -832,7 +829,7 @@ func TestCacheGetKeys(t *testing.T) {
 func TestCacheGetItems(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	items := cache.GetItems()
@@ -841,63 +838,63 @@ func TestCacheGetItems(t *testing.T) {
 	cache.Set("hello", "world")
 	items = cache.GetItems()
 	assert.NotEmpty(t, items, "Expected items to be not empty")
-	assert.Equal(t, map[string]interface{}{"hello": "world"}, items, "Expected items to {'hello': 'world'}")
+	assert.Equal(t, map[string]string{"hello": "world"}, items, "Expected items to {'hello': 'world'}")
 }
 
 func TestCacheGetWithTTL(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
-	data, ttl, exists := cache.GetWithTTL("hello")
+	value, ttl, exists := cache.GetWithTTL("hello")
 	assert.Equal(t, exists, ErrNotFound, "Expected empty cache to return no data")
-	assert.Nil(t, data, "Expected data to be empty")
+	assert.Zero(t, value, "Expected data to be empty")
 	assert.Equal(t, int(ttl), 0, "Expected item TTL to be 0")
 
 	cache.Set("hello", "world")
-	data, ttl, exists = cache.GetWithTTL("hello")
-	assert.NotNil(t, data, "Expected data to be not nil")
+	value, ttl, exists = cache.GetWithTTL("hello")
+	assert.NotZero(t, value, "Expected data to be not empty")
 	assert.Equal(t, nil, exists, "Expected data to exist")
-	assert.Equal(t, "world", (data.(string)), "Expected data content to be 'world'")
+	assert.Equal(t, "world", value, "Expected data content to be 'world'")
 	assert.Equal(t, int(ttl), 0, "Expected item TTL to be 0")
 
 	orgttl := time.Duration(500 * time.Millisecond)
 	cache.SetWithTTL("hello", "world", orgttl)
 	time.Sleep(10 * time.Millisecond)
-	data, ttl, exists = cache.GetWithTTL("hello")
-	assert.NotNil(t, data, "Expected data to be not nil")
+	value, ttl, exists = cache.GetWithTTL("hello")
+	assert.NotZero(t, value, "Expected data to be not empty")
 	assert.Equal(t, nil, exists, "Expected data to exist")
-	assert.Equal(t, "world", (data.(string)), "Expected data content to be 'world'")
+	assert.Equal(t, "world", value, "Expected data content to be 'world'")
 	assert.Equal(t, ttl, orgttl, "Expected item TTL to be original TTL")
 
 	cache.SkipTTLExtensionOnHit(true)
 	cache.SetWithTTL("hello", "world", orgttl)
 	time.Sleep(10 * time.Millisecond)
-	data, ttl, exists = cache.GetWithTTL("hello")
-	assert.NotNil(t, data, "Expected data to be not nil")
+	value, ttl, exists = cache.GetWithTTL("hello")
+	assert.NotZero(t, value, "Expected data to be not empty")
 	assert.Equal(t, nil, exists, "Expected data to exist")
-	assert.Equal(t, "world", (data.(string)), "Expected data content to be 'world'")
+	assert.Equal(t, "world", value, "Expected data content to be 'world'")
 	assert.Less(t, ttl, orgttl, "Expected item TTL to be less than the original TTL")
 	assert.NotEqual(t, int(ttl), 0, "Expected item TTL to be not 0")
 }
 
 func TestCache_TestGetWithTTLAndLoaderFunction(t *testing.T) {
 	t.Parallel()
-	cache := NewCache()
+	cache := NewCache[string, string]()
 
-	cache.SetLoaderFunction(func(key string) (data interface{}, ttl time.Duration, err error) {
-		return nil, 0, ErrNotFound
-	})
+	cache.SetLoaderFunction(LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
+		return "", 0, ErrNotFound
+	}))
 
 	_, ttl, err := cache.GetWithTTL("1")
 	assert.Equal(t, ErrNotFound, err, "Expected error to be ErrNotFound")
 	assert.Equal(t, int(ttl), 0, "Expected item TTL to be 0")
 
 	orgttl := time.Duration(1 * time.Second)
-	cache.SetLoaderFunction(func(key string) (data interface{}, ttl time.Duration, err error) {
+	cache.SetLoaderFunction(LoaderFunction[string, string](func(key string) (value string, ttl time.Duration, err error) {
 		return "1", orgttl, nil
-	})
+	}))
 
 	value, ttl, found := cache.GetWithTTL("1")
 	assert.Equal(t, nil, found)
@@ -907,7 +904,7 @@ func TestCache_TestGetWithTTLAndLoaderFunction(t *testing.T) {
 
 	value, ttl, found = cache.GetWithTTL("1")
 	assert.Equal(t, ErrClosed, found)
-	assert.Equal(t, nil, value)
+	assert.Zero(t, value)
 	assert.Equal(t, int(ttl), 0, "Expected returned ttl for an ErrClosed err to be 0")
 }
 
@@ -917,15 +914,15 @@ func TestCacheExpirationCallbackFunction(t *testing.T) {
 	expiredCount := 0
 	var lock sync.Mutex
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(500 * time.Millisecond))
-	cache.SetExpirationCallback(func(key string, value interface{}) {
+	cache.SetExpirationCallback(ExpireCallback[string, string](func(key string, value string) {
 		lock.Lock()
 		defer lock.Unlock()
 		expiredCount = expiredCount + 1
-	})
+	}))
 	cache.SetWithTTL("key", "value", time.Duration(1000*time.Millisecond))
 	cache.Set("key_2", "value")
 	<-time.After(1100 * time.Millisecond)
@@ -943,22 +940,22 @@ func TestCacheCheckExpirationCallbackFunction(t *testing.T) {
 	expiredCount := 0
 	var lock sync.Mutex
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SkipTTLExtensionOnHit(true)
 	cache.SetTTL(time.Duration(50 * time.Millisecond))
-	cache.SetCheckExpirationCallback(func(key string, value interface{}) bool {
+	cache.SetCheckExpirationCallback(CheckExpireCallback[string, string](func(key string, value string) bool {
 		if key == "key2" || key == "key4" {
 			return true
 		}
 		return false
-	})
-	cache.SetExpirationCallback(func(key string, value interface{}) {
+	}))
+	cache.SetExpirationCallback(ExpireCallback[string, string](func(key string, value string) {
 		lock.Lock()
 		expiredCount = expiredCount + 1
 		lock.Unlock()
-	})
+	}))
 	cache.Set("key", "value")
 	cache.Set("key3", "value")
 	cache.Set("key2", "value")
@@ -974,13 +971,13 @@ func TestCacheNewItemCallbackFunction(t *testing.T) {
 	t.Parallel()
 
 	newItemCount := 0
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(50 * time.Millisecond))
-	cache.SetNewItemCallback(func(key string, value interface{}) {
+	cache.SetNewItemCallback(ExpireCallback[string, string](func(key string, value string) {
 		newItemCount = newItemCount + 1
-	})
+	}))
 	cache.Set("key", "value")
 	cache.Set("key2", "value")
 	cache.Set("key", "value")
@@ -991,7 +988,7 @@ func TestCacheNewItemCallbackFunction(t *testing.T) {
 func TestCacheRemove(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(50 * time.Millisecond))
@@ -1007,28 +1004,27 @@ func TestCacheRemove(t *testing.T) {
 func TestCacheSetWithTTLExistItem(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(100 * time.Millisecond))
 	cache.SetWithTTL("key", "value", time.Duration(50*time.Millisecond))
 	<-time.After(30 * time.Millisecond)
 	cache.SetWithTTL("key", "value2", time.Duration(50*time.Millisecond))
-	data, exists := cache.Get("key")
+	value, exists := cache.Get("key")
 	assert.Equal(t, nil, exists, "Expected 'key' to exist")
-	assert.Equal(t, "value2", data.(string), "Expected 'data' to have value 'value2'")
+	assert.Equal(t, "value2", value, "Expected 'value' to have value 'value2'")
 }
 
 func TestCache_Purge(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(100 * time.Millisecond))
 
 	for i := 0; i < 5; i++ {
-
 		cache.SetWithTTL("key", "value", time.Duration(50*time.Millisecond))
 		<-time.After(30 * time.Millisecond)
 		cache.SetWithTTL("key", "value2", time.Duration(50*time.Millisecond))
@@ -1043,7 +1039,7 @@ func TestCache_Purge(t *testing.T) {
 func TestCache_Limit(t *testing.T) {
 	t.Parallel()
 
-	cache := NewCache()
+	cache := NewCache[string, string]()
 	defer cache.Close()
 
 	cache.SetTTL(time.Duration(100 * time.Second))
diff --git a/go.mod b/go.mod
index c39d9fd..c3944db 100644
--- a/go.mod
+++ b/go.mod
@@ -1,12 +1,17 @@
 module github.com/ReneKroon/ttlcache/v2
 
-go 1.15
+go 1.18
 
 require (
-	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/stretchr/testify v1.7.0
 	go.uber.org/goleak v1.1.10
-	golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
 	golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
 )
diff --git a/go.sum b/go.sum
index d1f5162..3e87340 100644
--- a/go.sum
+++ b/go.sum
@@ -22,14 +22,12 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -47,7 +45,6 @@ golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 h1:BmCFkEH4nJrYcAc2L08yX5R
 golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
diff --git a/item.go b/item.go
index 2f78f49..a3f514f 100644
--- a/item.go
+++ b/item.go
@@ -5,40 +5,42 @@ import (
 )
 
 const (
-	// ItemNotExpire Will avoid the item being expired by TTL, but can still be exired by callback etc.
+	// ItemNotExpire Will avoid the item being expired by TTL, but can
+	// still be expired by callback etc.
 	ItemNotExpire time.Duration = -1
 	// ItemExpireWithGlobalTTL will use the global TTL when set.
 	ItemExpireWithGlobalTTL time.Duration = 0
 )
 
-func newItem(key string, data interface{}, ttl time.Duration) *item {
-	item := &item{
-		data: data,
-		ttl:  ttl,
-		key:  key,
+func newItem[K comparable, V any](key K, value V, ttl time.Duration) *item[K, V] {
+	item := &item[K, V]{
+		key:   key,
+		value: value,
+		ttl:   ttl,
 	}
-	// since nobody is aware yet of this item, it's safe to touch without lock here
+	// since nobody is aware yet of this item, it's safe to touch
+	// without lock here
 	item.touch()
 	return item
 }
 
-type item struct {
-	key        string
-	data       interface{}
+type item[K comparable, V any] struct {
+	key        K
+	value      V
 	ttl        time.Duration
 	expireAt   time.Time
 	queueIndex int
 }
 
 // Reset the item expiration time
-func (item *item) touch() {
+func (item *item[K, V]) touch() {
 	if item.ttl > 0 {
 		item.expireAt = time.Now().Add(item.ttl)
 	}
 }
 
 // Verify if the item is expired
-func (item *item) expired() bool {
+func (item *item[K, V]) expired() bool {
 	if item.ttl <= 0 {
 		return false
 	}
diff --git a/metrics.go b/metrics.go
index 5f672b1..de90446 100644
--- a/metrics.go
+++ b/metrics.go
@@ -1,6 +1,7 @@
 package ttlcache
 
-// Metrics contains common cache metrics so you can calculate hit and miss rates
+// Metrics contains common cache metrics so you can calculate hit and miss
+// rates.
 type Metrics struct {
 	// succesful inserts
 	Inserted int64
diff --git a/priority_queue.go b/priority_queue.go
index 5d40548..dc751e8 100644
--- a/priority_queue.go
+++ b/priority_queue.go
@@ -4,21 +4,21 @@ import (
 	"container/heap"
 )
 
-func newPriorityQueue() *priorityQueue {
-	queue := &priorityQueue{}
+func newPriorityQueue[K comparable, V any]() *priorityQueue[K, V] {
+	queue := &priorityQueue[K, V]{}
 	heap.Init(queue)
 	return queue
 }
 
-type priorityQueue struct {
-	items []*item
+type priorityQueue[K comparable, V any] struct {
+	items []*item[K, V]
 }
 
-func (pq *priorityQueue) isEmpty() bool {
+func (pq *priorityQueue[K, V]) isEmpty() bool {
 	return len(pq.items) == 0
 }
 
-func (pq *priorityQueue) root() *item {
+func (pq *priorityQueue[K, V]) root() *item[K, V] {
 	if len(pq.items) == 0 {
 		return nil
 	}
@@ -26,32 +26,33 @@ func (pq *priorityQueue) root() *item {
 	return pq.items[0]
 }
 
-func (pq *priorityQueue) update(item *item) {
+func (pq *priorityQueue[K, V]) update(item *item[K, V]) {
 	heap.Fix(pq, item.queueIndex)
 }
 
-func (pq *priorityQueue) push(item *item) {
+func (pq *priorityQueue[K, V]) push(item *item[K, V]) {
 	heap.Push(pq, item)
 }
 
-func (pq *priorityQueue) pop() *item {
+func (pq *priorityQueue[K, V]) pop() *item[K, V] {
 	if pq.Len() == 0 {
 		return nil
 	}
-	return heap.Pop(pq).(*item)
+	return heap.Pop(pq).(*item[K, V])
 }
 
-func (pq *priorityQueue) remove(item *item) {
+func (pq *priorityQueue[K, V]) remove(item *item[K, V]) {
 	heap.Remove(pq, item.queueIndex)
 }
 
-func (pq priorityQueue) Len() int {
+func (pq priorityQueue[K, V]) Len() int {
 	length := len(pq.items)
 	return length
 }
 
-// Less will consider items with time.Time default value (epoch start) as more than set items.
-func (pq priorityQueue) Less(i, j int) bool {
+// Less will consider items with time.Time default value (epoch start) as
+// more than set items.
+func (pq priorityQueue[K, V]) Less(i, j int) bool {
 	if pq.items[i].expireAt.IsZero() {
 		return false
 	}
@@ -61,24 +62,25 @@ func (pq priorityQueue) Less(i, j int) bool {
 	return pq.items[i].expireAt.Before(pq.items[j].expireAt)
 }
 
-func (pq priorityQueue) Swap(i, j int) {
+func (pq priorityQueue[K, V]) Swap(i, j int) {
 	pq.items[i], pq.items[j] = pq.items[j], pq.items[i]
 	pq.items[i].queueIndex = i
 	pq.items[j].queueIndex = j
 }
 
-func (pq *priorityQueue) Push(x interface{}) {
-	item := x.(*item)
+func (pq *priorityQueue[K, V]) Push(x interface{}) {
+	item := x.(*item[K, V])
 	item.queueIndex = len(pq.items)
 	pq.items = append(pq.items, item)
 }
 
-func (pq *priorityQueue) Pop() interface{} {
+func (pq *priorityQueue[K, V]) Pop() interface{} {
 	old := pq.items
 	n := len(old)
 	item := old[n-1]
 	item.queueIndex = -1
-	// de-reference the element to be popped for Garbage Collector to de-allocate the memory
+	// de-reference the element to be popped for Garbage Collector to
+	// de-allocate the memory
 	old[n-1] = nil
 	pq.items = old[0 : n-1]
 	return item
diff --git a/priority_queue_test.go b/priority_queue_test.go
index 09a89c0..edbf4e0 100644
--- a/priority_queue_test.go
+++ b/priority_queue_test.go
@@ -9,7 +9,7 @@ import (
 )
 
 func TestPriorityQueuePush(t *testing.T) {
-	queue := newPriorityQueue()
+	queue := newPriorityQueue[string, string]()
 	for i := 0; i < 10; i++ {
 		queue.push(newItem(fmt.Sprintf("key_%d", i), "data", -1))
 	}
@@ -17,18 +17,18 @@ func TestPriorityQueuePush(t *testing.T) {
 }
 
 func TestPriorityQueuePop(t *testing.T) {
-	queue := newPriorityQueue()
+	queue := newPriorityQueue[string, string]()
 	for i := 0; i < 10; i++ {
 		queue.push(newItem(fmt.Sprintf("key_%d", i), "data", -1))
 	}
 	for i := 0; i < 5; i++ {
 		item := queue.pop()
-		assert.Equal(t, fmt.Sprintf("%T", item), "*ttlcache.item", "Expected 'item' to be a '*ttlcache.item'")
+		assert.Equal(t, fmt.Sprintf("%T", item), "*ttlcache.item[string,string]", "Expected 'item' to be a '*ttlcache.item'")
 	}
 	assert.Equal(t, queue.Len(), 5, "Expected queue to have 5 elements")
 	for i := 0; i < 5; i++ {
 		item := queue.pop()
-		assert.Equal(t, fmt.Sprintf("%T", item), "*ttlcache.item", "Expected 'item' to be a '*ttlcache.item'")
+		assert.Equal(t, fmt.Sprintf("%T", item), "*ttlcache.item[string,string]", "Expected 'item' to be a '*ttlcache.item'")
 	}
 	assert.Equal(t, queue.Len(), 0, "Expected queue to have 0 elements")
 
@@ -37,7 +37,7 @@ func TestPriorityQueuePop(t *testing.T) {
 }
 
 func TestPriorityQueueCheckOrder(t *testing.T) {
-	queue := newPriorityQueue()
+	queue := newPriorityQueue[string, string]()
 	for i := 10; i > 0; i-- {
 		queue.push(newItem(fmt.Sprintf("key_%d", i), "data", time.Duration(i)*time.Second))
 	}
@@ -48,9 +48,9 @@ func TestPriorityQueueCheckOrder(t *testing.T) {
 }
 
 func TestPriorityQueueRemove(t *testing.T) {
-	queue := newPriorityQueue()
-	items := make(map[string]*item)
-	var itemRemove *item
+	queue := newPriorityQueue[string, string]()
+	items := make(map[string]*item[string, string])
+	var itemRemove *item[string, string]
 	for i := 0; i < 5; i++ {
 		key := fmt.Sprintf("key_%d", i)
 		items[key] = newItem(key, "data", time.Duration(i)*time.Second)
@@ -76,7 +76,7 @@ func TestPriorityQueueRemove(t *testing.T) {
 }
 
 func TestPriorityQueueUpdate(t *testing.T) {
-	queue := newPriorityQueue()
+	queue := newPriorityQueue[string, string]()
 	item := newItem("key", "data", 1*time.Second)
 	queue.push(item)
 	assert.Equal(t, queue.Len(), 1, "The queue is supposed to be with 1 item")