feat: publish instanceID during peer comms #1420

Merged · 3 commits · Nov 12, 2024
22 changes: 15 additions & 7 deletions cmd/refinery/main.go
@@ -15,8 +15,10 @@ import (
_ "go.uber.org/automaxprocs"
"golang.org/x/exp/slices"

"github.com/dgryski/go-wyhash"
"github.com/facebookgo/inject"
"github.com/facebookgo/startstop"
"github.com/google/uuid"
libhoney "github.com/honeycombio/libhoney-go"
"github.com/honeycombio/libhoney-go/transmission"
"github.com/jonboulle/clockwork"
@@ -85,6 +87,11 @@ func main() {
os.Exit(0)
}

// instanceID is a unique identifier for this instance of refinery.
// We use a hash of a UUID to get a smaller string.
h := wyhash.Hash([]byte(uuid.NewString()), 356783547862)
instanceID := fmt.Sprintf("%08.8x", (h&0xFFFF_FFFF)^(h>>32))

a := app.App{
Version: version,
}
@@ -265,6 +272,7 @@ func main() {
{Value: refineryHealth},
{Value: &configwatcher.ConfigWatcher{}},
{Value: &a},
{Value: instanceID, Name: "instanceID"},
}
err = g.Provide(objects...)
if err != nil {
@@ -373,43 +381,43 @@ func main() {
}

var libhoneyMetrics = []metrics.Metadata{
metrics.Metadata{
{
Name: "queue_length",
Type: metrics.Gauge,
Unit: metrics.Dimensionless,
Description: "number of events waiting to be sent to destination",
},
metrics.Metadata{
{
Name: "queue_overflow",
Type: metrics.Counter,
Unit: metrics.Dimensionless,
Description: "number of events dropped due to queue overflow",
},
metrics.Metadata{
{
Name: "send_errors",
Type: metrics.Counter,
Unit: metrics.Dimensionless,
Description: "number of errors encountered while sending events to destination",
},
metrics.Metadata{
{
Name: "send_retries",
Type: metrics.Counter,
Unit: metrics.Dimensionless,
Description: "number of times a batch of events was retried",
},
metrics.Metadata{
{
Name: "batches_sent",
Type: metrics.Counter,
Unit: metrics.Dimensionless,
Description: "number of batches of events sent to destination",
},
metrics.Metadata{
{
Name: "messages_sent",
Type: metrics.Counter,
Unit: metrics.Dimensionless,
Description: "number of messages sent to destination",
},
metrics.Metadata{
{
Name: "response_decode_errors",
Type: metrics.Counter,
Unit: metrics.Dimensionless,
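The new instanceID is derived by hashing a freshly generated UUID with wyhash and XOR-folding the 64-bit result into 32 bits, which yields a short 8-character hex string. A standalone sketch of the same derivation, using the seed constant and format string from the diff above (the print in main is added here only for illustration):

package main

import (
	"fmt"

	"github.com/dgryski/go-wyhash"
	"github.com/google/uuid"
)

func main() {
	// Hash a random UUID with a fixed seed, then fold the 64-bit hash
	// into 32 bits and render it as 8 hex characters.
	h := wyhash.Hash([]byte(uuid.NewString()), 356783547862)
	instanceID := fmt.Sprintf("%08.8x", (h&0xFFFF_FFFF)^(h>>32))
	fmt.Println(instanceID) // e.g. "3fa1c2d7"; the value differs on every run
}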
125 changes: 125 additions & 0 deletions generics/mapttl.go
@@ -0,0 +1,125 @@
package generics

import (
"cmp"
"slices"
"sync"
"time"

"github.com/jonboulle/clockwork"
)

type itemWithTTL[V any] struct {
Value V
Expiration time.Time
}

// MapWithTTL is a map with a TTL (time to live) for each item. After the TTL expires,
// the item is automatically removed from the map when any of Length, Keys, SortedKeys,
// Values, or SortedValues is called.
type MapWithTTL[K cmp.Ordered, V any] struct {
Items map[K]itemWithTTL[V]
TTL time.Duration
Clock clockwork.Clock
mut sync.RWMutex
}

func NewMapWithTTL[K cmp.Ordered, V any](ttl time.Duration, initialValues map[K]V) *MapWithTTL[K, V] {
m := &MapWithTTL[K, V]{
Items: make(map[K]itemWithTTL[V]),
TTL: ttl,
Clock: clockwork.NewRealClock(),
}
for k, v := range initialValues {
m.Set(k, v)
}
return m
}

func (m *MapWithTTL[K, V]) Set(k K, v V) {
m.mut.Lock()
defer m.mut.Unlock()
item := itemWithTTL[V]{Value: v, Expiration: m.Clock.Now().Add(m.TTL)}
m.Items[k] = item
}

func (m *MapWithTTL[K, V]) Get(k K) (V, bool) {
var zero V

m.mut.RLock()
defer m.mut.RUnlock()
item, ok := m.Items[k]
if !ok {
return zero, false
}
if item.Expiration.Before(m.Clock.Now()) {
return zero, false
}
return item.Value, true
}

func (m *MapWithTTL[K, V]) Delete(k K) {
m.mut.Lock()
defer m.mut.Unlock()
delete(m.Items, k)
}

func (m *MapWithTTL[K, V]) cleanup() int {
m.mut.Lock()
defer m.mut.Unlock()
count := 0
for k, v := range m.Items {
if v.Expiration.Before(m.Clock.Now()) {
delete(m.Items, k)
continue
}
// only count items that are not expired
count++
}
return count
}

// Keys returns the keys of the map in arbitrary order
func (m *MapWithTTL[K, V]) Keys() []K {
m.cleanup()
m.mut.RLock()
defer m.mut.RUnlock()
keys := make([]K, 0, len(m.Items))
for k := range m.Items {
keys = append(keys, k)
}
return keys
}

// SortedKeys returns the keys of the map, sorted
func (m *MapWithTTL[K, V]) SortedKeys() []K {
keys := m.Keys()
slices.Sort(keys)
return keys
}

// Values returns all the values in the map in arbitrary order
func (m *MapWithTTL[K, V]) Values() []V {
m.cleanup()
m.mut.RLock()
defer m.mut.RUnlock()
values := make([]V, 0, len(m.Items))
for _, v := range m.Items {
values = append(values, v.Value)
}
return values
}

// SortedValues returns the values of the map, sorted by key
func (m *MapWithTTL[K, V]) SortedValues() []V {
keys := m.SortedKeys()
values := make([]V, 0, len(keys))
for _, k := range keys {
values = append(values, m.Items[k].Value)
}
return values
}

func (m *MapWithTTL[K, V]) Length() int {
return m.cleanup()
}
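MapWithTTL prunes lazily: expired entries are only dropped when Length, Keys, SortedKeys, Values, or SortedValues walks the map. A brief usage sketch, assuming the package is imported from the refinery module as github.com/honeycombio/refinery/generics (the key/value types and TTL here are chosen only for the example):

package main

import (
	"fmt"
	"time"

	"github.com/honeycombio/refinery/generics"
)

func main() {
	// Track peer instance IDs, letting stale entries age out after 5 seconds.
	peers := generics.NewMapWithTTL[string, string](5*time.Second, nil)
	peers.Set("12345678", "http://10.0.0.1:8081")

	if addr, ok := peers.Get("12345678"); ok {
		fmt.Println("peer address:", addr)
	}

	// Length triggers cleanup, so expired peers no longer count.
	fmt.Println("live peers:", peers.Length())
}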
79 changes: 79 additions & 0 deletions generics/mapttl_test.go
@@ -0,0 +1,79 @@
package generics

import (
"testing"
"time"

"github.com/jonboulle/clockwork"
"github.com/stretchr/testify/assert"
)

func TestMapWithTTLBasics(t *testing.T) {
m := NewMapWithTTL[string, string](100*time.Millisecond, map[string]string{"a": "A", "b": "B"})
fakeclock := clockwork.NewFakeClock()
m.Clock = fakeclock
assert.Equal(t, 2, m.Length())
fakeclock.Advance(50 * time.Millisecond)
m.Set("c", "C")
assert.Equal(t, 3, m.Length())
assert.Equal(t, []string{"a", "b", "c"}, m.SortedKeys())
assert.Equal(t, []string{"A", "B", "C"}, m.SortedValues())
fakeclock.Advance(60 * time.Millisecond)
assert.Equal(t, 1, m.Length())
assert.Equal(t, []string{"c"}, m.Keys())
ch, ok := m.Get("c")
assert.True(t, ok)
assert.Equal(t, "C", ch)
fakeclock.Advance(100 * time.Millisecond)
assert.Equal(t, 0, m.Length())
assert.Equal(t, m.Keys(), []string{})
}

func BenchmarkMapWithTTLContains(b *testing.B) {
m := NewMapWithTTL[string, struct{}](10*time.Second, nil)
fc := clockwork.NewFakeClock()
m.Clock = fc

n := 10000
traceIDs := make([]string, n)
for i := 0; i < n; i++ {
traceIDs[i] = genID(32)
if i%2 == 0 {
m.Set(traceIDs[i], struct{}{})
}
fc.Advance(1 * time.Microsecond)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Get(traceIDs[i%n])
}
}

func BenchmarkMapWithTTLExpire(b *testing.B) {
m := NewMapWithTTL[string, struct{}](1*time.Second, nil)
fc := clockwork.NewFakeClock()
m.Clock = fc

// 1K ids created at 1ms intervals
// we'll check them over the course of 1 second as well, so they should all expire by the end
n := 1000
traceIDs := make([]string, n)
for i := 0; i < n; i++ {
traceIDs[i] = genID(32)
m.Set(traceIDs[i], struct{}{})
fc.Advance(1 * time.Millisecond)
}
// make sure we have 1000 ids now
assert.Equal(b, n, m.Length())
b.ResetTimer()
advanceTime := 100 * time.Second / time.Duration(b.N)
for i := 0; i < b.N; i++ {
m.Get(traceIDs[i%n])
if i%100 == 0 {
fc.Advance(advanceTime)
}
}
b.StopTimer()
// make sure all ids have expired by now (there might be 1 or 2 that haven't)
assert.GreaterOrEqual(b, 2, m.Length())
}
1 change: 1 addition & 0 deletions internal/peer/peers_test.go
@@ -63,6 +63,7 @@ func newPeers(c config.Config) (Peers, error) {
{Value: &metrics.NullMetrics{}, Name: "metrics"},
{Value: &logger.NullLogger{}},
{Value: clockwork.NewFakeClock()},
{Value: "12345678", Name: "instanceID"},
}
err := g.Provide(objects...)
if err != nil {
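Providing the string under the name "instanceID" lets any component in the graph receive it through facebookgo/inject's named-object tags. A hypothetical consumer sketch, assuming that convention; the peerInfo type and its field are illustrative and not part of this PR:

package main

import (
	"fmt"

	"github.com/facebookgo/inject"
)

// peerInfo asks the graph for the object named "instanceID".
type peerInfo struct {
	InstanceID string `inject:"instanceID"`
}

func main() {
	var g inject.Graph
	p := &peerInfo{}
	err := g.Provide(
		&inject.Object{Value: "12345678", Name: "instanceID"},
		&inject.Object{Value: p},
	)
	if err == nil {
		err = g.Populate()
	}
	if err != nil {
		panic(err)
	}
	fmt.Println(p.InstanceID) // "12345678"
}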