From c7fb8ad84a8c2346e731f7510649dd2c6ee97eb5 Mon Sep 17 00:00:00 2001 From: Richard Knop Date: Tue, 23 Feb 2021 01:17:49 +0000 Subject: [PATCH] big v2 refactor --- example/{v1 => }/amqp/main.go | 0 example/{v1 => }/redis/main.go | 0 go.mod | 10 +- go.sum | 27 +- integration-tests/amqp_amqp_test.go | 51 -- integration-tests/redis_redis_test.go | 38 -- v2/backends/amqp/amqp.go | 396 +++++++++++ v2/backends/amqp/amqp_test.go | 179 +++++ v2/backends/dynamodb/dynamodb.go | 639 ++++++++++++++++++ v2/backends/dynamodb/dynamodb_export_test.go | 249 +++++++ v2/backends/dynamodb/dynamodb_test.go | 658 +++++++++++++++++++ v2/backends/eager/eager.go | 208 ++++++ v2/backends/eager/eager_test.go | 342 ++++++++++ v2/backends/iface/interfaces.go | 28 + v2/backends/memcache/memcache.go | 292 ++++++++ v2/backends/memcache/memcache_test.go | 142 ++++ v2/backends/mongo/mongodb.go | 358 ++++++++++ v2/backends/mongo/mongodb_test.go | 247 +++++++ v2/backends/null/null.go | 146 ++++ v2/backends/package.go | 1 + v2/backends/redis/goredis.go | 313 +++++++++ v2/backends/redis/goredis_test.go | 161 +++++ v2/backends/redis/redis.go | 354 ++++++++++ v2/backends/redis/redis_test.go | 157 +++++ v2/backends/result/async_result.go | 256 ++++++++ v2/brokers/amqp/amqp.go | 492 ++++++++++++++ v2/brokers/amqp/amqp_concurrence_test.go | 60 ++ v2/brokers/amqp/amqp_test.go | 46 ++ v2/brokers/eager/eager.go | 68 ++ v2/brokers/errs/errors.go | 28 + v2/brokers/gcppubsub/gcp_pubsub.go | 196 ++++++ v2/brokers/iface/interfaces.go | 29 + v2/brokers/package.go | 1 + v2/brokers/redis/goredis.go | 422 ++++++++++++ v2/brokers/redis/redis.go | 487 ++++++++++++++ v2/brokers/sqs/sqs.go | 368 +++++++++++ v2/brokers/sqs/sqs_export_test.go | 211 ++++++ v2/brokers/sqs/sqs_test.go | 340 ++++++++++ v2/common/amqp.go | 147 +++++ v2/common/backend.go | 25 + v2/common/broker.go | 139 ++++ v2/common/broker_test.go | 74 +++ v2/common/redis.go | 87 +++ v2/config/config.go | 180 +++++ v2/config/env.go | 37 ++ v2/config/env_test.go 
| 46 ++ v2/config/file.go | 82 +++ v2/config/file_test.go | 97 +++ v2/config/test.env | 9 + v2/config/testconfig.yml | 34 + {example/v2 => v2/example}/amqp/main.go | 16 +- {example/v2 => v2/example}/go-redis/main.go | 16 +- {example/v2 => v2/example}/redigo/main.go | 16 +- v2/example/tasks/tasks.go | 75 +++ v2/example/tracers/jaeger.go | 41 ++ v2/go.mod | 25 + v2/go.sum | 559 ++++++++++++++++ v2/integration-tests/amqp_amqp_test.go | 59 ++ v2/integration-tests/redis_redis_test.go | 49 ++ v2/integration-tests/suite_test.go | 488 ++++++++++++++ v2/locks/eager/eager.go | 55 ++ v2/locks/eager/eager_test.go | 42 ++ v2/locks/iface/interfaces.go | 13 + v2/locks/redis/redis.go | 109 +++ v2/log/log.go | 54 ++ v2/log/log_test.go | 14 + v2/retry/fibonacci.go | 20 + v2/retry/fibonacci_test.go | 32 + v2/retry/retry.go | 31 + v2/server.go | 20 +- v2/server_test.go | 8 +- v2/tasks/errors.go | 32 + v2/tasks/reflect.go | 352 ++++++++++ v2/tasks/reflect_test.go | 243 +++++++ v2/tasks/result.go | 40 ++ v2/tasks/result_test.go | 27 + v2/tasks/signature.go | 96 +++ v2/tasks/state.go | 109 +++ v2/tasks/state_test.go | 31 + v2/tasks/task.go | 201 ++++++ v2/tasks/task_test.go | 129 ++++ v2/tasks/validate.go | 42 ++ v2/tasks/validate_test.go | 32 + v2/tasks/workflow.go | 95 +++ v2/tasks/workflow_test.go | 61 ++ v2/tracing/tracing.go | 141 ++++ v2/utils/deepcopy.go | 83 +++ v2/utils/deepcopy_test.go | 32 + v2/utils/utils.go | 14 + v2/utils/utils_test.go | 14 + v2/utils/uuid.go | 11 + v2/utils/uuid_test.go | 13 + v2/worker.go | 12 +- 93 files changed, 12364 insertions(+), 145 deletions(-) rename example/{v1 => }/amqp/main.go (100%) rename example/{v1 => }/redis/main.go (100%) create mode 100644 v2/backends/amqp/amqp.go create mode 100644 v2/backends/amqp/amqp_test.go create mode 100644 v2/backends/dynamodb/dynamodb.go create mode 100644 v2/backends/dynamodb/dynamodb_export_test.go create mode 100644 v2/backends/dynamodb/dynamodb_test.go create mode 100644 v2/backends/eager/eager.go create 
mode 100644 v2/backends/eager/eager_test.go create mode 100644 v2/backends/iface/interfaces.go create mode 100644 v2/backends/memcache/memcache.go create mode 100644 v2/backends/memcache/memcache_test.go create mode 100644 v2/backends/mongo/mongodb.go create mode 100644 v2/backends/mongo/mongodb_test.go create mode 100644 v2/backends/null/null.go create mode 100644 v2/backends/package.go create mode 100644 v2/backends/redis/goredis.go create mode 100644 v2/backends/redis/goredis_test.go create mode 100644 v2/backends/redis/redis.go create mode 100644 v2/backends/redis/redis_test.go create mode 100644 v2/backends/result/async_result.go create mode 100644 v2/brokers/amqp/amqp.go create mode 100644 v2/brokers/amqp/amqp_concurrence_test.go create mode 100644 v2/brokers/amqp/amqp_test.go create mode 100644 v2/brokers/eager/eager.go create mode 100644 v2/brokers/errs/errors.go create mode 100644 v2/brokers/gcppubsub/gcp_pubsub.go create mode 100644 v2/brokers/iface/interfaces.go create mode 100644 v2/brokers/package.go create mode 100644 v2/brokers/redis/goredis.go create mode 100644 v2/brokers/redis/redis.go create mode 100644 v2/brokers/sqs/sqs.go create mode 100644 v2/brokers/sqs/sqs_export_test.go create mode 100644 v2/brokers/sqs/sqs_test.go create mode 100644 v2/common/amqp.go create mode 100644 v2/common/backend.go create mode 100644 v2/common/broker.go create mode 100644 v2/common/broker_test.go create mode 100644 v2/common/redis.go create mode 100644 v2/config/config.go create mode 100644 v2/config/env.go create mode 100644 v2/config/env_test.go create mode 100644 v2/config/file.go create mode 100644 v2/config/file_test.go create mode 100644 v2/config/test.env create mode 100644 v2/config/testconfig.yml rename {example/v2 => v2/example}/amqp/main.go (96%) rename {example/v2 => v2/example}/go-redis/main.go (96%) rename {example/v2 => v2/example}/redigo/main.go (96%) create mode 100644 v2/example/tasks/tasks.go create mode 100644 v2/example/tracers/jaeger.go 
create mode 100644 v2/go.mod create mode 100644 v2/go.sum create mode 100644 v2/integration-tests/amqp_amqp_test.go create mode 100644 v2/integration-tests/redis_redis_test.go create mode 100644 v2/integration-tests/suite_test.go create mode 100644 v2/locks/eager/eager.go create mode 100644 v2/locks/eager/eager_test.go create mode 100644 v2/locks/iface/interfaces.go create mode 100644 v2/locks/redis/redis.go create mode 100644 v2/log/log.go create mode 100644 v2/log/log_test.go create mode 100644 v2/retry/fibonacci.go create mode 100644 v2/retry/fibonacci_test.go create mode 100644 v2/retry/retry.go create mode 100644 v2/tasks/errors.go create mode 100644 v2/tasks/reflect.go create mode 100644 v2/tasks/reflect_test.go create mode 100644 v2/tasks/result.go create mode 100644 v2/tasks/result_test.go create mode 100644 v2/tasks/signature.go create mode 100644 v2/tasks/state.go create mode 100644 v2/tasks/state_test.go create mode 100644 v2/tasks/task.go create mode 100644 v2/tasks/task_test.go create mode 100644 v2/tasks/validate.go create mode 100644 v2/tasks/validate_test.go create mode 100644 v2/tasks/workflow.go create mode 100644 v2/tasks/workflow_test.go create mode 100644 v2/tracing/tracing.go create mode 100644 v2/utils/deepcopy.go create mode 100644 v2/utils/deepcopy_test.go create mode 100644 v2/utils/utils.go create mode 100644 v2/utils/utils_test.go create mode 100644 v2/utils/uuid.go create mode 100644 v2/utils/uuid_test.go diff --git a/example/v1/amqp/main.go b/example/amqp/main.go similarity index 100% rename from example/v1/amqp/main.go rename to example/amqp/main.go diff --git a/example/v1/redis/main.go b/example/redis/main.go similarity index 100% rename from example/v1/redis/main.go rename to example/redis/main.go diff --git a/go.mod b/go.mod index 552c682e4..16c97b4fc 100644 --- a/go.mod +++ b/go.mod @@ -1,28 +1,26 @@ module github.com/RichardKnop/machinery +go 1.15 + require ( cloud.google.com/go v0.76.0 // indirect cloud.google.com/go/pubsub 
v1.9.1 github.com/RichardKnop/logging v0.0.0-20190827224416-1a693bdd4fae + github.com/RichardKnop/machinery/v2 v2.0.4 // indirect github.com/aws/aws-sdk-go v1.37.5 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/go-redis/redis/v8 v8.5.0 github.com/go-redsync/redsync/v4 v4.0.4 - github.com/golang/snappy v0.0.2 // indirect github.com/gomodule/redigo v2.0.0+incompatible github.com/google/uuid v1.2.0 - github.com/hashicorp/errwrap v1.1.0 // indirect github.com/kelseyhightower/envconfig v1.4.0 github.com/klauspost/compress v1.11.7 // indirect github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/robfig/cron/v3 v3.0.1 - github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/streadway/amqp v1.0.0 github.com/stretchr/testify v1.6.1 github.com/urfave/cli v1.22.5 - github.com/xdg/stringprep v1.0.0 // indirect go.mongodb.org/mongo-driver v1.4.6 go.opencensus.io v0.22.6 // indirect golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect @@ -33,5 +31,3 @@ require ( ) replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 - -go 1.15 diff --git a/go.sum b/go.sum index ceb3359f8..0af9b88be 100644 --- a/go.sum +++ b/go.sum @@ -13,6 +13,7 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.71.0/go.mod h1:qZfY4Y7AEIQwG/fQYD3xrxLNkQZ0Xzf3HGeqCkA6LVM= cloud.google.com/go v0.72.0 h1:eWRCuwubtDrCJG0oSUMgnsbD4CmPFQF2ei4OFbXvwww= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.73.0/go.mod h1:BkDh9dFvGjCitVw03TNjKbBxXNKULXXIq6orU6HrJ4Q= @@ -31,6 +32,7 @@ 
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.8.3/go.mod h1:m8NMRz5lt0YjbQQ40RjocDVRjgYyzyYpP6ix3dxwRno= cloud.google.com/go/pubsub v1.9.1 h1:hXEte3a/Brd+Tl9ecEkHH3ow9wpnOTZ28lSOszYj6Cg= cloud.google.com/go/pubsub v1.9.1/go.mod h1:7QTUeCiy+P1dVPO8hHVbZSHDfibbgm1gbKyOVYnqb8g= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= @@ -43,7 +45,10 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/RichardKnop/logging v0.0.0-20190827224416-1a693bdd4fae h1:DcFpTQBYQ9Ct2d6sC7ol0/ynxc2pO1cpGUM+f4t5adg= github.com/RichardKnop/logging v0.0.0-20190827224416-1a693bdd4fae/go.mod h1:rJJ84PyA/Wlmw1hO+xTzV2wsSUon6J5ktg0g8BF2PuU= +github.com/RichardKnop/machinery/v2 v2.0.4 h1:Dbg44B+9VPYyZfQ9KBmVDFePPRcHUjSIWS+fHeDdv5I= +github.com/RichardKnop/machinery/v2 v2.0.4/go.mod h1:rvTUa1ItiI426Fxtd94kA5hrKUfpWrCtEcFtruNHaWE= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.35.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.37.5 h1:9zJ1aXRk1gLSWEeaMXa7Hbv1pIM915T2tpaIJi0+mkA= github.com/aws/aws-sdk-go v1.37.5/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0= @@ -59,7 +64,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -83,6 +87,7 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4= github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v8 v8.1.1/go.mod h1:ysgGY09J/QeDYbu3HikWEIPCwaeOkuNoTgKayTEaEOw= +github.com/go-redis/redis/v8 v8.4.0/go.mod h1:A1tbYoHSa1fXwN+//ljcCYYJeLmVrwL9hbQN45Jdy0M= github.com/go-redis/redis/v8 v8.5.0 h1:L3r1Q3I5WOUdXZGCP6g44EruKh0u3n6co5Hl5xWkdGA= github.com/go-redis/redis/v8 v8.5.0/go.mod h1:YmEcgBDttjnkbMzDAhDtQxY9yVA7jMN6PCR5HeMvqFE= github.com/go-redsync/redsync/v4 v4.0.4 h1:ru0qG+VCefaZSx3a5ADmlKZXkNdgeeYWIuymDu/tzV8= @@ -142,7 +147,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= 
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= @@ -183,7 +187,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -207,6 +210,7 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -225,12 +229,14 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.2/go.mod 
h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= @@ -249,7 +255,6 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -276,12 +281,12 @@ github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= 
github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.6 h1:rh7GdYmDrb8AQSkF8yteAus8qYOgOASWDOv1BWqBXkU= go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -294,6 +299,7 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.22.6 h1:BdkrbWrzDlV9dnbzoP7sfN+dHheJ4J9JOaYxcUDL+ok= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= +go.opentelemetry.io/otel v0.14.0/go.mod h1:vH5xEuwy7Rts0GNtsCW3HYQoZDY+OmBJ6t1bFGGlxgw= go.opentelemetry.io/otel v0.16.0 h1:uIWEbdeb4vpKPGITLsRVUS44L5oDbDUCZxn8lkxhmgw= go.opentelemetry.io/otel v0.16.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -304,6 +310,7 @@ golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -339,6 +346,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -371,7 +379,9 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -467,6 +477,7 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -513,7 +524,9 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201202200335-bef1c476418a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -543,6 +556,7 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= @@ -587,7 +601,9 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201119123407-9b1e624d6bc4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201203001206-6486ece9c497/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201209185603-f92720507ed4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -609,6 +625,7 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= diff --git a/integration-tests/amqp_amqp_test.go b/integration-tests/amqp_amqp_test.go index 23384940b..93295d148 100644 --- a/integration-tests/amqp_amqp_test.go +++ b/integration-tests/amqp_amqp_test.go @@ -6,11 +6,6 @@ import ( "github.com/RichardKnop/machinery/v1" "github.com/RichardKnop/machinery/v1/config" - - amqpbackend "github.com/RichardKnop/machinery/v1/backends/amqp" - amqpbroker "github.com/RichardKnop/machinery/v1/brokers/amqp" - eagerlock "github.com/RichardKnop/machinery/v1/locks/eager" - machineryV2 "github.com/RichardKnop/machinery/v2" ) func TestAmqpAmqp(t *testing.T) { @@ -52,49 +47,3 @@ func TestAmqpAmqp(t *testing.T) { go worker.Launch() testAll(server, t) } - -func TestAmqpAmqp_V2(t *testing.T) { - amqpURL := os.Getenv("AMQP_URL") - if amqpURL == "" { - t.Skip("AMQP_URL is not defined") - } - - finalAmqpURL := amqpURL - var finalSeparator string - - amqpURLs := os.Getenv("AMQP_URLS") - if amqpURLs != "" { - separator := os.Getenv("AMQP_URLS_SEPARATOR") - if separator == "" { - return - } - finalSeparator = separator - finalAmqpURL = amqpURLs - } - - cnf := &config.Config{ - Broker: finalAmqpURL, - 
MultipleBrokerSeparator: finalSeparator, - DefaultQueue: "machinery_tasks", - ResultBackend: amqpURL, - ResultsExpireIn: 3600, - AMQP: &config.AMQPConfig{ - Exchange: "test_exchange", - ExchangeType: "direct", - BindingKey: "test_task", - PrefetchCount: 1, - }, - } - - broker := amqpbroker.New(cnf) - backend := amqpbackend.New(cnf) - lock := eagerlock.New() - server := machineryV2.NewServer(cnf, broker, backend, lock) - - registerTestTasks(server) - - worker := server.NewWorker("test_worker", 0) - defer worker.Quit() - go worker.Launch() - testAll(server, t) -} diff --git a/integration-tests/redis_redis_test.go b/integration-tests/redis_redis_test.go index dd757544d..58b654f58 100644 --- a/integration-tests/redis_redis_test.go +++ b/integration-tests/redis_redis_test.go @@ -9,11 +9,6 @@ import ( "github.com/RichardKnop/machinery/v1" "github.com/RichardKnop/machinery/v1/config" - - redisbackend "github.com/RichardKnop/machinery/v1/backends/redis" - redisbroker "github.com/RichardKnop/machinery/v1/brokers/redis" - eagerlock "github.com/RichardKnop/machinery/v1/locks/eager" - machineryV2 "github.com/RichardKnop/machinery/v2" ) func TestRedisRedis_Redigo(t *testing.T) { @@ -36,39 +31,6 @@ func TestRedisRedis_Redigo(t *testing.T) { testAll(server, t) } -func TestRedisRedis_V2_GoRedis(t *testing.T) { - redisURL := os.Getenv("REDIS_URL") - if redisURL == "" { - t.Skip("REDIS_URL is not defined") - } - - cnf := &config.Config{ - DefaultQueue: "machinery_tasks", - ResultsExpireIn: 3600, - Redis: &config.RedisConfig{ - MaxIdle: 3, - IdleTimeout: 240, - ReadTimeout: 15, - WriteTimeout: 15, - ConnectTimeout: 15, - NormalTasksPollPeriod: 1000, - DelayedTasksPollPeriod: 500, - }, - } - - broker := redisbroker.NewGR(cnf, []string{redisURL}, 0) - backend := redisbackend.NewGR(cnf, []string{redisURL}, 0) - lock := eagerlock.New() - server := machineryV2.NewServer(cnf, broker, backend, lock) - - registerTestTasks(server) - - worker := server.NewWorker("test_worker", 0) - defer 
worker.Quit() - go worker.Launch() - testAll(server, t) -} - func TestRedisRedisNormalTaskPollPeriodLessThan1SecondShouldNotFailNextTask(t *testing.T) { redisURL := os.Getenv("REDIS_URL") if redisURL == "" { diff --git a/v2/backends/amqp/amqp.go b/v2/backends/amqp/amqp.go new file mode 100644 index 000000000..c9103bf2e --- /dev/null +++ b/v2/backends/amqp/amqp.go @@ -0,0 +1,396 @@ +package amqp + +// NOTE: Using AMQP as a result backend is quite tricky since every time we +// read a message from the queue keeping task states, the message is removed +// from the queue. This leads to problems with keeping a reliable state of a +// group of tasks since concurrent processes updating the group state cause +// race conditions and inconsistent state. +// +// This is avoided by a "clever" hack. A special queue identified by a group +// UUID is created and we store serialised TaskState objects of successfully +// completed tasks. By inspecting the queue we can then say: +// 1) If all group tasks finished (number of unacked messages = group task count) +// 2) If all group tasks finished AND succeeded (by consuming the queue) +// +// It is important to consume the queue exclusively to avoid race conditions. 
+ +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/streadway/amqp" +) + +// Backend represents an AMQP result backend +type Backend struct { + common.Backend + common.AMQPConnector +} + +// New creates Backend instance +func New(cnf *config.Config) iface.Backend { + return &Backend{Backend: common.NewBackend(cnf), AMQPConnector: common.AMQPConnector{}} +} + +// InitGroup creates and saves a group meta data object +func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { + return nil +} + +// GroupCompleted returns true if all tasks in a group finished +// NOTE: Given AMQP limitation this will only return true if all finished +// tasks were successful as we do not keep track of completed failed tasks +func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { + conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig) + if err != nil { + return false, err + } + defer b.Close(channel, conn) + + queueState, err := b.InspectQueue(channel, groupUUID) + if err != nil { + return false, nil + } + + return queueState.Messages == groupTaskCount, nil +} + +// GroupTaskStates returns states of all tasks in the group +func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { + conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig) + if err != nil { + return nil, err + } + defer b.Close(channel, conn) + + queueState, err := b.InspectQueue(channel, groupUUID) + if err != nil { + return nil, err + } + + if queueState.Messages != groupTaskCount { + return nil, fmt.Errorf("Already consumed: %v", err) + } + + deliveries, err := channel.Consume( + groupUUID, // queue name + "", // 
consumer tag + false, // auto-ack + true, // exclusive + false, // no-local + false, // no-wait + nil, // arguments + ) + if err != nil { + return nil, fmt.Errorf("Queue consume error: %s", err) + } + + states := make([]*tasks.TaskState, groupTaskCount) + for i := 0; i < groupTaskCount; i++ { + d := <-deliveries + + state := new(tasks.TaskState) + decoder := json.NewDecoder(bytes.NewReader([]byte(d.Body))) + decoder.UseNumber() + if err := decoder.Decode(state); err != nil { + d.Nack(false, false) // multiple, requeue + return nil, err + } + + d.Ack(false) // multiple + + states[i] = state + } + + return states, nil +} + +// TriggerChord flags chord as triggered in the backend storage to make sure +// chord is never trigerred multiple times. Returns a boolean flag to indicate +// whether the worker should trigger chord (true) or no if it has been triggered +// already (false) +func (b *Backend) TriggerChord(groupUUID string) (bool, error) { + conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig) + if err != nil { + return false, err + } + defer b.Close(channel, conn) + + _, err = b.InspectQueue(channel, amqmChordTriggeredQueue(groupUUID)) + if err != nil { + return true, nil + } + + return false, nil +} + +// SetStatePending updates task state to PENDING +func (b *Backend) SetStatePending(signature *tasks.Signature) error { + taskState := tasks.NewPendingTaskState(signature) + return b.updateState(taskState) +} + +// SetStateReceived updates task state to RECEIVED +func (b *Backend) SetStateReceived(signature *tasks.Signature) error { + taskState := tasks.NewReceivedTaskState(signature) + return b.updateState(taskState) +} + +// SetStateStarted updates task state to STARTED +func (b *Backend) SetStateStarted(signature *tasks.Signature) error { + taskState := tasks.NewStartedTaskState(signature) + return b.updateState(taskState) +} + +// SetStateRetry updates task state to RETRY +func (b *Backend) SetStateRetry(signature 
*tasks.Signature) error { + state := tasks.NewRetryTaskState(signature) + return b.updateState(state) +} + +// SetStateSuccess updates task state to SUCCESS +func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { + taskState := tasks.NewSuccessTaskState(signature, results) + + if err := b.updateState(taskState); err != nil { + return err + } + + if signature.GroupUUID == "" { + return nil + } + + return b.markTaskCompleted(signature, taskState) +} + +// SetStateFailure updates task state to FAILURE +func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { + taskState := tasks.NewFailureTaskState(signature, err) + + if err := b.updateState(taskState); err != nil { + return err + } + + if signature.GroupUUID == "" { + return nil + } + + return b.markTaskCompleted(signature, taskState) +} + +// GetState returns the latest task state. It will only return the status once +// as the message will get consumed and removed from the queue. +func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { + declareQueueArgs := amqp.Table{ + // Time in milliseconds + // after that message will expire + "x-message-ttl": int32(b.getExpiresIn()), + // Time after that the queue will be deleted. 
+ "x-expires": int32(b.getExpiresIn()), + } + conn, channel, _, _, _, err := b.Connect( + b.GetConfig().ResultBackend, + "", + b.GetConfig().TLSConfig, + b.GetConfig().AMQP.Exchange, // exchange name + b.GetConfig().AMQP.ExchangeType, // exchange type + taskUUID, // queue name + false, // queue durable + true, // queue delete when unused + taskUUID, // queue binding key + nil, // exchange declare args + declareQueueArgs, // queue declare args + nil, // queue binding args + ) + if err != nil { + return nil, err + } + defer b.Close(channel, conn) + + d, ok, err := channel.Get( + taskUUID, // queue name + false, // multiple + ) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("No state ready") + } + + d.Ack(false) + + state := new(tasks.TaskState) + decoder := json.NewDecoder(bytes.NewReader([]byte(d.Body))) + decoder.UseNumber() + if err := decoder.Decode(state); err != nil { + log.ERROR.Printf("Failed to unmarshal task state: %s", string(d.Body)) + log.ERROR.Print(err) + return nil, err + } + + return state, nil +} + +// PurgeState deletes stored task state +func (b *Backend) PurgeState(taskUUID string) error { + conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig) + if err != nil { + return err + } + defer b.Close(channel, conn) + + return b.DeleteQueue(channel, taskUUID) +} + +// PurgeGroupMeta deletes stored group meta data +func (b *Backend) PurgeGroupMeta(groupUUID string) error { + conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig) + if err != nil { + return err + } + defer b.Close(channel, conn) + + b.DeleteQueue(channel, amqmChordTriggeredQueue(groupUUID)) + + return b.DeleteQueue(channel, groupUUID) +} + +// updateState saves current task state +func (b *Backend) updateState(taskState *tasks.TaskState) error { + message, err := json.Marshal(taskState) + if err != nil { + return fmt.Errorf("JSON marshal error: %s", err) + } + + declareQueueArgs := amqp.Table{ + // Time 
in milliseconds + // after that message will expire + "x-message-ttl": int32(b.getExpiresIn()), + // Time after that the queue will be deleted. + "x-expires": int32(b.getExpiresIn()), + } + conn, channel, queue, confirmsChan, _, err := b.Connect( + b.GetConfig().ResultBackend, + "", + b.GetConfig().TLSConfig, + b.GetConfig().AMQP.Exchange, // exchange name + b.GetConfig().AMQP.ExchangeType, // exchange type + taskState.TaskUUID, // queue name + false, // queue durable + true, // queue delete when unused + taskState.TaskUUID, // queue binding key + nil, // exchange declare args + declareQueueArgs, // queue declare args + nil, // queue binding args + ) + if err != nil { + return err + } + defer b.Close(channel, conn) + + if err := channel.Publish( + b.GetConfig().AMQP.Exchange, // exchange + queue.Name, // routing key + false, // mandatory + false, // immediate + amqp.Publishing{ + ContentType: "application/json", + Body: message, + DeliveryMode: amqp.Persistent, // Persistent // Transient + }, + ); err != nil { + return err + } + + confirmed := <-confirmsChan + + if confirmed.Ack { + return nil + } + + return fmt.Errorf("Failed delivery of delivery tag: %d", confirmed.DeliveryTag) +} + +// getExpiresIn returns expiration time +func (b *Backend) getExpiresIn() int { + resultsExpireIn := b.GetConfig().ResultsExpireIn * 1000 + if resultsExpireIn == 0 { + // // expire results after 1 hour by default + resultsExpireIn = config.DefaultResultsExpireIn * 1000 + } + return resultsExpireIn +} + +// markTaskCompleted marks task as completed in either groupdUUID_success +// or groupUUID_failure queue. 
This is important for GroupCompleted and +// GroupSuccessful methods +func (b *Backend) markTaskCompleted(signature *tasks.Signature, taskState *tasks.TaskState) error { + if signature.GroupUUID == "" || signature.GroupTaskCount == 0 { + return nil + } + + message, err := json.Marshal(taskState) + if err != nil { + return fmt.Errorf("JSON marshal error: %s", err) + } + + declareQueueArgs := amqp.Table{ + // Time in milliseconds + // after that message will expire + "x-message-ttl": int32(b.getExpiresIn()), + // Time after that the queue will be deleted. + "x-expires": int32(b.getExpiresIn()), + } + conn, channel, queue, confirmsChan, _, err := b.Connect( + b.GetConfig().ResultBackend, + "", + b.GetConfig().TLSConfig, + b.GetConfig().AMQP.Exchange, // exchange name + b.GetConfig().AMQP.ExchangeType, // exchange type + signature.GroupUUID, // queue name + false, // queue durable + true, // queue delete when unused + signature.GroupUUID, // queue binding key + nil, // exchange declare args + declareQueueArgs, // queue declare args + nil, // queue binding args + ) + if err != nil { + return err + } + defer b.Close(channel, conn) + + if err := channel.Publish( + b.GetConfig().AMQP.Exchange, // exchange + queue.Name, // routing key + false, // mandatory + false, // immediate + amqp.Publishing{ + ContentType: "application/json", + Body: message, + DeliveryMode: amqp.Persistent, // Persistent // Transient + }, + ); err != nil { + return err + } + + confirmed := <-confirmsChan + + if !confirmed.Ack { + return fmt.Errorf("Failed delivery of delivery tag: %v", confirmed.DeliveryTag) + } + + return nil +} + +func amqmChordTriggeredQueue(groupUUID string) string { + return fmt.Sprintf("%s_chord_triggered", groupUUID) +} diff --git a/v2/backends/amqp/amqp_test.go b/v2/backends/amqp/amqp_test.go new file mode 100644 index 000000000..1a84c3b13 --- /dev/null +++ b/v2/backends/amqp/amqp_test.go @@ -0,0 +1,179 @@ +package amqp_test + +import ( + "os" + "testing" + "time" + + 
"github.com/RichardKnop/machinery/v2/backends/amqp" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +var ( + amqpConfig *config.Config +) + +func init() { + amqpURL := os.Getenv("AMQP_URL") + if amqpURL == "" { + return + } + + finalAmqpURL := amqpURL + var finalSeparator string + + amqpURLs := os.Getenv("AMQP_URLS") + if amqpURLs != "" { + separator := os.Getenv("AMQP_URLS_SEPARATOR") + if separator == "" { + return + } + finalSeparator = separator + finalAmqpURL = amqpURLs + } + + amqp2URL := os.Getenv("AMQP2_URL") + if amqp2URL == "" { + amqp2URL = amqpURL + } + + amqpConfig = &config.Config{ + Broker: finalAmqpURL, + MultipleBrokerSeparator: finalSeparator, + DefaultQueue: "test_queue", + ResultBackend: amqp2URL, + AMQP: &config.AMQPConfig{ + Exchange: "test_exchange", + ExchangeType: "direct", + BindingKey: "test_task", + PrefetchCount: 1, + }, + } +} + +func TestGroupCompleted(t *testing.T) { + if os.Getenv("AMQP_URL") == "" { + t.Skip("AMQP_URL is not defined") + } + + groupUUID := "testGroupUUID" + groupTaskCount := 2 + task1 := &tasks.Signature{ + UUID: "testTaskUUID1", + GroupUUID: groupUUID, + GroupTaskCount: groupTaskCount, + } + task2 := &tasks.Signature{ + UUID: "testTaskUUID2", + GroupUUID: groupUUID, + GroupTaskCount: groupTaskCount, + } + + backend := amqp.New(amqpConfig) + + // Cleanup before the test + backend.PurgeState(task1.UUID) + backend.PurgeState(task2.UUID) + backend.PurgeGroupMeta(groupUUID) + + groupCompleted, err := backend.GroupCompleted(groupUUID, groupTaskCount) + if assert.NoError(t, err) { + assert.False(t, groupCompleted) + } + + backend.InitGroup(groupUUID, []string{task1.UUID, task2.UUID}) + + groupCompleted, err = backend.GroupCompleted(groupUUID, groupTaskCount) + if assert.NoError(t, err) { + assert.False(t, groupCompleted) + } + + backend.SetStatePending(task1) + backend.SetStateStarted(task2) + groupCompleted, err = 
backend.GroupCompleted(groupUUID, groupTaskCount) + if assert.NoError(t, err) { + assert.False(t, groupCompleted) + } + + taskResults := []*tasks.TaskResult{new(tasks.TaskResult)} + backend.SetStateSuccess(task1, taskResults) + backend.SetStateSuccess(task2, taskResults) + groupCompleted, err = backend.GroupCompleted(groupUUID, groupTaskCount) + if assert.NoError(t, err) { + assert.True(t, groupCompleted) + } +} + +func TestGetState(t *testing.T) { + if os.Getenv("AMQP_URL") == "" { + t.Skip("AMQP_URL is not defined") + } + + signature := &tasks.Signature{ + UUID: "testTaskUUID", + GroupUUID: "testGroupUUID", + } + + go func() { + backend := amqp.New(amqpConfig) + backend.SetStatePending(signature) + time.Sleep(2 * time.Millisecond) + backend.SetStateReceived(signature) + time.Sleep(2 * time.Millisecond) + backend.SetStateStarted(signature) + time.Sleep(2 * time.Millisecond) + + taskResults := []*tasks.TaskResult{ + { + Type: "float64", + Value: 2, + }, + } + backend.SetStateSuccess(signature, taskResults) + }() + + backend := amqp.New(amqpConfig) + + var ( + taskState *tasks.TaskState + err error + ) + for { + taskState, err = backend.GetState(signature.UUID) + if taskState == nil { + assert.Equal(t, "No state ready", err.Error()) + continue + } + + assert.NoError(t, err) + if taskState.IsCompleted() { + break + } + } +} + +func TestPurgeState(t *testing.T) { + if os.Getenv("AMQP_URL") == "" { + t.Skip("AMQP_URL is not defined") + } + + signature := &tasks.Signature{ + UUID: "testTaskUUID", + GroupUUID: "testGroupUUID", + } + + backend := amqp.New(amqpConfig) + + backend.SetStatePending(signature) + backend.SetStateReceived(signature) + taskState, err := backend.GetState(signature.UUID) + assert.NotNil(t, taskState) + assert.NoError(t, err) + + backend.PurgeState(taskState.TaskUUID) + taskState, err = backend.GetState(signature.UUID) + assert.Nil(t, taskState) + assert.Error(t, err) +} diff --git a/v2/backends/dynamodb/dynamodb.go 
b/v2/backends/dynamodb/dynamodb.go new file mode 100644 index 000000000..9401cb6b7 --- /dev/null +++ b/v2/backends/dynamodb/dynamodb.go @@ -0,0 +1,639 @@ +package dynamodb + +import ( + "errors" + "fmt" + "math" + "time" + + "github.com/aws/aws-sdk-go/aws/session" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" +) + +const ( + BatchItemsLimit = 99 + MaxFetchAttempts = 3 +) + +// Backend ... +type Backend struct { + common.Backend + cnf *config.Config + client dynamodbiface.DynamoDBAPI +} + +// New creates a Backend instance +func New(cnf *config.Config) iface.Backend { + backend := &Backend{Backend: common.NewBackend(cnf), cnf: cnf} + + if cnf.DynamoDB != nil && cnf.DynamoDB.Client != nil { + backend.client = cnf.DynamoDB.Client + } else { + sess := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) + backend.client = dynamodb.New(sess) + } + + // Check if needed tables exist + err := backend.checkRequiredTablesIfExist() + if err != nil { + log.FATAL.Printf("Failed to prepare tables. Error: %v", err) + } + return backend +} + +// InitGroup ... +func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { + meta := tasks.GroupMeta{ + GroupUUID: groupUUID, + TaskUUIDs: taskUUIDs, + CreatedAt: time.Now().UTC(), + TTL: b.getExpirationTime(), + } + av, err := dynamodbattribute.MarshalMap(meta) + if err != nil { + log.ERROR.Printf("Error when marshaling Dynamodb attributes. 
Err: %v", err) + return err + } + input := &dynamodb.PutItemInput{ + Item: av, + TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), + } + _, err = b.client.PutItem(input) + + if err != nil { + log.ERROR.Printf("Got error when calling PutItem: %v; Error: %v", input, err) + return err + } + return nil +} + +// GroupCompleted ... +func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { + groupMeta, err := b.getGroupMeta(groupUUID) + if err != nil { + return false, err + } + + taskStates, err := b.getStates(groupMeta.TaskUUIDs) + if err != nil { + return false, err + } + var countSuccessTasks = 0 + for _, taskState := range taskStates { + if taskState.IsCompleted() { + countSuccessTasks++ + } + } + + return countSuccessTasks == groupTaskCount, nil +} + +// GroupTaskStates ... +func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { + groupMeta, err := b.getGroupMeta(groupUUID) + if err != nil { + return nil, err + } + + return b.getStates(groupMeta.TaskUUIDs) +} + +// TriggerChord ... +func (b *Backend) TriggerChord(groupUUID string) (bool, error) { + // Get the group meta data + groupMeta, err := b.getGroupMeta(groupUUID) + + if err != nil { + return false, err + } + + // Chord has already been triggered, return false (should not trigger again) + if groupMeta.ChordTriggered { + return false, nil + } + + // If group meta is locked, wait until it's unlocked + for groupMeta.Lock { + groupMeta, _ = b.getGroupMeta(groupUUID) + log.WARNING.Printf("Group [%s] locked, waiting", groupUUID) + time.Sleep(time.Millisecond * 5) + } + + // Acquire lock + if err = b.lockGroupMeta(groupUUID); err != nil { + return false, err + } + defer b.unlockGroupMeta(groupUUID) + + // update group meta data + err = b.chordTriggered(groupUUID) + if err != nil { + return false, err + } + return true, err +} + +// SetStatePending ... 
+func (b *Backend) SetStatePending(signature *tasks.Signature) error { + taskState := tasks.NewPendingTaskState(signature) + // taskUUID is the primary key of the table, so a new task need to be created first, instead of using dynamodb.UpdateItemInput directly + return b.initTaskState(taskState) +} + +// SetStateReceived ... +func (b *Backend) SetStateReceived(signature *tasks.Signature) error { + taskState := tasks.NewReceivedTaskState(signature) + return b.setTaskState(taskState) +} + +// SetStateStarted ... +func (b *Backend) SetStateStarted(signature *tasks.Signature) error { + taskState := tasks.NewStartedTaskState(signature) + return b.setTaskState(taskState) +} + +// SetStateRetry ... +func (b *Backend) SetStateRetry(signature *tasks.Signature) error { + taskState := tasks.NewRetryTaskState(signature) + return b.setTaskState(taskState) +} + +// SetStateSuccess ... +func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { + taskState := tasks.NewSuccessTaskState(signature, results) + taskState.TTL = b.getExpirationTime() + return b.setTaskState(taskState) +} + +// SetStateFailure ... +func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { + taskState := tasks.NewFailureTaskState(signature, err) + taskState.TTL = b.getExpirationTime() + return b.updateToFailureStateWithError(taskState) +} + +// GetState ... +func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { + result, err := b.client.GetItem(&dynamodb.GetItemInput{ + TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), + Key: map[string]*dynamodb.AttributeValue{ + "TaskUUID": { + S: aws.String(taskUUID), + }, + }, + ConsistentRead: aws.Bool(true), + }) + if err != nil { + return nil, err + } + return b.unmarshalTaskStateGetItemResult(result) +} + +// getStates returns the current states for the given list of tasks. +// It uses batch fetch API. 
If any keys fail to fetch, it'll retry with exponential backoff until maxFetchAttempts times. +func (b *Backend) getStates(tasksToFetch []string) ([]*tasks.TaskState, error) { + fetchedTaskStates := make([]*tasks.TaskState, 0, len(tasksToFetch)) + var unfetchedTaskIDs []string + + // try until all keys are fetched or until we run out of attempts. + for attempt := 0; len(tasksToFetch) > 0 && attempt < MaxFetchAttempts; attempt++ { + unfetchedTaskIDs = nil + for _, batch := range chunkTasks(tasksToFetch, BatchItemsLimit) { + fetched, unfetched, err := b.batchFetchTaskStates(batch) + if err != nil { + return nil, err + } + fetchedTaskStates = append(fetchedTaskStates, fetched...) + unfetchedTaskIDs = append(unfetchedTaskIDs, unfetched...) + } + tasksToFetch = unfetchedTaskIDs + + // Check if there were any tasks that were not fetched. If so, retry with exponential backoff. + if len(unfetchedTaskIDs) > 0 { + backoffDuration := time.Duration(math.Pow(2, float64(attempt))) * time.Second + log.DEBUG.Printf("Unable to fetch [%d] keys on attempt [%d]. Sleeping for [%s]", len(unfetchedTaskIDs), attempt+1, backoffDuration) + time.Sleep(backoffDuration) + } + } + + if len(unfetchedTaskIDs) > 0 { + return nil, fmt.Errorf("Failed to fetch [%d] keys even after retries: [%+v]", len(unfetchedTaskIDs), unfetchedTaskIDs) + } + + return fetchedTaskStates, nil +} + +// batchFetchTaskStates returns the current states of the given tasks by fetching them all in a single batched API. +// DynamoDB's BatchGetItem() can return partial results. If there are any unfetched keys, they are returned as second +// return value so that the caller can retry those keys. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#DynamoDB.BatchGetItem +func (b *Backend) batchFetchTaskStates(taskUUIDs []string) ([]*tasks.TaskState, []string, error) { + tableName := b.cnf.DynamoDB.TaskStatesTable + keys := make([]map[string]*dynamodb.AttributeValue, len(taskUUIDs)) + for i, tid := range taskUUIDs { + keys[i] = map[string]*dynamodb.AttributeValue{ + "TaskUUID": { + S: aws.String(tid), + }, + } + } + + input := &dynamodb.BatchGetItemInput{ + RequestItems: map[string]*dynamodb.KeysAndAttributes{ + tableName: { + ConsistentRead: aws.Bool(true), + Keys: keys, + }, + }, + } + + result, err := b.client.BatchGetItem(input) + if err != nil { + return nil, nil, fmt.Errorf("BatchGetItem failed. Error: [%s]", err) + } + + fetchedKeys, ok := result.Responses[tableName] + if !ok { + return nil, nil, fmt.Errorf("no keys returned from the table: [%s]", tableName) + } + + states := []*tasks.TaskState{} + if err := dynamodbattribute.UnmarshalListOfMaps(fetchedKeys, &states); err != nil { + return nil, nil, fmt.Errorf("Got error when unmarshal map. Error: %v", err) + } + + // Look for any unprocessed keys + var unfetchedKeys []string + if result.UnprocessedKeys[tableName] != nil { + unfetchedKeys, err = getUnfetchedKeys(result.UnprocessedKeys[tableName]) + if err != nil { + return nil, nil, fmt.Errorf("unable to fetch some keys: [%+v]. Error: [%s]", result.UnprocessedKeys, err) + } + } + + return states, unfetchedKeys, nil +} + +// PurgeState ... +func (b *Backend) PurgeState(taskUUID string) error { + input := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "TaskUUID": { + N: aws.String(taskUUID), + }, + }, + TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), + } + _, err := b.client.DeleteItem(input) + + if err != nil { + return err + } + return nil +} + +// PurgeGroupMeta ... 
+func (b *Backend) PurgeGroupMeta(groupUUID string) error { + input := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "GroupUUID": { + N: aws.String(groupUUID), + }, + }, + TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), + } + _, err := b.client.DeleteItem(input) + + if err != nil { + return err + } + return nil +} + +func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) { + result, err := b.client.GetItem(&dynamodb.GetItemInput{ + TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), + Key: map[string]*dynamodb.AttributeValue{ + "GroupUUID": { + S: aws.String(groupUUID), + }, + }, + ConsistentRead: aws.Bool(true), + }) + if err != nil { + log.ERROR.Printf("Error when getting group [%s]. Error: [%s]", groupUUID, err) + return nil, err + } + item, err := b.unmarshalGroupMetaGetItemResult(result) + if err != nil { + log.ERROR.Printf("Failed to unmarshal item. Error: [%s], Result: [%+v]", err, result) + return nil, err + } + return item, nil +} + +func (b *Backend) lockGroupMeta(groupUUID string) error { + err := b.updateGroupMetaLock(groupUUID, true) + if err != nil { + return err + } + return nil +} + +func (b *Backend) unlockGroupMeta(groupUUID string) error { + err := b.updateGroupMetaLock(groupUUID, false) + if err != nil { + return err + } + return nil +} + +func (b *Backend) updateGroupMetaLock(groupUUID string, status bool) error { + input := &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]*string{ + "#L": aws.String("Lock"), + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + ":l": { + BOOL: aws.Bool(status), + }, + }, + Key: map[string]*dynamodb.AttributeValue{ + "GroupUUID": { + S: aws.String(groupUUID), + }, + }, + ReturnValues: aws.String("UPDATED_NEW"), + TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), + UpdateExpression: aws.String("SET #L = :l"), + } + + _, err := b.client.UpdateItem(input) + + if err != nil { + return err + } + return nil +} + +func (b 
*Backend) chordTriggered(groupUUID string) error { + input := &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]*string{ + "#CT": aws.String("ChordTriggered"), + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + ":ct": { + BOOL: aws.Bool(true), + }, + }, + Key: map[string]*dynamodb.AttributeValue{ + "GroupUUID": { + S: aws.String(groupUUID), + }, + }, + ReturnValues: aws.String("UPDATED_NEW"), + TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), + UpdateExpression: aws.String("SET #CT = :ct"), + } + + _, err := b.client.UpdateItem(input) + + if err != nil { + return err + } + return nil +} + +func (b *Backend) setTaskState(taskState *tasks.TaskState) error { + expAttributeNames := map[string]*string{ + "#S": aws.String("State"), + } + expAttributeValues := map[string]*dynamodb.AttributeValue{ + ":s": { + S: aws.String(taskState.State), + }, + } + keyAttributeValues := map[string]*dynamodb.AttributeValue{ + "TaskUUID": { + S: aws.String(taskState.TaskUUID), + }, + } + exp := "SET #S = :s" + if !taskState.CreatedAt.IsZero() { + expAttributeNames["#C"] = aws.String("CreatedAt") + expAttributeValues[":c"] = &dynamodb.AttributeValue{ + S: aws.String(taskState.CreatedAt.String()), + } + exp += ", #C = :c" + } + if taskState.TTL > 0 { + expAttributeNames["#T"] = aws.String("TTL") + expAttributeValues[":t"] = &dynamodb.AttributeValue{ + N: aws.String(fmt.Sprintf("%d", taskState.TTL)), + } + exp += ", #T = :t" + } + if taskState.Results != nil && len(taskState.Results) != 0 { + expAttributeNames["#R"] = aws.String("Results") + var results []*dynamodb.AttributeValue + for _, r := range taskState.Results { + avMap := map[string]*dynamodb.AttributeValue{ + "Type": { + S: aws.String(r.Type), + }, + "Value": { + S: aws.String(fmt.Sprintf("%v", r.Value)), + }, + } + rs := &dynamodb.AttributeValue{ + M: avMap, + } + results = append(results, rs) + } + expAttributeValues[":r"] = &dynamodb.AttributeValue{ + L: results, + } + exp += ", #R = 
:r" + } + input := &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: expAttributeNames, + ExpressionAttributeValues: expAttributeValues, + Key: keyAttributeValues, + ReturnValues: aws.String("UPDATED_NEW"), + TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), + UpdateExpression: aws.String(exp), + } + + _, err := b.client.UpdateItem(input) + + if err != nil { + return err + } + return nil +} + +func (b *Backend) initTaskState(taskState *tasks.TaskState) error { + av, err := dynamodbattribute.MarshalMap(taskState) + input := &dynamodb.PutItemInput{ + Item: av, + TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), + } + if err != nil { + return err + } + _, err = b.client.PutItem(input) + + if err != nil { + return err + } + return nil +} + +func (b *Backend) updateToFailureStateWithError(taskState *tasks.TaskState) error { + input := &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]*string{ + "#S": aws.String("State"), + "#E": aws.String("Error"), + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + ":s": { + S: aws.String(taskState.State), + }, + ":e": { + S: aws.String(taskState.Error), + }, + }, + Key: map[string]*dynamodb.AttributeValue{ + "TaskUUID": { + S: aws.String(taskState.TaskUUID), + }, + }, + ReturnValues: aws.String("UPDATED_NEW"), + TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), + UpdateExpression: aws.String("SET #S = :s, #E = :e"), + } + + if taskState.TTL > 0 { + input.ExpressionAttributeNames["#T"] = aws.String("TTL") + input.ExpressionAttributeValues[":t"] = &dynamodb.AttributeValue{ + N: aws.String(fmt.Sprintf("%d", taskState.TTL)), + } + input.UpdateExpression = aws.String(aws.StringValue(input.UpdateExpression) + ", #T = :t") + } + + _, err := b.client.UpdateItem(input) + + if err != nil { + return err + } + return nil +} + +func (b *Backend) unmarshalGroupMetaGetItemResult(result *dynamodb.GetItemOutput) (*tasks.GroupMeta, error) { + if result == nil { + err := errors.New("task state is 
nil") + log.ERROR.Printf("Got error when unmarshal map. Error: %v", err) + return nil, err + } + item := tasks.GroupMeta{} + err := dynamodbattribute.UnmarshalMap(result.Item, &item) + if err != nil { + log.ERROR.Printf("Got error when unmarshal map. Error: %v", err) + return nil, err + } + return &item, err +} + +func (b *Backend) unmarshalTaskStateGetItemResult(result *dynamodb.GetItemOutput) (*tasks.TaskState, error) { + if result == nil { + err := errors.New("task state is nil") + log.ERROR.Printf("Got error when unmarshal map. Error: %v", err) + return nil, err + } + state := tasks.TaskState{} + err := dynamodbattribute.UnmarshalMap(result.Item, &state) + if err != nil { + log.ERROR.Printf("Got error when unmarshal map. Error: %v", err) + return nil, err + } + return &state, nil +} + +func (b *Backend) checkRequiredTablesIfExist() error { + var ( + taskTableName = b.cnf.DynamoDB.TaskStatesTable + groupTableName = b.cnf.DynamoDB.GroupMetasTable + ) + result, err := b.client.ListTables(&dynamodb.ListTablesInput{}) + if err != nil { + return err + } + if !b.tableExists(taskTableName, result.TableNames) { + return errors.New("task table doesn't exist") + } + if !b.tableExists(groupTableName, result.TableNames) { + return errors.New("group table doesn't exist") + } + return nil +} + +func (b *Backend) tableExists(tableName string, tableNames []*string) bool { + for _, t := range tableNames { + if tableName == *t { + return true + } + } + return false +} + +func (b *Backend) getExpirationTime() int64 { + expiresIn := b.GetConfig().ResultsExpireIn + if expiresIn == 0 { + // expire results after 1 hour by default + expiresIn = config.DefaultResultsExpireIn + } + return time.Now().Add(time.Second * time.Duration(expiresIn)).Unix() +} + +// getUnfetchedKeys returns keys that were not fetched in a batch request. 
+func getUnfetchedKeys(unprocessed *dynamodb.KeysAndAttributes) ([]string, error) { + states := []*tasks.TaskState{} + var taskIDs []string + if err := dynamodbattribute.UnmarshalListOfMaps(unprocessed.Keys, &states); err != nil { + return nil, fmt.Errorf("Got error when unmarshal map. Error: %v", err) + } + for _, s := range states { + taskIDs = append(taskIDs, s.TaskUUID) + } + return taskIDs, nil +} + +// chunkTasks chunks the list of strings into multiple smaller lists of specified size. +func chunkTasks(array []string, chunkSize int) [][]string { + var result [][]string + for len(array) > 0 { + sz := min(len(array), chunkSize) + chunk := array[:sz] + array = array[sz:] + result = append(result, chunk) + } + return result +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/v2/backends/dynamodb/dynamodb_export_test.go b/v2/backends/dynamodb/dynamodb_export_test.go new file mode 100644 index 000000000..9508a763b --- /dev/null +++ b/v2/backends/dynamodb/dynamodb_export_test.go @@ -0,0 +1,249 @@ +package dynamodb + +import ( + "errors" + "os" + + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" +) + +var ( + TestDynamoDBBackend *Backend + TestErrDynamoDBBackend *Backend + TestCnf *config.Config + TestDBClient dynamodbiface.DynamoDBAPI + TestErrDBClient dynamodbiface.DynamoDBAPI + TestGroupMeta *tasks.GroupMeta + TestTask1 map[string]*dynamodb.AttributeValue + TestTask2 map[string]*dynamodb.AttributeValue + TestTask3 map[string]*dynamodb.AttributeValue +) + +type TestDynamoDBClient struct { + dynamodbiface.DynamoDBAPI + PutItemOverride func(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) + UpdateItemOverride func(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) + GetItemOverride func(input *dynamodb.GetItemInput) 
(*dynamodb.GetItemOutput, error) + BatchGetItemOverride func(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) +} + +func (t *TestDynamoDBClient) ResetOverrides() { + t.PutItemOverride = nil + t.UpdateItemOverride = nil + t.BatchGetItemOverride = nil +} + +func (t *TestDynamoDBClient) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) { + if t.PutItemOverride != nil { + return t.PutItemOverride(input) + } + return &dynamodb.PutItemOutput{}, nil +} +func (t *TestDynamoDBClient) BatchGetItem(input *dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) { + if t.BatchGetItemOverride != nil { + return t.BatchGetItemOverride(input) + } + return &dynamodb.BatchGetItemOutput{}, nil +} + +func (t *TestDynamoDBClient) GetItem(input *dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) { + if t.GetItemOverride != nil { + return t.GetItemOverride(input) + } + var output *dynamodb.GetItemOutput + switch *input.TableName { + case "group_metas": + output = &dynamodb.GetItemOutput{ + Item: map[string]*dynamodb.AttributeValue{ + "TaskUUIDs": { + L: []*dynamodb.AttributeValue{ + { + S: aws.String("testTaskUUID1"), + }, + { + S: aws.String("testTaskUUID2"), + }, + { + S: aws.String("testTaskUUID3"), + }, + }, + }, + "ChordTriggered": { + BOOL: aws.Bool(false), + }, + "GroupUUID": { + S: aws.String("testGroupUUID"), + }, + "Lock": { + BOOL: aws.Bool(false), + }, + }, + } + case "task_states": + if input.Key["TaskUUID"] == nil { + output = &dynamodb.GetItemOutput{ + Item: map[string]*dynamodb.AttributeValue{ + "Error": { + NULL: aws.Bool(false), + }, + "State": { + S: aws.String(tasks.StatePending), + }, + "TaskUUID": { + S: aws.String("testTaskUUID1"), + }, + "Results:": { + NULL: aws.Bool(true), + }, + }, + } + } else { + if *(input.Key["TaskUUID"].S) == "testTaskUUID1" { + output = &dynamodb.GetItemOutput{ + Item: TestTask1, + } + } else if *(input.Key["TaskUUID"].S) == "testTaskUUID2" { + output = &dynamodb.GetItemOutput{ + Item: 
TestTask2, + } + + } else if *(input.Key["TaskUUID"].S) == "testTaskUUID3" { + output = &dynamodb.GetItemOutput{ + Item: TestTask3, + } + } + } + + } + return output, nil +} + +func (t *TestDynamoDBClient) DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error) { + return &dynamodb.DeleteItemOutput{}, nil +} + +func (t *TestDynamoDBClient) UpdateItem(input *dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) { + if t.UpdateItemOverride != nil { + return t.UpdateItemOverride(input) + } + return &dynamodb.UpdateItemOutput{}, nil +} + +func (t *TestDynamoDBClient) ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) { + return &dynamodb.ListTablesOutput{ + TableNames: []*string{ + aws.String("group_metas"), + aws.String("task_states"), + }, + }, nil +} + +// Always returns error +type TestErrDynamoDBClient struct { + dynamodbiface.DynamoDBAPI +} + +func (t *TestErrDynamoDBClient) PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) { + return nil, errors.New("error when putting an item") +} + +func (t *TestErrDynamoDBClient) GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) { + return nil, errors.New("error when getting an item") +} + +func (t *TestErrDynamoDBClient) DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error) { + return nil, errors.New("error when deleting an item") +} + +func (t *TestErrDynamoDBClient) Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error) { + return nil, errors.New("error when scanning an item") +} + +func (t *TestErrDynamoDBClient) UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) { + return nil, errors.New("error when updating an item") +} + +func (t *TestErrDynamoDBClient) ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) { + return nil, errors.New("error when listing tables") +} + +func init() { + TestCnf = &config.Config{ + ResultBackend: os.Getenv("DYNAMODB_URL"), + ResultsExpireIn: 30, + 
DynamoDB: &config.DynamoDBConfig{ + TaskStatesTable: "task_states", + GroupMetasTable: "group_metas", + }, + } + TestDBClient = new(TestDynamoDBClient) + TestDynamoDBBackend = &Backend{cnf: TestCnf, client: TestDBClient} + + TestErrDBClient = new(TestErrDynamoDBClient) + TestErrDynamoDBBackend = &Backend{cnf: TestCnf, client: TestErrDBClient} + + TestGroupMeta = &tasks.GroupMeta{ + GroupUUID: "testGroupUUID", + TaskUUIDs: []string{"testTaskUUID1", "testTaskUUID2", "testTaskUUID3"}, + } +} + +func (b *Backend) GetConfig() *config.Config { + return b.cnf +} + +func (b *Backend) GetClient() dynamodbiface.DynamoDBAPI { + return b.client +} + +func (b *Backend) GetGroupMetaForTest(groupUUID string) (*tasks.GroupMeta, error) { + return b.getGroupMeta(groupUUID) +} + +func (b *Backend) UnmarshalGroupMetaGetItemResultForTest(result *dynamodb.GetItemOutput) (*tasks.GroupMeta, error) { + return b.unmarshalGroupMetaGetItemResult(result) +} + +func (b *Backend) UnmarshalTaskStateGetItemResultForTest(result *dynamodb.GetItemOutput) (*tasks.TaskState, error) { + return b.unmarshalTaskStateGetItemResult(result) +} + +func (b *Backend) SetTaskStateForTest(taskState *tasks.TaskState) error { + return b.setTaskState(taskState) +} + +func (b *Backend) ChordTriggeredForTest(groupUUID string) error { + return b.chordTriggered(groupUUID) +} + +func (b *Backend) UpdateGroupMetaLockForTest(groupUUID string, status bool) error { + return b.updateGroupMetaLock(groupUUID, status) +} + +func (b *Backend) UnlockGroupMetaForTest(groupUUID string) error { + return b.unlockGroupMeta(groupUUID) +} + +func (b *Backend) LockGroupMetaForTest(groupUUID string) error { + return b.lockGroupMeta(groupUUID) +} + +func (b *Backend) GetStatesForTest(taskUUIDs ...string) ([]*tasks.TaskState, error) { + return b.getStates(taskUUIDs) +} + +func (b *Backend) UpdateToFailureStateWithErrorForTest(taskState *tasks.TaskState) error { + return b.updateToFailureStateWithError(taskState) +} + +func (b *Backend) 
TableExistsForTest(tableName string, tableNames []*string) bool { + return b.tableExists(tableName, tableNames) +} + +func (b *Backend) CheckRequiredTablesIfExistForTest() error { + return b.checkRequiredTablesIfExist() +} diff --git a/v2/backends/dynamodb/dynamodb_test.go b/v2/backends/dynamodb/dynamodb_test.go new file mode 100644 index 000000000..1772fe455 --- /dev/null +++ b/v2/backends/dynamodb/dynamodb_test.go @@ -0,0 +1,658 @@ +package dynamodb_test + +import ( + "fmt" + "strconv" + "testing" + "time" + + "github.com/RichardKnop/machinery/v2/backends/dynamodb" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" + + awsdynamodb "github.com/aws/aws-sdk-go/service/dynamodb" +) + +func TestNew(t *testing.T) { + // should call t.Skip if not connected to internet + backend := dynamodb.New(dynamodb.TestCnf) + assert.IsType(t, new(dynamodb.Backend), backend) +} + +func TestInitGroup(t *testing.T) { + groupUUID := "testGroupUUID" + taskUUIDs := []string{"testTaskUUID1", "testTaskUUID2", "testTaskUUID3"} + log.INFO.Println(dynamodb.TestDynamoDBBackend.GetConfig()) + + err := dynamodb.TestDynamoDBBackend.InitGroup(groupUUID, taskUUIDs) + assert.Nil(t, err) + + err = dynamodb.TestErrDynamoDBBackend.InitGroup(groupUUID, taskUUIDs) + assert.NotNil(t, err) + + // assert proper TTL value is set in InitGroup() + dynamodb.TestDynamoDBBackend.GetConfig().ResultsExpireIn = 3 * 3600 // results should expire after 3 hours + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + // Override DynamoDB PutItem() behavior + var isPutItemCalled bool + client.PutItemOverride = func(input *awsdynamodb.PutItemInput) (*awsdynamodb.PutItemOutput, error) { + isPutItemCalled = true + assert.NotNil(t, input) + + actualTTLStr := *input.Item["TTL"].N + expectedTTLTime := time.Now().Add(3 * time.Hour) + assertTTLValue(t, expectedTTLTime, actualTTLStr) + + 
return &awsdynamodb.PutItemOutput{}, nil + } + err = dynamodb.TestDynamoDBBackend.InitGroup(groupUUID, taskUUIDs) + assert.Nil(t, err) + assert.True(t, isPutItemCalled) + client.ResetOverrides() +} + +func assertTTLValue(t *testing.T, expectedTTLTime time.Time, actualEncodedTTLValue string) { + actualTTLTimestamp, err := strconv.ParseInt(actualEncodedTTLValue, 10, 64) + assert.Nil(t, err) + actualTTLTime := time.Unix(actualTTLTimestamp, 0) + assert.WithinDuration(t, expectedTTLTime, actualTTLTime, time.Second) +} + +func TestGroupCompleted(t *testing.T) { + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + tableName := dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.TaskStatesTable + // Override DynamoDB BatchGetItem() behavior + var isBatchGetItemCalled bool + client.BatchGetItemOverride = func(input *awsdynamodb.BatchGetItemInput) (*awsdynamodb.BatchGetItemOutput, error) { + isBatchGetItemCalled = true + assert.NotNil(t, input) + assert.Nil(t, input.Validate()) + + return &awsdynamodb.BatchGetItemOutput{ + Responses: map[string][]map[string]*awsdynamodb.AttributeValue{ + tableName: { + {"State": {S: aws.String(tasks.StateSuccess)}}, + {"State": {S: aws.String(tasks.StateSuccess)}}, + {"State": {S: aws.String(tasks.StateFailure)}}, + }, + }, + }, nil + } + groupUUID := "testGroupUUID" + isCompleted, err := dynamodb.TestDynamoDBBackend.GroupCompleted(groupUUID, 3) + assert.Nil(t, err) + assert.True(t, isCompleted) + assert.True(t, isBatchGetItemCalled) + client.ResetOverrides() +} +func TestGroupCompletedReturnsError(t *testing.T) { + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + client.BatchGetItemOverride = func(input *awsdynamodb.BatchGetItemInput) (*awsdynamodb.BatchGetItemOutput, error) { + return nil, fmt.Errorf("Simulating error from AWS") + } + isCompleted, err := dynamodb.TestDynamoDBBackend.GroupCompleted("test", 3) + assert.NotNil(t, err) + assert.False(t, isCompleted) + 
client.ResetOverrides() +} + +// TestGroupCompletedReturnsFalse tests that the GroupCompleted() returns false when some tasks have not yet finished. +func TestGroupCompletedReturnsFalse(t *testing.T) { + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + tableName := dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.TaskStatesTable + // Override DynamoDB BatchGetItem() behavior + client.BatchGetItemOverride = func(_ *awsdynamodb.BatchGetItemInput) (*awsdynamodb.BatchGetItemOutput, error) { + return &awsdynamodb.BatchGetItemOutput{ + Responses: map[string][]map[string]*awsdynamodb.AttributeValue{ + tableName: { + {"State": {S: aws.String(tasks.StateSuccess)}}, + {"State": {S: aws.String(tasks.StateFailure)}}, + {"State": {S: aws.String(tasks.StatePending)}}, + }, + }, + }, nil + } + isCompleted, err := dynamodb.TestDynamoDBBackend.GroupCompleted("testGroup", 3) + assert.Nil(t, err) + assert.False(t, isCompleted) + client.ResetOverrides() +} + +// TestGroupCompletedReturnsFalse tests that the GroupCompleted() retries the the request until MaxFetchAttempts before returning an error +func TestGroupCompletedRetries(t *testing.T) { + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + tableName := dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.TaskStatesTable + // Override DynamoDB BatchGetItem() behavior + var countBatchGetItemAPICalls int + client.BatchGetItemOverride = func(_ *awsdynamodb.BatchGetItemInput) (*awsdynamodb.BatchGetItemOutput, error) { + countBatchGetItemAPICalls++ + + return &awsdynamodb.BatchGetItemOutput{ + Responses: map[string][]map[string]*awsdynamodb.AttributeValue{ + tableName: { + {"State": {S: aws.String(tasks.StateSuccess)}}, + }, + }, + UnprocessedKeys: map[string]*awsdynamodb.KeysAndAttributes{ + tableName: { + Keys: []map[string]*awsdynamodb.AttributeValue{ + {"TaskUUID": {S: aws.String("unfetchedTaskUUID1")}}, + {"TaskUUID": {S: aws.String("unfetchedTaskUUID2")}}, + }, + 
}, + }, + }, nil + } + _, err := dynamodb.TestDynamoDBBackend.GroupCompleted("testGroup", 3) + assert.NotNil(t, err) + assert.Equal(t, dynamodb.MaxFetchAttempts, countBatchGetItemAPICalls) + client.ResetOverrides() +} + +// TestGroupCompletedReturnsFalse tests that the GroupCompleted() retries the the request and returns success if all keys are fetched on retries. +func TestGroupCompletedRetrieSuccess(t *testing.T) { + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + tableName := dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.TaskStatesTable + // Override DynamoDB BatchGetItem() behavior + var countBatchGetItemAPICalls int + client.BatchGetItemOverride = func(_ *awsdynamodb.BatchGetItemInput) (*awsdynamodb.BatchGetItemOutput, error) { + countBatchGetItemAPICalls++ + + // simulate unfetched keys on 1st attempt. + if countBatchGetItemAPICalls == 1 { + return &awsdynamodb.BatchGetItemOutput{ + Responses: map[string][]map[string]*awsdynamodb.AttributeValue{ + tableName: {}, // no keys returned in this attempt. + }, + UnprocessedKeys: map[string]*awsdynamodb.KeysAndAttributes{ + tableName: { + Keys: []map[string]*awsdynamodb.AttributeValue{ + {"TaskUUID": {S: aws.String("unfetchedTaskUUID1")}}, + {"TaskUUID": {S: aws.String("unfetchedTaskUUID2")}}, + {"TaskUUID": {S: aws.String("unfetchedTaskUUID3")}}, + }, + }, + }, + }, nil + + } + + // Return all keys in subsequent attempts. 
+ return &awsdynamodb.BatchGetItemOutput{ + Responses: map[string][]map[string]*awsdynamodb.AttributeValue{ + tableName: { + {"State": {S: aws.String(tasks.StateSuccess)}}, + {"State": {S: aws.String(tasks.StateSuccess)}}, + {"State": {S: aws.String(tasks.StateSuccess)}}, + }, + }, + }, nil + } + isCompleted, err := dynamodb.TestDynamoDBBackend.GroupCompleted("testGroup", 3) + assert.Nil(t, err) + assert.True(t, isCompleted) + assert.Equal(t, 2, countBatchGetItemAPICalls) + client.ResetOverrides() +} + +func TestPrivateFuncGetGroupMeta(t *testing.T) { + groupUUID := "testGroupUUID" + meta, err := dynamodb.TestDynamoDBBackend.GetGroupMetaForTest(groupUUID) + item := tasks.GroupMeta{ + GroupUUID: "testGroupUUID", + Lock: false, + ChordTriggered: false, + TaskUUIDs: []string{ + "testTaskUUID1", + "testTaskUUID2", + "testTaskUUID3", + }, + } + assert.Nil(t, err) + assert.EqualValues(t, item, *meta) + _, err = dynamodb.TestErrDynamoDBBackend.GetGroupMetaForTest(groupUUID) + assert.NotNil(t, err) +} + +func TestPrivateFuncUnmarshalTaskStateGetItemResult(t *testing.T) { + result := awsdynamodb.GetItemOutput{ + Item: map[string]*awsdynamodb.AttributeValue{ + "Error": { + NULL: aws.Bool(true), + }, + "State": { + S: aws.String(tasks.StatePending), + }, + "TaskUUID": { + S: aws.String("testTaskUUID1"), + }, + "Results:": { + NULL: aws.Bool(true), + }, + }, + } + + invalidResult := awsdynamodb.GetItemOutput{ + Item: map[string]*awsdynamodb.AttributeValue{ + "Error": { + BOOL: aws.Bool(true), + }, + "State": { + S: aws.String(tasks.StatePending), + }, + "TaskUUID": { + S: aws.String("testTaskUUID1"), + }, + "Results:": { + BOOL: aws.Bool(true), + }, + }, + } + + item := tasks.TaskState{ + TaskUUID: "testTaskUUID1", + Results: nil, + State: tasks.StatePending, + Error: "", + } + state, err := dynamodb.TestErrDynamoDBBackend.UnmarshalTaskStateGetItemResultForTest(&result) + assert.Nil(t, err) + assert.EqualValues(t, item, *state) + + _, err = 
dynamodb.TestDynamoDBBackend.UnmarshalTaskStateGetItemResultForTest(nil) + assert.NotNil(t, err) + + _, err = dynamodb.TestDynamoDBBackend.UnmarshalTaskStateGetItemResultForTest(&invalidResult) + assert.NotNil(t, err) + +} + +func TestPrivateFuncUnmarshalGroupMetaGetItemResult(t *testing.T) { + result := awsdynamodb.GetItemOutput{ + Item: map[string]*awsdynamodb.AttributeValue{ + "TaskUUIDs": { + L: []*awsdynamodb.AttributeValue{ + { + S: aws.String("testTaskUUID1"), + }, + { + S: aws.String("testTaskUUID2"), + }, + { + S: aws.String("testTaskUUID3"), + }, + }, + }, + "ChordTriggered": { + BOOL: aws.Bool(false), + }, + "GroupUUID": { + S: aws.String("testGroupUUID"), + }, + "Lock": { + BOOL: aws.Bool(false), + }, + }, + } + + invalidResult := awsdynamodb.GetItemOutput{ + Item: map[string]*awsdynamodb.AttributeValue{ + "TaskUUIDs": { + L: []*awsdynamodb.AttributeValue{ + { + S: aws.String("testTaskUUID1"), + }, + { + S: aws.String("testTaskUUID2"), + }, + { + S: aws.String("testTaskUUID3"), + }, + }, + }, + "ChordTriggered": { + S: aws.String("false"), // this attribute is invalid + }, + "GroupUUID": { + S: aws.String("testGroupUUID"), + }, + "Lock": { + BOOL: aws.Bool(false), + }, + }, + } + + item := tasks.GroupMeta{ + GroupUUID: "testGroupUUID", + Lock: false, + ChordTriggered: false, + TaskUUIDs: []string{ + "testTaskUUID1", + "testTaskUUID2", + "testTaskUUID3", + }, + } + meta, err := dynamodb.TestErrDynamoDBBackend.UnmarshalGroupMetaGetItemResultForTest(&result) + assert.Nil(t, err) + assert.EqualValues(t, item, *meta) + _, err = dynamodb.TestErrDynamoDBBackend.UnmarshalGroupMetaGetItemResultForTest(nil) + assert.NotNil(t, err) + + _, err = dynamodb.TestErrDynamoDBBackend.UnmarshalGroupMetaGetItemResultForTest(&invalidResult) + assert.NotNil(t, err) + +} + +func TestPrivateFuncSetTaskState(t *testing.T) { + signature := &tasks.Signature{ + Name: "Test", + Args: []tasks.Arg{ + { + Type: "int64", + Value: 1, + }, + }, + } + state := 
tasks.NewPendingTaskState(signature) + err := dynamodb.TestErrDynamoDBBackend.SetTaskStateForTest(state) + assert.NotNil(t, err) + err = dynamodb.TestDynamoDBBackend.SetTaskStateForTest(state) + assert.Nil(t, err) +} + +// verifyUpdateInput is a helper function to verify valid dynamoDB update input. +func verifyUpdateInput(t *testing.T, input *awsdynamodb.UpdateItemInput, expectedTaskID string, expectedState string, expectedTTLTime time.Time) { + assert.NotNil(t, input) + + // verify task ID + assert.Equal(t, expectedTaskID, *input.Key["TaskUUID"].S) + + // verify task state + assert.Equal(t, expectedState, *input.ExpressionAttributeValues[":s"].S) + + // Verify TTL + if !expectedTTLTime.IsZero() { + actualTTLStr := *input.ExpressionAttributeValues[":t"].N + assertTTLValue(t, expectedTTLTime, actualTTLStr) + } +} + +func TestSetStateSuccess(t *testing.T) { + signature := &tasks.Signature{UUID: "testTaskUUID"} + + // assert correct task ID, state and TTL value is set in SetStateSuccess() + dynamodb.TestDynamoDBBackend.GetConfig().ResultsExpireIn = 3 * 3600 // results should expire after 3 hours + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + // Override DynamoDB UpdateItem() behavior + var isUpdateItemCalled bool + client.UpdateItemOverride = func(input *awsdynamodb.UpdateItemInput) (*awsdynamodb.UpdateItemOutput, error) { + isUpdateItemCalled = true + verifyUpdateInput(t, input, signature.UUID, tasks.StateSuccess, time.Now().Add(3*time.Hour)) + return &awsdynamodb.UpdateItemOutput{}, nil + } + + err := dynamodb.TestDynamoDBBackend.SetStateSuccess(signature, nil) + assert.Nil(t, err) + assert.True(t, isUpdateItemCalled) + client.ResetOverrides() +} + +func TestSetStateFailure(t *testing.T) { + signature := &tasks.Signature{UUID: "testTaskUUID"} + + // assert correct task ID, state and TTL value is set in SetStateFailure() + dynamodb.TestDynamoDBBackend.GetConfig().ResultsExpireIn = 2 * 3600 // results should expire after 2 hours 
+ client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + // Override DynamoDB UpdateItem() behavior + var isUpdateItemCalled bool + client.UpdateItemOverride = func(input *awsdynamodb.UpdateItemInput) (*awsdynamodb.UpdateItemOutput, error) { + isUpdateItemCalled = true + verifyUpdateInput(t, input, signature.UUID, tasks.StateFailure, time.Now().Add(2*time.Hour)) + return &awsdynamodb.UpdateItemOutput{}, nil + } + + err := dynamodb.TestDynamoDBBackend.SetStateFailure(signature, "Some error occurred") + assert.Nil(t, err) + assert.True(t, isUpdateItemCalled) + client.ResetOverrides() +} + +func TestSetStateReceived(t *testing.T) { + signature := &tasks.Signature{UUID: "testTaskUUID"} + + // assert correct task ID, state and *no* TTL value is set in SetStateReceived() + dynamodb.TestDynamoDBBackend.GetConfig().ResultsExpireIn = 2 * 3600 // results should expire after 2 hours (ignored for this state) + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + var isUpdateItemCalled bool + client.UpdateItemOverride = func(input *awsdynamodb.UpdateItemInput) (*awsdynamodb.UpdateItemOutput, error) { + isUpdateItemCalled = true + verifyUpdateInput(t, input, signature.UUID, tasks.StateReceived, time.Time{}) + return &awsdynamodb.UpdateItemOutput{}, nil + } + + err := dynamodb.TestDynamoDBBackend.SetStateReceived(signature) + assert.Nil(t, err) + assert.True(t, isUpdateItemCalled) + client.ResetOverrides() +} + +func TestSetStateStarted(t *testing.T) { + signature := &tasks.Signature{UUID: "testTaskUUID"} + + // assert correct task ID, state and *no* TTL value is set in SetStateStarted() + dynamodb.TestDynamoDBBackend.GetConfig().ResultsExpireIn = 2 * 3600 // results should expire after 2 hours (ignored for this state) + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + var isUpdateItemCalled bool + client.UpdateItemOverride = func(input *awsdynamodb.UpdateItemInput) 
(*awsdynamodb.UpdateItemOutput, error) { + isUpdateItemCalled = true + verifyUpdateInput(t, input, signature.UUID, tasks.StateStarted, time.Time{}) + return &awsdynamodb.UpdateItemOutput{}, nil + } + + err := dynamodb.TestDynamoDBBackend.SetStateStarted(signature) + assert.Nil(t, err) + assert.True(t, isUpdateItemCalled) + client.ResetOverrides() +} + +func TestSetStateRetry(t *testing.T) { + signature := &tasks.Signature{UUID: "testTaskUUID"} + + // assert correct task ID, state and *no* TTL value is set in SetStateStarted() + dynamodb.TestDynamoDBBackend.GetConfig().ResultsExpireIn = 2 * 3600 // results should expire after 2 hours (ignored for this state) + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + var isUpdateItemCalled bool + client.UpdateItemOverride = func(input *awsdynamodb.UpdateItemInput) (*awsdynamodb.UpdateItemOutput, error) { + isUpdateItemCalled = true + verifyUpdateInput(t, input, signature.UUID, tasks.StateRetry, time.Time{}) + return &awsdynamodb.UpdateItemOutput{}, nil + } + + err := dynamodb.TestDynamoDBBackend.SetStateRetry(signature) + assert.Nil(t, err) + assert.True(t, isUpdateItemCalled) + client.ResetOverrides() +} + +func TestGroupTaskStates(t *testing.T) { + expectedStates := map[string]*tasks.TaskState{ + "testTaskUUID1": { + TaskUUID: "testTaskUUID1", + Results: nil, + State: tasks.StatePending, + Error: "", + }, + "testTaskUUID2": { + TaskUUID: "testTaskUUID2", + Results: nil, + State: tasks.StateStarted, + Error: "", + }, + "testTaskUUID3": { + TaskUUID: "testTaskUUID3", + Results: nil, + State: tasks.StateSuccess, + Error: "", + }, + } + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + tableName := dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.TaskStatesTable + client.BatchGetItemOverride = func(input *awsdynamodb.BatchGetItemInput) (*awsdynamodb.BatchGetItemOutput, error) { + assert.Nil(t, input.Validate()) + return &awsdynamodb.BatchGetItemOutput{ + 
Responses: map[string][]map[string]*awsdynamodb.AttributeValue{ + tableName: { + { + "TaskUUID": {S: aws.String("testTaskUUID1")}, + "Results:": {NULL: aws.Bool(true)}, + "State": {S: aws.String(tasks.StatePending)}, + "Error": {NULL: aws.Bool(true)}, + }, + { + "TaskUUID": {S: aws.String("testTaskUUID2")}, + "Results:": {NULL: aws.Bool(true)}, + "State": {S: aws.String(tasks.StateStarted)}, + "Error": {NULL: aws.Bool(true)}, + }, + { + "TaskUUID": {S: aws.String("testTaskUUID3")}, + "Results:": {NULL: aws.Bool(true)}, + "State": {S: aws.String(tasks.StateSuccess)}, + "Error": {NULL: aws.Bool(true)}, + }, + }, + }, + }, nil + } + defer client.ResetOverrides() + + states, err := dynamodb.TestDynamoDBBackend.GroupTaskStates("testGroupUUID", 3) + assert.Nil(t, err) + for _, s := range states { + assert.EqualValues(t, *s, *expectedStates[s.TaskUUID]) + } +} + +func TestTriggerChord(t *testing.T) { + groupUUID := "testGroupUUID" + triggered, err := dynamodb.TestDynamoDBBackend.TriggerChord(groupUUID) + assert.Nil(t, err) + assert.True(t, triggered) +} + +func TestGetState(t *testing.T) { + taskUUID := "testTaskUUID1" + expectedState := &tasks.TaskState{ + TaskUUID: "testTaskUUID1", + Results: nil, + State: tasks.StatePending, + Error: "", + } + client := dynamodb.TestDynamoDBBackend.GetClient().(*dynamodb.TestDynamoDBClient) + client.GetItemOverride = func(input *awsdynamodb.GetItemInput) (*awsdynamodb.GetItemOutput, error) { + return &awsdynamodb.GetItemOutput{ + Item: map[string]*awsdynamodb.AttributeValue{ + "TaskUUID": {S: aws.String("testTaskUUID1")}, + "Results:": {NULL: aws.Bool(true)}, + "State": {S: aws.String(tasks.StatePending)}, + "Error": {NULL: aws.Bool(false)}, + }, + }, nil + } + defer client.ResetOverrides() + + state, err := dynamodb.TestDynamoDBBackend.GetState(taskUUID) + assert.Nil(t, err) + assert.EqualValues(t, expectedState, state) +} + +func TestPurgeState(t *testing.T) { + taskUUID := "testTaskUUID1" + err := 
dynamodb.TestDynamoDBBackend.PurgeState(taskUUID) + assert.Nil(t, err) + + err = dynamodb.TestErrDynamoDBBackend.PurgeState(taskUUID) + assert.NotNil(t, err) +} + +func TestPurgeGroupMeta(t *testing.T) { + groupUUID := "GroupUUID" + err := dynamodb.TestDynamoDBBackend.PurgeGroupMeta(groupUUID) + assert.Nil(t, err) + + err = dynamodb.TestErrDynamoDBBackend.PurgeGroupMeta(groupUUID) + assert.NotNil(t, err) +} + +func TestPrivateFuncLockGroupMeta(t *testing.T) { + groupUUID := "GroupUUID" + err := dynamodb.TestDynamoDBBackend.LockGroupMetaForTest(groupUUID) + assert.Nil(t, err) + err = dynamodb.TestErrDynamoDBBackend.LockGroupMetaForTest(groupUUID) + assert.NotNil(t, err) +} + +func TestPrivateFuncUnLockGroupMeta(t *testing.T) { + groupUUID := "GroupUUID" + err := dynamodb.TestDynamoDBBackend.UnlockGroupMetaForTest(groupUUID) + assert.Nil(t, err) + err = dynamodb.TestErrDynamoDBBackend.UnlockGroupMetaForTest(groupUUID) + assert.NotNil(t, err) +} + +func TestPrivateFuncChordTriggered(t *testing.T) { + groupUUID := "GroupUUID" + err := dynamodb.TestDynamoDBBackend.ChordTriggeredForTest(groupUUID) + assert.Nil(t, err) + err = dynamodb.TestErrDynamoDBBackend.ChordTriggeredForTest(groupUUID) + assert.NotNil(t, err) +} + +func TestDynamoDBPrivateFuncUpdateGroupMetaLock(t *testing.T) { + groupUUID := "GroupUUID" + err := dynamodb.TestDynamoDBBackend.UpdateGroupMetaLockForTest(groupUUID, true) + assert.Nil(t, err) + err = dynamodb.TestErrDynamoDBBackend.UpdateGroupMetaLockForTest(groupUUID, true) + assert.NotNil(t, err) +} + +func TestPrivateFuncUpdateToFailureStateWithError(t *testing.T) { + signature := &tasks.Signature{ + Name: "Test", + Args: []tasks.Arg{ + { + Type: "int64", + Value: 1, + }, + }, + } + + state := tasks.NewFailureTaskState(signature, "This is an error") + err := dynamodb.TestDynamoDBBackend.UpdateToFailureStateWithErrorForTest(state) + assert.Nil(t, err) +} + +func TestPrivateFuncTableExistsForTest(t *testing.T) { + tables := []*string{aws.String("foo")} 
+ assert.False(t, dynamodb.TestDynamoDBBackend.TableExistsForTest("bar", tables)) + assert.True(t, dynamodb.TestDynamoDBBackend.TableExistsForTest("foo", tables)) +} + +func TestPrivateFuncCheckRequiredTablesIfExistForTest(t *testing.T) { + err := dynamodb.TestDynamoDBBackend.CheckRequiredTablesIfExistForTest() + assert.Nil(t, err) + taskTable := dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.TaskStatesTable + groupTable := dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.GroupMetasTable + err = dynamodb.TestErrDynamoDBBackend.CheckRequiredTablesIfExistForTest() + assert.NotNil(t, err) + dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.TaskStatesTable = "foo" + err = dynamodb.TestDynamoDBBackend.CheckRequiredTablesIfExistForTest() + assert.NotNil(t, err) + dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.TaskStatesTable = taskTable + dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.GroupMetasTable = "foo" + err = dynamodb.TestDynamoDBBackend.CheckRequiredTablesIfExistForTest() + assert.NotNil(t, err) + dynamodb.TestDynamoDBBackend.GetConfig().DynamoDB.GroupMetasTable = groupTable +} diff --git a/v2/backends/eager/eager.go b/v2/backends/eager/eager.go new file mode 100644 index 000000000..292b63b73 --- /dev/null +++ b/v2/backends/eager/eager.go @@ -0,0 +1,208 @@ +package eager + +import ( + "bytes" + "encoding/json" + "fmt" + "sync" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" +) + +// ErrGroupNotFound ... +type ErrGroupNotFound struct { + groupUUID string +} + +// NewErrGroupNotFound returns new instance of ErrGroupNotFound +func NewErrGroupNotFound(groupUUID string) ErrGroupNotFound { + return ErrGroupNotFound{groupUUID: groupUUID} +} + +// Error implements error interface +func (e ErrGroupNotFound) Error() string { + return fmt.Sprintf("Group not found: %v", e.groupUUID) +} + +// ErrTasknotFound ... 
+type ErrTasknotFound struct { + taskUUID string +} + +// NewErrTasknotFound returns new instance of ErrTasknotFound +func NewErrTasknotFound(taskUUID string) ErrTasknotFound { + return ErrTasknotFound{taskUUID: taskUUID} +} + +// Error implements error interface +func (e ErrTasknotFound) Error() string { + return fmt.Sprintf("Task not found: %v", e.taskUUID) +} + +// Backend represents an "eager" in-memory result backend +type Backend struct { + common.Backend + groups map[string][]string + tasks map[string][]byte + stateMutex sync.Mutex +} + +// New creates EagerBackend instance +func New() iface.Backend { + return &Backend{ + Backend: common.NewBackend(new(config.Config)), + groups: make(map[string][]string), + tasks: make(map[string][]byte), + } +} + +// InitGroup creates and saves a group meta data object +func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { + tasks := make([]string, 0, len(taskUUIDs)) + // copy every task + tasks = append(tasks, taskUUIDs...) + + b.groups[groupUUID] = tasks + return nil +} + +// GroupCompleted returns true if all tasks in a group finished +func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { + tasks, ok := b.groups[groupUUID] + if !ok { + return false, NewErrGroupNotFound(groupUUID) + } + + var countSuccessTasks = 0 + for _, v := range tasks { + t, err := b.GetState(v) + if err != nil { + return false, err + } + + if t.IsCompleted() { + countSuccessTasks++ + } + } + + return countSuccessTasks == groupTaskCount, nil +} + +// GroupTaskStates returns states of all tasks in the group +func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { + taskUUIDs, ok := b.groups[groupUUID] + if !ok { + return nil, NewErrGroupNotFound(groupUUID) + } + + ret := make([]*tasks.TaskState, 0, groupTaskCount) + for _, taskUUID := range taskUUIDs { + t, err := b.GetState(taskUUID) + if err != nil { + return nil, err + } + + ret = append(ret, t) + } + 
+ return ret, nil +} + +// TriggerChord flags chord as triggered in the backend storage to make sure +// chord is never trigerred multiple times. Returns a boolean flag to indicate +// whether the worker should trigger chord (true) or no if it has been triggered +// already (false) +func (b *Backend) TriggerChord(groupUUID string) (bool, error) { + return true, nil +} + +// SetStatePending updates task state to PENDING +func (b *Backend) SetStatePending(signature *tasks.Signature) error { + state := tasks.NewPendingTaskState(signature) + return b.updateState(state) +} + +// SetStateReceived updates task state to RECEIVED +func (b *Backend) SetStateReceived(signature *tasks.Signature) error { + state := tasks.NewReceivedTaskState(signature) + return b.updateState(state) +} + +// SetStateStarted updates task state to STARTED +func (b *Backend) SetStateStarted(signature *tasks.Signature) error { + state := tasks.NewStartedTaskState(signature) + return b.updateState(state) +} + +// SetStateRetry updates task state to RETRY +func (b *Backend) SetStateRetry(signature *tasks.Signature) error { + state := tasks.NewRetryTaskState(signature) + return b.updateState(state) +} + +// SetStateSuccess updates task state to SUCCESS +func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { + state := tasks.NewSuccessTaskState(signature, results) + return b.updateState(state) +} + +// SetStateFailure updates task state to FAILURE +func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { + state := tasks.NewFailureTaskState(signature, err) + return b.updateState(state) +} + +// GetState returns the latest task state +func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { + tasktStateBytes, ok := b.tasks[taskUUID] + if !ok { + return nil, NewErrTasknotFound(taskUUID) + } + + state := new(tasks.TaskState) + decoder := json.NewDecoder(bytes.NewReader(tasktStateBytes)) + decoder.UseNumber() + if err := 
decoder.Decode(state); err != nil { + return nil, fmt.Errorf("Failed to unmarshal task state %v", b) + } + + return state, nil +} + +// PurgeState deletes stored task state +func (b *Backend) PurgeState(taskUUID string) error { + _, ok := b.tasks[taskUUID] + if !ok { + return NewErrTasknotFound(taskUUID) + } + + delete(b.tasks, taskUUID) + return nil +} + +// PurgeGroupMeta deletes stored group meta data +func (b *Backend) PurgeGroupMeta(groupUUID string) error { + _, ok := b.groups[groupUUID] + if !ok { + return NewErrGroupNotFound(groupUUID) + } + + delete(b.groups, groupUUID) + return nil +} + +func (b *Backend) updateState(s *tasks.TaskState) error { + // simulate the behavior of json marshal/unmarshal + b.stateMutex.Lock() + defer b.stateMutex.Unlock() + msg, err := json.Marshal(s) + if err != nil { + return fmt.Errorf("Marshal task state error: %v", err) + } + + b.tasks[s.TaskUUID] = msg + return nil +} diff --git a/v2/backends/eager/eager_test.go b/v2/backends/eager/eager_test.go new file mode 100644 index 000000000..11c696005 --- /dev/null +++ b/v2/backends/eager/eager_test.go @@ -0,0 +1,342 @@ +package eager_test + +import ( + "encoding/json" + "testing" + + "github.com/RichardKnop/machinery/v2/backends/eager" + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/suite" +) + +type EagerBackendTestSuite struct { + suite.Suite + + backend iface.Backend + st []*tasks.Signature + groups []struct { + id string + tasks []string + } +} + +func (s *EagerBackendTestSuite) SetupSuite() { + // prepare common test data + s.backend = eager.New() + + // 2 non-group state + s.st = []*tasks.Signature{ + {UUID: "1"}, + {UUID: "2"}, + {UUID: "3"}, + {UUID: "4"}, + {UUID: "5"}, + {UUID: "6"}, + } + + for _, t := range s.st { + s.backend.SetStatePending(t) + } + + // groups + s.groups = []struct { + id string + tasks []string + }{ + {"group1", []string{"1-3", "1-4"}}, + {"group2", []string{"2-1", 
"2-2", "2-3"}}, + {"group3", []string(nil)}, + {"group4", []string{"4-1", "4-2", "4-3", "4-4"}}, + {"group5", []string{"5-1", "5-2"}}, + } + + for _, g := range s.groups { + for _, t := range g.tasks { + sig := &tasks.Signature{ + UUID: t, + GroupUUID: g.id, + GroupTaskCount: len(g.tasks), + } + s.st = append(s.st, sig) + + // default state is pending + s.backend.SetStatePending(sig) + } + + s.Nil(s.backend.InitGroup(g.id, g.tasks)) + } + + // prepare for TestInitGroup + s.Nil(s.backend.PurgeGroupMeta(s.groups[4].id)) +} + +// +// Test Cases +// + +func (s *EagerBackendTestSuite) TestInitGroup() { + // group 5 + { + g := s.groups[4] + s.Nil(s.backend.InitGroup(g.id, g.tasks)) + } + + // group3 -- nil as task list + { + g := s.groups[2] + s.Nil(s.backend.InitGroup(g.id, g.tasks)) + } +} + +func (s *EagerBackendTestSuite) TestGroupCompleted() { + // group 1 + { + // all tasks are pending + g := s.groups[0] + completed, err := s.backend.GroupCompleted(g.id, len(g.tasks)) + s.False(completed) + s.Nil(err) + + // make these tasks success + for _, id := range g.tasks { + t := s.getTaskSignature(id) + s.NotNil(t) + if t == nil { + break + } + + s.backend.SetStateSuccess(t, nil) + } + + completed, err = s.backend.GroupCompleted(g.id, len(g.tasks)) + s.True(completed) + s.Nil(err) + } + + // group 2 + { + g := s.groups[1] + + completed, err := s.backend.GroupCompleted(g.id, len(g.tasks)) + s.False(completed) + s.Nil(err) + + // make these tasks failure + for _, id := range g.tasks { + t := s.getTaskSignature(id) + s.NotNil(t) + if t == nil { + break + } + + s.backend.SetStateFailure(t, "just a test") + } + + completed, err = s.backend.GroupCompleted(g.id, len(g.tasks)) + s.True(completed) + s.Nil(err) + } + + { + // call on a not-existed group + completed, err := s.backend.GroupCompleted("", 0) + s.False(completed) + s.NotNil(err) + } +} + +func (s *EagerBackendTestSuite) TestGroupTaskStates() { + // group 4 + { + g := s.groups[3] + + // set failure state with taskUUID as 
error message + for _, id := range g.tasks { + t := s.getTaskSignature(id) + s.NotNil(t) + if t == nil { + break + } + + s.backend.SetStateFailure(t, t.UUID) + } + + // get states back + ts, err := s.backend.GroupTaskStates(g.id, len(g.tasks)) + s.NotNil(ts) + s.Nil(err) + for _, t := range ts { + s.Equal(t.TaskUUID, t.Error) + } + } + + { + // call on a not-existed group + ts, err := s.backend.GroupTaskStates("", 0) + s.Nil(ts) + s.NotNil(err) + } +} + +func (s *EagerBackendTestSuite) TestSetStatePending() { + // task 1 + { + t := s.st[0] + + // change this state to receiving + s.backend.SetStateReceived(t) + + // change it back to pending + s.backend.SetStatePending(t) + + st, err := s.backend.GetState(t.UUID) + s.Nil(err) + if st != nil { + s.Equal(tasks.StatePending, st.State) + } + } +} + +func (s *EagerBackendTestSuite) TestSetStateReceived() { + // task2 + { + t := s.st[1] + s.backend.SetStateReceived(t) + st, err := s.backend.GetState(t.UUID) + s.Nil(err) + if st != nil { + s.Equal(tasks.StateReceived, st.State) + } + } +} + +func (s *EagerBackendTestSuite) TestSetStateStarted() { + // task3 + { + t := s.st[2] + s.backend.SetStateStarted(t) + st, err := s.backend.GetState(t.UUID) + s.Nil(err) + if st != nil { + s.Equal(tasks.StateStarted, st.State) + } + } +} + +func (s *EagerBackendTestSuite) TestSetStateSuccess() { + // task4 + { + t := s.st[3] + taskResults := []*tasks.TaskResult{ + { + Type: "float64", + Value: json.Number("300.0"), + }, + } + s.backend.SetStateSuccess(t, taskResults) + st, err := s.backend.GetState(t.UUID) + s.Nil(err) + s.NotNil(st) + + s.Equal(tasks.StateSuccess, st.State) + s.Equal(taskResults, st.Results) + } +} + +func (s *EagerBackendTestSuite) TestSetStateFailure() { + // task5 + { + t := s.st[4] + s.backend.SetStateFailure(t, "error") + st, err := s.backend.GetState(t.UUID) + s.Nil(err) + if st != nil { + s.Equal(tasks.StateFailure, st.State) + s.Equal("error", st.Error) + } + } +} + +func (s *EagerBackendTestSuite) 
TestSetStateRetry() { + // task6 + { + t := s.st[5] + s.backend.SetStateRetry(t) + st, err := s.backend.GetState(t.UUID) + s.Nil(err) + if st != nil { + s.Equal(tasks.StateRetry, st.State) + } + } +} + +func (s *EagerBackendTestSuite) TestGetState() { + // get something not existed -- empty string + st, err := s.backend.GetState("") + s.Nil(st) + s.NotNil(err) +} + +func (s *EagerBackendTestSuite) TestPurgeState() { + // task6 + { + t := s.st[5] + st, err := s.backend.GetState(t.UUID) + s.NotNil(st) + s.Nil(err) + + // purge it + s.Nil(s.backend.PurgeState(t.UUID)) + + // should be not found + st, err = s.backend.GetState(t.UUID) + s.Nil(st) + s.NotNil(err) + } + + { + // purge a not-existed state + s.NotNil(s.backend.PurgeState("")) + } +} + +func (s *EagerBackendTestSuite) TestPurgeGroupMeta() { + // group4 + { + g := s.groups[3] + ts, err := s.backend.GroupTaskStates(g.id, len(g.tasks)) + s.NotNil(ts) + s.Nil(err) + + // purge group + s.Nil(s.backend.PurgeGroupMeta(g.id)) + + // should be not found + ts, err = s.backend.GroupTaskStates(g.id, len(g.tasks)) + s.Nil(ts) + s.NotNil(err) + } + + { + // purge a not-existed group + s.NotNil(s.backend.PurgeGroupMeta("")) + } +} + +// +// internal method +// +func (s *EagerBackendTestSuite) getTaskSignature(taskUUID string) *tasks.Signature { + for _, v := range s.st { + if v.UUID == taskUUID { + return v + } + } + + return nil +} + +func TestEagerBackendMain(t *testing.T) { + suite.Run(t, &EagerBackendTestSuite{}) +} diff --git a/v2/backends/iface/interfaces.go b/v2/backends/iface/interfaces.go new file mode 100644 index 000000000..f1432ae6a --- /dev/null +++ b/v2/backends/iface/interfaces.go @@ -0,0 +1,28 @@ +package iface + +import ( + "github.com/RichardKnop/machinery/v2/tasks" +) + +// Backend - a common interface for all result backends +type Backend interface { + // Group related functions + InitGroup(groupUUID string, taskUUIDs []string) error + GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) + 
GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) + TriggerChord(groupUUID string) (bool, error) + + // Setting / getting task state + SetStatePending(signature *tasks.Signature) error + SetStateReceived(signature *tasks.Signature) error + SetStateStarted(signature *tasks.Signature) error + SetStateRetry(signature *tasks.Signature) error + SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error + SetStateFailure(signature *tasks.Signature, err string) error + GetState(taskUUID string) (*tasks.TaskState, error) + + // Purging stored task states and group meta data + IsAMQP() bool + PurgeState(taskUUID string) error + PurgeGroupMeta(groupUUID string) error +} diff --git a/v2/backends/memcache/memcache.go b/v2/backends/memcache/memcache.go new file mode 100644 index 000000000..1f6ae65a9 --- /dev/null +++ b/v2/backends/memcache/memcache.go @@ -0,0 +1,292 @@ +package memcache + +import ( + "bytes" + "encoding/json" + "time" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" + + gomemcache "github.com/bradfitz/gomemcache/memcache" +) + +// Backend represents a Memcache result backend +type Backend struct { + common.Backend + servers []string + client *gomemcache.Client +} + +// New creates Backend instance +func New(cnf *config.Config, servers []string) iface.Backend { + return &Backend{ + Backend: common.NewBackend(cnf), + servers: servers, + } +} + +// InitGroup creates and saves a group meta data object +func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { + groupMeta := &tasks.GroupMeta{ + GroupUUID: groupUUID, + TaskUUIDs: taskUUIDs, + CreatedAt: time.Now().UTC(), + } + + encoded, err := json.Marshal(&groupMeta) + if err != nil { + return err + } + + return b.getClient().Set(&gomemcache.Item{ + 
Key: groupUUID, + Value: encoded, + Expiration: b.getExpirationTimestamp(), + }) +} + +// GroupCompleted returns true if all tasks in a group finished +func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { + groupMeta, err := b.getGroupMeta(groupUUID) + if err != nil { + return false, err + } + + taskStates, err := b.getStates(groupMeta.TaskUUIDs...) + if err != nil { + return false, err + } + + var countSuccessTasks = 0 + for _, taskState := range taskStates { + if taskState.IsCompleted() { + countSuccessTasks++ + } + } + + return countSuccessTasks == groupTaskCount, nil +} + +// GroupTaskStates returns states of all tasks in the group +func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { + groupMeta, err := b.getGroupMeta(groupUUID) + if err != nil { + return []*tasks.TaskState{}, err + } + + return b.getStates(groupMeta.TaskUUIDs...) +} + +// TriggerChord flags chord as triggered in the backend storage to make sure +// chord is never triggered multiple times. 
Returns a boolean flag to indicate +// whether the worker should trigger chord (true) or no if it has been triggered +// already (false) +func (b *Backend) TriggerChord(groupUUID string) (bool, error) { + groupMeta, err := b.getGroupMeta(groupUUID) + if err != nil { + return false, err + } + + // Chord has already been triggered, return false (should not trigger again) + if groupMeta.ChordTriggered { + return false, nil + } + + // If group meta is locked, wait until it's unlocked + for groupMeta.Lock { + groupMeta, _ = b.getGroupMeta(groupUUID) + log.WARNING.Print("Group meta locked, waiting") + time.Sleep(time.Millisecond * 5) + } + + // Acquire lock + if err = b.lockGroupMeta(groupMeta); err != nil { + return false, err + } + defer b.unlockGroupMeta(groupMeta) + + // Update the group meta data + groupMeta.ChordTriggered = true + encoded, err := json.Marshal(&groupMeta) + if err != nil { + return false, err + } + if err = b.getClient().Replace(&gomemcache.Item{ + Key: groupUUID, + Value: encoded, + Expiration: b.getExpirationTimestamp(), + }); err != nil { + return false, err + } + + return true, nil +} + +// SetStatePending updates task state to PENDING +func (b *Backend) SetStatePending(signature *tasks.Signature) error { + taskState := tasks.NewPendingTaskState(signature) + return b.updateState(taskState) +} + +// SetStateReceived updates task state to RECEIVED +func (b *Backend) SetStateReceived(signature *tasks.Signature) error { + taskState := tasks.NewReceivedTaskState(signature) + return b.updateState(taskState) +} + +// SetStateStarted updates task state to STARTED +func (b *Backend) SetStateStarted(signature *tasks.Signature) error { + taskState := tasks.NewStartedTaskState(signature) + return b.updateState(taskState) +} + +// SetStateRetry updates task state to RETRY +func (b *Backend) SetStateRetry(signature *tasks.Signature) error { + state := tasks.NewRetryTaskState(signature) + return b.updateState(state) +} + +// SetStateSuccess updates task state 
to SUCCESS +func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { + taskState := tasks.NewSuccessTaskState(signature, results) + return b.updateState(taskState) +} + +// SetStateFailure updates task state to FAILURE +func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { + taskState := tasks.NewFailureTaskState(signature, err) + return b.updateState(taskState) +} + +// GetState returns the latest task state +func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { + item, err := b.getClient().Get(taskUUID) + if err != nil { + return nil, err + } + + state := new(tasks.TaskState) + decoder := json.NewDecoder(bytes.NewReader(item.Value)) + decoder.UseNumber() + if err := decoder.Decode(state); err != nil { + return nil, err + } + + return state, nil +} + +// PurgeState deletes stored task state +func (b *Backend) PurgeState(taskUUID string) error { + return b.getClient().Delete(taskUUID) +} + +// PurgeGroupMeta deletes stored group meta data +func (b *Backend) PurgeGroupMeta(groupUUID string) error { + return b.getClient().Delete(groupUUID) +} + +// updateState saves current task state +func (b *Backend) updateState(taskState *tasks.TaskState) error { + encoded, err := json.Marshal(taskState) + if err != nil { + return err + } + + return b.getClient().Set(&gomemcache.Item{ + Key: taskState.TaskUUID, + Value: encoded, + Expiration: b.getExpirationTimestamp(), + }) +} + +// lockGroupMeta acquires lock on group meta data +func (b *Backend) lockGroupMeta(groupMeta *tasks.GroupMeta) error { + groupMeta.Lock = true + encoded, err := json.Marshal(groupMeta) + if err != nil { + return err + } + + return b.getClient().Set(&gomemcache.Item{ + Key: groupMeta.GroupUUID, + Value: encoded, + Expiration: b.getExpirationTimestamp(), + }) +} + +// unlockGroupMeta releases lock on group meta data +func (b *Backend) unlockGroupMeta(groupMeta *tasks.GroupMeta) error { + groupMeta.Lock = false + 
encoded, err := json.Marshal(groupMeta) + if err != nil { + return err + } + + return b.getClient().Set(&gomemcache.Item{ + Key: groupMeta.GroupUUID, + Value: encoded, + Expiration: b.getExpirationTimestamp(), + }) +} + +// getGroupMeta retrieves group meta data, convenience function to avoid repetition +func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) { + item, err := b.getClient().Get(groupUUID) + if err != nil { + return nil, err + } + + groupMeta := new(tasks.GroupMeta) + decoder := json.NewDecoder(bytes.NewReader(item.Value)) + decoder.UseNumber() + if err := decoder.Decode(groupMeta); err != nil { + return nil, err + } + + return groupMeta, nil +} + +// getStates returns multiple task states +func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) { + states := make([]*tasks.TaskState, len(taskUUIDs)) + + for i, taskUUID := range taskUUIDs { + item, err := b.getClient().Get(taskUUID) + if err != nil { + return nil, err + } + + state := new(tasks.TaskState) + decoder := json.NewDecoder(bytes.NewReader(item.Value)) + decoder.UseNumber() + if err := decoder.Decode(state); err != nil { + return nil, err + } + + states[i] = state + } + + return states, nil +} + +// getExpirationTimestamp returns expiration timestamp +func (b *Backend) getExpirationTimestamp() int32 { + expiresIn := b.GetConfig().ResultsExpireIn + if expiresIn == 0 { + // expire results after 1 hour by default + expiresIn = config.DefaultResultsExpireIn + } + return int32(time.Now().Unix() + int64(expiresIn)) +} + +// getClient returns or creates instance of Memcache client +func (b *Backend) getClient() *gomemcache.Client { + if b.client == nil { + b.client = gomemcache.New(b.servers...) 
+ } + return b.client +} diff --git a/v2/backends/memcache/memcache_test.go b/v2/backends/memcache/memcache_test.go new file mode 100644 index 000000000..1486a9642 --- /dev/null +++ b/v2/backends/memcache/memcache_test.go @@ -0,0 +1,142 @@ +package memcache_test + +import ( + "os" + "testing" + "time" + + "github.com/RichardKnop/machinery/v2/backends/memcache" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestGroupCompleted(t *testing.T) { + memcacheURL := os.Getenv("MEMCACHE_URL") + if memcacheURL == "" { + t.Skip("MEMCACHE_URL is not defined") + } + + groupUUID := "testGroupUUID" + task1 := &tasks.Signature{ + UUID: "testTaskUUID1", + GroupUUID: groupUUID, + } + task2 := &tasks.Signature{ + UUID: "testTaskUUID2", + GroupUUID: groupUUID, + } + + backend := memcache.New(new(config.Config), []string{memcacheURL}) + + // Cleanup before the test + backend.PurgeState(task1.UUID) + backend.PurgeState(task2.UUID) + backend.PurgeGroupMeta(groupUUID) + + groupCompleted, err := backend.GroupCompleted(groupUUID, 2) + if assert.Error(t, err) { + assert.False(t, groupCompleted) + assert.Equal(t, "memcache: cache miss", err.Error()) + } + + backend.InitGroup(groupUUID, []string{task1.UUID, task2.UUID}) + + groupCompleted, err = backend.GroupCompleted(groupUUID, 2) + if assert.Error(t, err) { + assert.False(t, groupCompleted) + assert.Equal(t, "memcache: cache miss", err.Error()) + } + + backend.SetStatePending(task1) + backend.SetStateStarted(task2) + groupCompleted, err = backend.GroupCompleted(groupUUID, 2) + if assert.NoError(t, err) { + assert.False(t, groupCompleted) + } + + taskResults := []*tasks.TaskResult{new(tasks.TaskResult)} + backend.SetStateStarted(task1) + backend.SetStateSuccess(task2, taskResults) + groupCompleted, err = backend.GroupCompleted(groupUUID, 2) + if assert.NoError(t, err) { + assert.False(t, groupCompleted) + } + + backend.SetStateFailure(task1, "Some 
error") + groupCompleted, err = backend.GroupCompleted(groupUUID, 2) + if assert.NoError(t, err) { + assert.True(t, groupCompleted) + } +} + +func TestGetState(t *testing.T) { + memcacheURL := os.Getenv("MEMCACHE_URL") + if memcacheURL == "" { + t.Skip("MEMCACHE_URL is not defined") + } + + signature := &tasks.Signature{ + UUID: "testTaskUUID", + GroupUUID: "testGroupUUID", + } + + backend := memcache.New(new(config.Config), []string{memcacheURL}) + + go func() { + backend.SetStatePending(signature) + time.Sleep(2 * time.Millisecond) + backend.SetStateReceived(signature) + time.Sleep(2 * time.Millisecond) + backend.SetStateStarted(signature) + time.Sleep(2 * time.Millisecond) + taskResults := []*tasks.TaskResult{ + { + Type: "float64", + Value: 2, + }, + } + backend.SetStateSuccess(signature, taskResults) + }() + + var ( + taskState *tasks.TaskState + err error + ) + for { + taskState, err = backend.GetState(signature.UUID) + if taskState == nil { + assert.Equal(t, "memcache: cache miss", err.Error()) + continue + } + + assert.NoError(t, err) + if taskState.IsCompleted() { + break + } + } +} + +func TestPurgeState(t *testing.T) { + memcacheURL := os.Getenv("MEMCACHE_URL") + if memcacheURL == "" { + t.Skip("MEMCACHE_URL is not defined") + } + + signature := &tasks.Signature{ + UUID: "testTaskUUID", + GroupUUID: "testGroupUUID", + } + + backend := memcache.New(new(config.Config), []string{memcacheURL}) + + backend.SetStatePending(signature) + taskState, err := backend.GetState(signature.UUID) + assert.NotNil(t, taskState) + assert.NoError(t, err) + + backend.PurgeState(taskState.TaskUUID) + taskState, err = backend.GetState(signature.UUID) + assert.Nil(t, taskState) + assert.Error(t, err) +} diff --git a/v2/backends/mongo/mongodb.go b/v2/backends/mongo/mongodb.go new file mode 100644 index 000000000..63fcaf304 --- /dev/null +++ b/v2/backends/mongo/mongodb.go @@ -0,0 +1,358 @@ +package mongo + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + 
"sync" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" +) + +// Backend represents a MongoDB result backend +type Backend struct { + common.Backend + client *mongo.Client + tc *mongo.Collection + gmc *mongo.Collection + once sync.Once +} + +// New creates Backend instance +func New(cnf *config.Config) (iface.Backend, error) { + backend := &Backend{ + Backend: common.NewBackend(cnf), + once: sync.Once{}, + } + + return backend, nil +} + +// InitGroup creates and saves a group meta data object +func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { + groupMeta := &tasks.GroupMeta{ + GroupUUID: groupUUID, + TaskUUIDs: taskUUIDs, + CreatedAt: time.Now().UTC(), + } + _, err := b.groupMetasCollection().InsertOne(context.Background(), groupMeta) + return err +} + +// GroupCompleted returns true if all tasks in a group finished +func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { + groupMeta, err := b.getGroupMeta(groupUUID) + if err != nil { + return false, err + } + + taskStates, err := b.getStates(groupMeta.TaskUUIDs...) + if err != nil { + return false, err + } + + var countSuccessTasks = 0 + for _, taskState := range taskStates { + if taskState.IsCompleted() { + countSuccessTasks++ + } + } + + return countSuccessTasks == groupTaskCount, nil +} + +// GroupTaskStates returns states of all tasks in the group +func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { + groupMeta, err := b.getGroupMeta(groupUUID) + if err != nil { + return []*tasks.TaskState{}, err + } + + return b.getStates(groupMeta.TaskUUIDs...) 
+} + +// TriggerChord flags chord as triggered in the backend storage to make sure +// chord is never triggered multiple times. Returns a boolean flag to indicate +// whether the worker should trigger chord (true) or no if it has been triggered +// already (false) +func (b *Backend) TriggerChord(groupUUID string) (bool, error) { + query := bson.M{ + "_id": groupUUID, + "chord_triggered": false, + } + change := bson.M{ + "$set": bson.M{ + "chord_triggered": true, + }, + } + + _, err := b.groupMetasCollection().UpdateOne(context.Background(), query, change, options.Update()) + + if err != nil { + if err == mongo.ErrNoDocuments { + log.WARNING.Printf("Chord already triggered for group %s", groupUUID) + return false, nil + } + return false, err + } + return true, nil +} + +// SetStatePending updates task state to PENDING +func (b *Backend) SetStatePending(signature *tasks.Signature) error { + update := bson.M{ + "state": tasks.StatePending, + "task_name": signature.Name, + "created_at": time.Now().UTC(), + } + return b.updateState(signature, update) +} + +// SetStateReceived updates task state to RECEIVED +func (b *Backend) SetStateReceived(signature *tasks.Signature) error { + update := bson.M{"state": tasks.StateReceived} + return b.updateState(signature, update) +} + +// SetStateStarted updates task state to STARTED +func (b *Backend) SetStateStarted(signature *tasks.Signature) error { + update := bson.M{"state": tasks.StateStarted} + return b.updateState(signature, update) +} + +// SetStateRetry updates task state to RETRY +func (b *Backend) SetStateRetry(signature *tasks.Signature) error { + update := bson.M{"state": tasks.StateRetry} + return b.updateState(signature, update) +} + +// SetStateSuccess updates task state to SUCCESS +func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { + decodedResults := b.decodeResults(results) + update := bson.M{ + "state": tasks.StateSuccess, + "results": decodedResults, + } + return 
b.updateState(signature, update) +} + +// decodeResults detects & decodes json strings in TaskResult.Value and returns a new slice +func (b *Backend) decodeResults(results []*tasks.TaskResult) []*tasks.TaskResult { + l := len(results) + jsonResults := make([]*tasks.TaskResult, l) + for i, result := range results { + jsonResult := new(bson.M) + resultType := reflect.TypeOf(result.Value).Kind() + if resultType == reflect.String { + err := json.NewDecoder(strings.NewReader(result.Value.(string))).Decode(&jsonResult) + if err == nil { + jsonResults[i] = &tasks.TaskResult{ + Type: "json", + Value: jsonResult, + } + continue + } + } + jsonResults[i] = result + } + return jsonResults +} + +// SetStateFailure updates task state to FAILURE +func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { + update := bson.M{"state": tasks.StateFailure, "error": err} + return b.updateState(signature, update) +} + +// GetState returns the latest task state +func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { + state := &tasks.TaskState{} + err := b.tasksCollection().FindOne(context.Background(), bson.M{"_id": taskUUID}).Decode(state) + + if err != nil { + return nil, err + } + return state, nil +} + +// PurgeState deletes stored task state +func (b *Backend) PurgeState(taskUUID string) error { + _, err := b.tasksCollection().DeleteOne(context.Background(), bson.M{"_id": taskUUID}) + return err +} + +// PurgeGroupMeta deletes stored group meta data +func (b *Backend) PurgeGroupMeta(groupUUID string) error { + _, err := b.groupMetasCollection().DeleteOne(context.Background(), bson.M{"_id": groupUUID}) + return err +} + +// lockGroupMeta acquires lock on groupUUID document +func (b *Backend) lockGroupMeta(groupUUID string) error { + query := bson.M{ + "_id": groupUUID, + "lock": false, + } + change := bson.M{ + "$set": bson.M{ + "lock": true, + }, + } + + _, err := b.groupMetasCollection().UpdateOne(context.Background(), query, change, 
options.Update().SetUpsert(true)) + + return err +} + +// unlockGroupMeta releases lock on groupUUID document +func (b *Backend) unlockGroupMeta(groupUUID string) error { + update := bson.M{"$set": bson.M{"lock": false}} + _, err := b.groupMetasCollection().UpdateOne(context.Background(), bson.M{"_id": groupUUID}, update, options.Update()) + return err +} + +// getGroupMeta retrieves group meta data, convenience function to avoid repetition +func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) { + groupMeta := &tasks.GroupMeta{} + query := bson.M{"_id": groupUUID} + + err := b.groupMetasCollection().FindOne(context.Background(), query).Decode(groupMeta) + if err != nil { + return nil, err + } + return groupMeta, nil +} + +// getStates returns multiple task states +func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) { + states := make([]*tasks.TaskState, 0, len(taskUUIDs)) + cur, err := b.tasksCollection().Find(context.Background(), bson.M{"_id": bson.M{"$in": taskUUIDs}}) + if err != nil { + return nil, err + } + defer cur.Close(context.Background()) + + for cur.Next(context.Background()) { + state := &tasks.TaskState{} + if err := cur.Decode(state); err != nil { + return nil, err + } + states = append(states, state) + } + if cur.Err() != nil { + return nil, err + } + return states, nil +} + +// updateState saves current task state +func (b *Backend) updateState(signature *tasks.Signature, update bson.M) error { + update = bson.M{"$set": update} + _, err := b.tasksCollection().UpdateOne(context.Background(), bson.M{"_id": signature.UUID}, update, options.Update().SetUpsert(true)) + return err +} + +func (b *Backend) tasksCollection() *mongo.Collection { + b.once.Do(func() { + b.connect() + }) + + return b.tc +} + +func (b *Backend) groupMetasCollection() *mongo.Collection { + b.once.Do(func() { + b.connect() + }) + + return b.gmc +} + +// connect creates the underlying mgo connection if it doesn't exist +// creates 
required indexes for our collections +func (b *Backend) connect() error { + client, err := b.dial() + if err != nil { + return err + } + b.client = client + + database := "machinery" + + if b.GetConfig().MongoDB != nil { + database = b.GetConfig().MongoDB.Database + } + + b.tc = b.client.Database(database).Collection("tasks") + b.gmc = b.client.Database(database).Collection("group_metas") + + err = b.createMongoIndexes(database) + if err != nil { + return err + } + return nil +} + +// dial connects to mongo with TLSConfig if provided +// else connects via ResultBackend uri +func (b *Backend) dial() (*mongo.Client, error) { + + if b.GetConfig().MongoDB != nil && b.GetConfig().MongoDB.Client != nil { + return b.GetConfig().MongoDB.Client, nil + } + + uri := b.GetConfig().ResultBackend + if strings.HasPrefix(uri, "mongodb://") == false && + strings.HasPrefix(uri, "mongodb+srv://") == false { + uri = fmt.Sprintf("mongodb://%s", uri) + } + + client, err := mongo.NewClient(options.Client().ApplyURI(uri)) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + if err := client.Connect(ctx); err != nil { + return nil, err + } + + return client, nil +} + +// createMongoIndexes ensures all indexes are in place +func (b *Backend) createMongoIndexes(database string) error { + + tasksCollection := b.client.Database(database).Collection("tasks") + + expireIn := int32(b.GetConfig().ResultsExpireIn) + + _, err := tasksCollection.Indexes().CreateMany(context.Background(), []mongo.IndexModel{ + { + Keys: bson.M{"state": 1}, + Options: options.Index().SetBackground(true).SetExpireAfterSeconds(expireIn), + }, + mongo.IndexModel{ + Keys: bson.M{"lock": 1}, + Options: options.Index().SetBackground(true).SetExpireAfterSeconds(expireIn), + }, + }) + if err != nil { + return err + } + + return err +} diff --git a/v2/backends/mongo/mongodb_test.go b/v2/backends/mongo/mongodb_test.go new file mode 100644 index 
000000000..e3b165596 --- /dev/null +++ b/v2/backends/mongo/mongodb_test.go @@ -0,0 +1,247 @@ +package mongo_test + +import ( + "os" + "testing" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/backends/mongo" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +var ( + groupUUID = "123456" + taskUUIDs = []string{"1", "2", "3"} +) + +func newBackend() (iface.Backend, error) { + cnf := &config.Config{ + ResultBackend: os.Getenv("MONGODB_URL"), + ResultsExpireIn: 30, + } + backend, err := mongo.New(cnf) + if err != nil { + return nil, err + } + + backend.PurgeGroupMeta(groupUUID) + for _, taskUUID := range taskUUIDs { + backend.PurgeState(taskUUID) + } + + if err := backend.InitGroup(groupUUID, taskUUIDs); err != nil { + return nil, err + } + return backend, nil +} + +func TestNew(t *testing.T) { + if os.Getenv("MONGODB_URL") == "" { + t.Skip("MONGODB_URL is not defined") + } + + backend, err := newBackend() + if assert.NoError(t, err) { + assert.NotNil(t, backend) + } +} + +func TestSetStatePending(t *testing.T) { + if os.Getenv("MONGODB_URL") == "" { + t.Skip("MONGODB_URL is not defined") + } + + backend, err := newBackend() + if err != nil { + t.Fatal(err) + } + + err = backend.SetStatePending(&tasks.Signature{ + UUID: taskUUIDs[0], + }) + if assert.NoError(t, err) { + taskState, err := backend.GetState(taskUUIDs[0]) + if assert.NoError(t, err) { + assert.Equal(t, tasks.StatePending, taskState.State, "Not StatePending") + } + } +} + +func TestSetStateReceived(t *testing.T) { + if os.Getenv("MONGODB_URL") == "" { + t.Skip("MONGODB_URL is not defined") + } + + backend, err := newBackend() + if err != nil { + t.Fatal(err) + } + + err = backend.SetStateReceived(&tasks.Signature{ + UUID: taskUUIDs[0], + }) + if assert.NoError(t, err) { + taskState, err := backend.GetState(taskUUIDs[0]) + if assert.NoError(t, err) { + assert.Equal(t, 
tasks.StateReceived, taskState.State, "Not StateReceived") + } + } +} + +func TestSetStateStarted(t *testing.T) { + if os.Getenv("MONGODB_URL") == "" { + t.Skip("MONGODB_URL is not defined") + } + + backend, err := newBackend() + if err != nil { + t.Fatal(err) + } + + err = backend.SetStateStarted(&tasks.Signature{ + UUID: taskUUIDs[0], + }) + if assert.NoError(t, err) { + taskState, err := backend.GetState(taskUUIDs[0]) + if assert.NoError(t, err) { + assert.Equal(t, tasks.StateStarted, taskState.State, "Not StateStarted") + } + } +} + +func TestSetStateSuccess(t *testing.T) { + if os.Getenv("MONGODB_URL") == "" { + t.Skip("MONGODB_URL is not defined") + } + + resultType := "float64" + resultValue := float64(88.5) + + backend, err := newBackend() + if err != nil { + t.Fatal(err) + } + + signature := &tasks.Signature{ + UUID: taskUUIDs[0], + } + taskResults := []*tasks.TaskResult{ + { + Type: resultType, + Value: resultValue, + }, + } + err = backend.SetStateSuccess(signature, taskResults) + assert.NoError(t, err) + + taskState, err := backend.GetState(taskUUIDs[0]) + assert.NoError(t, err) + assert.Equal(t, tasks.StateSuccess, taskState.State, "Not StateSuccess") + assert.Equal(t, resultType, taskState.Results[0].Type, "Wrong result type") + assert.Equal(t, float64(resultValue), taskState.Results[0].Value.(float64), "Wrong result value") +} + +func TestSetStateFailure(t *testing.T) { + if os.Getenv("MONGODB_URL") == "" { + t.Skip("MONGODB_URL is not defined") + } + + failString := "Fail is ok" + + backend, err := newBackend() + if err != nil { + t.Fatal(err) + } + + signature := &tasks.Signature{ + UUID: taskUUIDs[0], + } + err = backend.SetStateFailure(signature, failString) + assert.NoError(t, err) + + taskState, err := backend.GetState(taskUUIDs[0]) + assert.NoError(t, err) + assert.Equal(t, tasks.StateFailure, taskState.State, "Not StateSuccess") + assert.Equal(t, failString, taskState.Error, "Wrong fail error") +} + +func TestGroupCompleted(t *testing.T) { + 
if os.Getenv("MONGODB_URL") == "" { + t.Skip("MONGODB_URL is not defined") + } + + backend, err := newBackend() + if err != nil { + t.Fatal(err) + } + taskResultsState := make(map[string]string) + + isCompleted, err := backend.GroupCompleted(groupUUID, len(taskUUIDs)) + if assert.NoError(t, err) { + assert.False(t, isCompleted, "Actually group is not completed") + } + + signature := &tasks.Signature{ + UUID: taskUUIDs[0], + } + err = backend.SetStateFailure(signature, "Fail is ok") + assert.NoError(t, err) + taskResultsState[taskUUIDs[0]] = tasks.StateFailure + + signature = &tasks.Signature{ + UUID: taskUUIDs[1], + } + taskResults := []*tasks.TaskResult{ + { + Type: "string", + Value: "Result ok", + }, + } + err = backend.SetStateSuccess(signature, taskResults) + assert.NoError(t, err) + taskResultsState[taskUUIDs[1]] = tasks.StateSuccess + + signature = &tasks.Signature{ + UUID: taskUUIDs[2], + } + err = backend.SetStateSuccess(signature, taskResults) + assert.NoError(t, err) + taskResultsState[taskUUIDs[2]] = tasks.StateSuccess + + isCompleted, err = backend.GroupCompleted(groupUUID, len(taskUUIDs)) + if assert.NoError(t, err) { + assert.True(t, isCompleted, "Actually group is completed") + } + + taskStates, err := backend.GroupTaskStates(groupUUID, len(taskUUIDs)) + assert.NoError(t, err) + + assert.Equal(t, len(taskStates), len(taskUUIDs), "Wrong len tasksStates") + for _, taskState := range taskStates { + assert.Equal( + t, + taskResultsState[taskState.TaskUUID], + taskState.State, + "Wrong state on", taskState.TaskUUID, + ) + } +} + +func TestGroupStates(t *testing.T) { + if os.Getenv("MONGODB_URL") == "" { + t.Skip("MONGODB_URL is not defined") + } + + backend, err := newBackend() + if err != nil { + t.Fatal(err) + } + + taskStates, err := backend.GroupTaskStates(groupUUID, len(taskUUIDs)) + assert.NoError(t, err) + for i, taskState := range taskStates { + assert.Equal(t, taskUUIDs[i], taskState.TaskUUID) + } +} diff --git a/v2/backends/null/null.go 
b/v2/backends/null/null.go new file mode 100644 index 000000000..87d6b1610 --- /dev/null +++ b/v2/backends/null/null.go @@ -0,0 +1,146 @@ +package null + +import ( + "fmt" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" +) + +// ErrGroupNotFound ... +type ErrGroupNotFound struct { + groupUUID string +} + +// NewErrGroupNotFound returns new instance of ErrGroupNotFound +func NewErrGroupNotFound(groupUUID string) ErrGroupNotFound { + return ErrGroupNotFound{groupUUID: groupUUID} +} + +// Error implements error interface +func (e ErrGroupNotFound) Error() string { + return fmt.Sprintf("Group not found: %v", e.groupUUID) +} + +// ErrTasknotFound ... +type ErrTasknotFound struct { + taskUUID string +} + +// NewErrTasknotFound returns new instance of ErrTasknotFound +func NewErrTasknotFound(taskUUID string) ErrTasknotFound { + return ErrTasknotFound{taskUUID: taskUUID} +} + +// Error implements error interface +func (e ErrTasknotFound) Error() string { + return fmt.Sprintf("Task not found: %v", e.taskUUID) +} + +// Backend represents an "null" result backend +type Backend struct { + common.Backend + groups map[string]struct{} +} + +// New creates NullBackend instance +func New() iface.Backend { + return &Backend{ + Backend: common.NewBackend(new(config.Config)), + groups: make(map[string]struct{}), + } +} + +// InitGroup creates and saves a group meta data object +func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { + b.groups[groupUUID] = struct{}{} + return nil +} + +// GroupCompleted returns true (always) +func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { + _, ok := b.groups[groupUUID] + if !ok { + return false, NewErrGroupNotFound(groupUUID) + } + + return true, nil +} + +// GroupTaskStates returns null states of all tasks in the group +func (b *Backend) 
GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { + _, ok := b.groups[groupUUID] + if !ok { + return nil, NewErrGroupNotFound(groupUUID) + } + + ret := make([]*tasks.TaskState, 0, groupTaskCount) + return ret, nil +} + +// TriggerChord returns true (always) +func (b *Backend) TriggerChord(groupUUID string) (bool, error) { + return true, nil +} + +// SetStatePending updates task state to PENDING +func (b *Backend) SetStatePending(signature *tasks.Signature) error { + state := tasks.NewPendingTaskState(signature) + return b.updateState(state) +} + +// SetStateReceived updates task state to RECEIVED +func (b *Backend) SetStateReceived(signature *tasks.Signature) error { + state := tasks.NewReceivedTaskState(signature) + return b.updateState(state) +} + +// SetStateStarted updates task state to STARTED +func (b *Backend) SetStateStarted(signature *tasks.Signature) error { + state := tasks.NewStartedTaskState(signature) + return b.updateState(state) +} + +// SetStateRetry updates task state to RETRY +func (b *Backend) SetStateRetry(signature *tasks.Signature) error { + state := tasks.NewRetryTaskState(signature) + return b.updateState(state) +} + +// SetStateSuccess updates task state to SUCCESS +func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { + state := tasks.NewSuccessTaskState(signature, results) + return b.updateState(state) +} + +// SetStateFailure updates task state to FAILURE +func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { + state := tasks.NewFailureTaskState(signature, err) + return b.updateState(state) +} + +// GetState returns the latest task state +func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { + return nil, NewErrTasknotFound(taskUUID) +} + +// PurgeState deletes stored task state +func (b *Backend) PurgeState(taskUUID string) error { + return NewErrTasknotFound(taskUUID) +} + +// PurgeGroupMeta deletes stored group 
meta data +func (b *Backend) PurgeGroupMeta(groupUUID string) error { + _, ok := b.groups[groupUUID] + if !ok { + return NewErrGroupNotFound(groupUUID) + } + + return nil +} + +func (b *Backend) updateState(s *tasks.TaskState) error { + return nil +} diff --git a/v2/backends/package.go b/v2/backends/package.go new file mode 100644 index 000000000..6c70ab4f4 --- /dev/null +++ b/v2/backends/package.go @@ -0,0 +1 @@ +package backends diff --git a/v2/backends/redis/goredis.go b/v2/backends/redis/goredis.go new file mode 100644 index 000000000..119a43017 --- /dev/null +++ b/v2/backends/redis/goredis.go @@ -0,0 +1,313 @@ +package redis + +import ( + "bytes" + "context" + "encoding/json" + "strings" + "sync" + "time" + + "github.com/go-redis/redis/v8" + "github.com/go-redsync/redsync/v4" + redsyncgoredis "github.com/go-redsync/redsync/v4/redis/goredis/v8" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" +) + +// BackendGR represents a Redis result backend +type BackendGR struct { + common.Backend + rclient redis.UniversalClient + host string + password string + db int + // If set, path to a socket file overrides hostname + socketPath string + redsync *redsync.Redsync + redisOnce sync.Once +} + +// NewGR creates Backend instance +func NewGR(cnf *config.Config, addrs []string, db int) iface.Backend { + b := &BackendGR{ + Backend: common.NewBackend(cnf), + } + parts := strings.Split(addrs[0], "@") + if len(parts) == 2 { + // with passwrod + b.password = parts[0] + addrs[0] = parts[1] + } + + ropt := &redis.UniversalOptions{ + Addrs: addrs, + DB: db, + Password: b.password, + } + if cnf.Redis != nil { + ropt.MasterName = cnf.Redis.MasterName + } + + b.rclient = redis.NewUniversalClient(ropt) + b.redsync = redsync.New(redsyncgoredis.NewPool(b.rclient)) + return b +} + +// InitGroup 
// InitGroup creates and saves a group meta data object under the group UUID
// key, with the same expiration as task states.
func (b *BackendGR) InitGroup(groupUUID string, taskUUIDs []string) error {
	groupMeta := &tasks.GroupMeta{
		GroupUUID: groupUUID,
		TaskUUIDs: taskUUIDs,
		CreatedAt: time.Now().UTC(),
	}

	encoded, err := json.Marshal(groupMeta)
	if err != nil {
		return err
	}

	expiration := b.getExpiration()
	err = b.rclient.Set(context.Background(), groupUUID, encoded, expiration).Err()
	if err != nil {
		return err
	}

	return nil
}

// GroupCompleted returns true if all tasks in a group finished.
// NOTE(review): the counter counts any completed state (success OR failure),
// despite its name; a missing task state key surfaces as an error from
// getStates rather than "not completed".
func (b *BackendGR) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
	groupMeta, err := b.getGroupMeta(groupUUID)
	if err != nil {
		return false, err
	}

	taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
	if err != nil {
		return false, err
	}

	// Counts tasks in ANY terminal state (IsCompleted covers failures too).
	var countSuccessTasks = 0
	for _, taskState := range taskStates {
		if taskState.IsCompleted() {
			countSuccessTasks++
		}
	}

	return countSuccessTasks == groupTaskCount, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *BackendGR) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
	groupMeta, err := b.getGroupMeta(groupUUID)
	if err != nil {
		return []*tasks.TaskState{}, err
	}

	return b.getStates(groupMeta.TaskUUIDs...)
}

// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never triggered multiple times. Returns a boolean flag to indicate
// whether the worker should trigger chord (true) or no if it has been triggered
// already (false).
// NOTE(review): the redsync mutex name is a single global constant, so chord
// triggering is serialized across ALL groups, not per group — confirm this is
// intended.
func (b *BackendGR) TriggerChord(groupUUID string) (bool, error) {
	m := b.redsync.NewMutex("TriggerChordMutex")
	if err := m.Lock(); err != nil {
		return false, err
	}
	// Unlock's (bool, error) result is deliberately ignored; the lock will
	// also expire on its own if the release fails.
	defer m.Unlock()

	groupMeta, err := b.getGroupMeta(groupUUID)
	if err != nil {
		return false, err
	}

	// Chord has already been triggered, return false (should not trigger again)
	if groupMeta.ChordTriggered {
		return false, nil
	}

	// Set flag to true
	groupMeta.ChordTriggered = true

	// Update the group meta
	encoded, err := json.Marshal(&groupMeta)
	if err != nil {
		return false, err
	}

	expiration := b.getExpiration()
	err = b.rclient.Set(context.Background(), groupUUID, encoded, expiration).Err()
	if err != nil {
		return false, err
	}

	return true, nil
}

// mergeNewTaskState carries CreatedAt and TaskName over from any previously
// stored state, so state transitions do not lose the original creation time
// or task name. A lookup failure is treated as "no previous state".
func (b *BackendGR) mergeNewTaskState(newState *tasks.TaskState) {
	state, err := b.GetState(newState.TaskUUID)
	if err == nil {
		newState.CreatedAt = state.CreatedAt
		newState.TaskName = state.TaskName
	}
}

// SetStatePending updates task state to PENDING
func (b *BackendGR) SetStatePending(signature *tasks.Signature) error {
	taskState := tasks.NewPendingTaskState(signature)
	return b.updateState(taskState)
}

// SetStateReceived updates task state to RECEIVED
func (b *BackendGR) SetStateReceived(signature *tasks.Signature) error {
	taskState := tasks.NewReceivedTaskState(signature)
	b.mergeNewTaskState(taskState)
	return b.updateState(taskState)
}

// SetStateStarted updates task state to STARTED
func (b *BackendGR) SetStateStarted(signature *tasks.Signature) error {
	taskState := tasks.NewStartedTaskState(signature)
	b.mergeNewTaskState(taskState)
	return b.updateState(taskState)
}

// SetStateRetry updates task state to RETRY
func (b *BackendGR) SetStateRetry(signature *tasks.Signature) error {
	taskState := tasks.NewRetryTaskState(signature)
	b.mergeNewTaskState(taskState)
	return b.updateState(taskState)
}

// SetStateSuccess updates task state to SUCCESS
func (b *BackendGR) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
	taskState := tasks.NewSuccessTaskState(signature, results)
	b.mergeNewTaskState(taskState)
	return b.updateState(taskState)
}

// SetStateFailure updates task state to FAILURE
func (b *BackendGR) SetStateFailure(signature *tasks.Signature, err string) error {
	taskState := tasks.NewFailureTaskState(signature, err)
	b.mergeNewTaskState(taskState)
	return b.updateState(taskState)
}

// GetState returns the latest task state. A missing key propagates the
// go-redis "redis: nil" error to the caller.
func (b *BackendGR) GetState(taskUUID string) (*tasks.TaskState, error) {

	item, err := b.rclient.Get(context.Background(), taskUUID).Bytes()
	if err != nil {
		return nil, err
	}
	state := new(tasks.TaskState)
	// UseNumber keeps numeric results as json.Number instead of float64.
	decoder := json.NewDecoder(bytes.NewReader(item))
	decoder.UseNumber()
	if err := decoder.Decode(state); err != nil {
		return nil, err
	}

	return state, nil
}

// PurgeState deletes stored task state
func (b *BackendGR) PurgeState(taskUUID string) error {
	err := b.rclient.Del(context.Background(), taskUUID).Err()
	if err != nil {
		return err
	}

	return nil
}

// PurgeGroupMeta deletes stored group meta data
func (b *BackendGR) PurgeGroupMeta(groupUUID string) error {
	err := b.rclient.Del(context.Background(), groupUUID).Err()
	if err != nil {
		return err
	}

	return nil
}

// getGroupMeta retrieves group meta data, convenience function to avoid repetition
func (b *BackendGR) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
	item, err := b.rclient.Get(context.Background(), groupUUID).Bytes()
	if err != nil {
		return nil, err
	}

	groupMeta := new(tasks.GroupMeta)
	decoder := json.NewDecoder(bytes.NewReader(item))
	decoder.UseNumber()
	if err := decoder.Decode(groupMeta); err != nil {
		return nil, err
	}

	return groupMeta, nil
}

// getStates returns multiple task states, one per UUID, in input order.
// Any missing or undecodable state aborts the whole lookup with an error.
func (b *BackendGR) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
	taskStates := make([]*tasks.TaskState, len(taskUUIDs))
	// to avoid CROSSSLOT error, use pipeline
	cmders, err := b.rclient.Pipelined(context.Background(), func(pipeliner redis.Pipeliner) error {
		for _, uuid := range taskUUIDs {
			pipeliner.Get(context.Background(), uuid)
		}
		return nil
	})
	if err != nil {
		return taskStates, err
	}
	for i, cmder := range cmders {
		stateBytes, err1 := cmder.(*redis.StringCmd).Bytes()
		if err1 != nil {
			return taskStates, err1
		}
		taskState := new(tasks.TaskState)
		decoder := json.NewDecoder(bytes.NewReader(stateBytes))
		decoder.UseNumber()
		if err1 = decoder.Decode(taskState); err1 != nil {
			log.ERROR.Print(err1)
			return taskStates, err1
		}
		taskStates[i] = taskState
	}

	return taskStates, nil
}

// updateState saves current task state
func (b *BackendGR) updateState(taskState *tasks.TaskState) error {
	encoded, err := json.Marshal(taskState)
	if err != nil {
		return err
	}

	expiration := b.getExpiration()
	_, err = b.rclient.Set(context.Background(), taskState.TaskUUID, encoded, expiration).Result()
	if err != nil {
		return err
	}

	return nil
}

// getExpiration returns expiration for a stored task state
func (b *BackendGR) getExpiration() time.Duration {
	expiresIn := b.GetConfig().ResultsExpireIn
	if expiresIn == 0 {
		// expire results after 1 hour by default
		expiresIn = config.DefaultResultsExpireIn
	}

	return time.Duration(expiresIn) * time.Second
}
// getRedisG builds a go-redis backed result backend from REDIS_URL_GR
// (comma-separated "host1:port1,host2:port2" list), or returns nil when the
// variable is unset so callers can skip the test.
func getRedisG() iface.Backend {
	// host1:port1,host2:port2
	redisURL := os.Getenv("REDIS_URL_GR")
	//redisPassword := os.Getenv("REDIS_PASSWORD")
	if redisURL == "" {
		return nil
	}
	backend := redis.NewGR(new(config.Config), strings.Split(redisURL, ","), 0)
	return backend
}

// TestGroupCompletedGR exercises GroupCompleted through the full lifecycle:
// unknown group, initialized-but-stateless group (both surface go-redis's
// "redis: nil" error), partially finished group, and fully finished group.
func TestGroupCompletedGR(t *testing.T) {
	backend := getRedisG()
	if backend == nil {
		t.Skip()
	}

	groupUUID := "testGroupUUID"
	task1 := &tasks.Signature{
		UUID:      "testTaskUUID1",
		GroupUUID: groupUUID,
	}
	task2 := &tasks.Signature{
		UUID:      "testTaskUUID2",
		GroupUUID: groupUUID,
	}

	// Cleanup before the test
	backend.PurgeState(task1.UUID)
	backend.PurgeState(task2.UUID)
	backend.PurgeGroupMeta(groupUUID)

	// No group meta yet -> getGroupMeta fails with "redis: nil".
	groupCompleted, err := backend.GroupCompleted(groupUUID, 2)
	if assert.Error(t, err) {
		assert.False(t, groupCompleted)
		assert.Equal(t, "redis: nil", err.Error())
	}

	backend.InitGroup(groupUUID, []string{task1.UUID, task2.UUID})

	// Group meta exists but the member task states don't -> still "redis: nil".
	groupCompleted, err = backend.GroupCompleted(groupUUID, 2)
	if assert.Error(t, err) {
		assert.False(t, groupCompleted)
		assert.Equal(t, "redis: nil", err.Error())
	}

	backend.SetStatePending(task1)
	backend.SetStateStarted(task2)
	groupCompleted, err = backend.GroupCompleted(groupUUID, 2)
	if assert.NoError(t, err) {
		assert.False(t, groupCompleted)
	}

	taskResults := []*tasks.TaskResult{new(tasks.TaskResult)}
	backend.SetStateStarted(task1)
	backend.SetStateSuccess(task2, taskResults)
	groupCompleted, err = backend.GroupCompleted(groupUUID, 2)
	if assert.NoError(t, err) {
		assert.False(t, groupCompleted)
	}

	// Failure also counts as "completed", finishing the group.
	backend.SetStateFailure(task1, "Some error")
	groupCompleted, err = backend.GroupCompleted(groupUUID, 2)
	if assert.NoError(t, err) {
		assert.True(t, groupCompleted)
	}
}

// TestGetStateGR walks one task through PENDING -> RECEIVED -> STARTED ->
// SUCCESS and checks that CreatedAt and TaskName are preserved across
// transitions (mergeNewTaskState behavior).
func TestGetStateGR(t *testing.T) {
	backend := getRedisG()
	if backend == nil {
		t.Skip()
	}

	signature := &tasks.Signature{
		UUID:      "testTaskUUID",
		GroupUUID: "testGroupUUID",
	}

	backend.PurgeState("testTaskUUID")

	var (
		taskState *tasks.TaskState
		err       error
	)

	taskState, err = backend.GetState(signature.UUID)
	assert.Equal(t, "redis: nil", err.Error())
	assert.Nil(t, taskState)

	//Pending State
	backend.SetStatePending(signature)
	taskState, err = backend.GetState(signature.UUID)
	assert.NoError(t, err)
	assert.Equal(t, signature.Name, taskState.TaskName)
	createdAt := taskState.CreatedAt

	//Received State
	backend.SetStateReceived(signature)
	taskState, err = backend.GetState(signature.UUID)
	assert.NoError(t, err)
	assert.Equal(t, signature.Name, taskState.TaskName)
	assert.Equal(t, createdAt, taskState.CreatedAt)

	//Started State
	backend.SetStateStarted(signature)
	taskState, err = backend.GetState(signature.UUID)
	assert.NoError(t, err)
	assert.Equal(t, signature.Name, taskState.TaskName)
	assert.Equal(t, createdAt, taskState.CreatedAt)

	//Success State
	taskResults := []*tasks.TaskResult{
		{
			Type:  "float64",
			Value: 2,
		},
	}
	backend.SetStateSuccess(signature, taskResults)
	taskState, err = backend.GetState(signature.UUID)
	assert.NoError(t, err)
	assert.Equal(t, signature.Name, taskState.TaskName)
	assert.Equal(t, createdAt, taskState.CreatedAt)
	assert.NotNil(t, taskState.Results)
}

// TestPurgeStateGR checks that a stored state becomes unreadable after
// PurgeState deletes its key.
func TestPurgeStateGR(t *testing.T) {
	backend := getRedisG()
	if backend == nil {
		t.Skip()
	}

	signature := &tasks.Signature{
		UUID:      "testTaskUUID",
		GroupUUID: "testGroupUUID",
	}

	backend.SetStatePending(signature)
	taskState, err := backend.GetState(signature.UUID)
	assert.NotNil(t, taskState)
	assert.NoError(t, err)

	backend.PurgeState(taskState.TaskUUID)
	taskState, err = backend.GetState(signature.UUID)
	assert.Nil(t, taskState)
	assert.Error(t, err)
}
package redis

import (
	"bytes"
	"encoding/json"
	"fmt"
	"sync"
	"time"

	"github.com/go-redsync/redsync/v4"
	redsyncredis "github.com/go-redsync/redsync/v4/redis/redigo"
	"github.com/gomodule/redigo/redis"

	"github.com/RichardKnop/machinery/v2/backends/iface"
	"github.com/RichardKnop/machinery/v2/common"
	"github.com/RichardKnop/machinery/v2/config"
	"github.com/RichardKnop/machinery/v2/log"
	"github.com/RichardKnop/machinery/v2/tasks"
)

// Backend represents a Redis result backend built on redigo. The connection
// pool and the redsync lock factory are created lazily on first use (see
// open), guarded by redisOnce.
type Backend struct {
	common.Backend
	host     string
	password string
	db       int
	pool     *redis.Pool
	// If set, path to a socket file overrides hostname
	socketPath string
	redsync    *redsync.Redsync
	redisOnce  sync.Once
	common.RedisConnector
}

// New creates Backend instance. No connection is made here; the pool is
// opened on the first backend operation.
func New(cnf *config.Config, host, password, socketPath string, db int) iface.Backend {
	return &Backend{
		Backend:    common.NewBackend(cnf),
		host:       host,
		db:         db,
		password:   password,
		socketPath: socketPath,
	}
}

// InitGroup creates and saves a group meta data object
func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
	groupMeta := &tasks.GroupMeta{
		GroupUUID: groupUUID,
		TaskUUIDs: taskUUIDs,
		CreatedAt: time.Now().UTC(),
	}

	encoded, err := json.Marshal(groupMeta)
	if err != nil {
		return err
	}

	conn := b.open()
	defer conn.Close()

	// SET ... EX <seconds> gives the group meta the same TTL as task states.
	expiration := int64(b.getExpiration().Seconds())
	_, err = conn.Do("SET", groupUUID, encoded, "EX", expiration)
	if err != nil {
		return err
	}

	return nil
}

// GroupCompleted returns true if all tasks in a group finished.
// NOTE(review): the counter counts any completed state (success OR failure),
// despite its name.
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
	conn := b.open()
	defer conn.Close()

	groupMeta, err := b.getGroupMeta(conn, groupUUID)
	if err != nil {
		return false, err
	}

	taskStates, err := b.getStates(conn, groupMeta.TaskUUIDs...)
	if err != nil {
		return false, err
	}

	var countSuccessTasks = 0
	for _, taskState := range taskStates {
		if taskState.IsCompleted() {
			countSuccessTasks++
		}
	}

	return countSuccessTasks == groupTaskCount, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
	conn := b.open()
	defer conn.Close()

	groupMeta, err := b.getGroupMeta(conn, groupUUID)
	if err != nil {
		return []*tasks.TaskState{}, err
	}

	return b.getStates(conn, groupMeta.TaskUUIDs...)
}

// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never triggered multiple times. Returns a boolean flag to indicate
// whether the worker should trigger chord (true) or no if it has been triggered
// already (false).
// The b.open() call must come first: it lazily initializes b.redsync, which
// this method uses for the distributed lock.
// NOTE(review): the mutex name is a single global constant, serializing chord
// triggering across ALL groups — confirm this is intended.
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
	conn := b.open()
	defer conn.Close()

	m := b.redsync.NewMutex("TriggerChordMutex")
	if err := m.Lock(); err != nil {
		return false, err
	}
	defer m.Unlock()

	groupMeta, err := b.getGroupMeta(conn, groupUUID)
	if err != nil {
		return false, err
	}

	// Chord has already been triggered, return false (should not trigger again)
	if groupMeta.ChordTriggered {
		return false, nil
	}

	// Set flag to true
	groupMeta.ChordTriggered = true

	// Update the group meta
	encoded, err := json.Marshal(&groupMeta)
	if err != nil {
		return false, err
	}

	expiration := int64(b.getExpiration().Seconds())
	_, err = conn.Do("SET", groupUUID, encoded, "EX", expiration)
	if err != nil {
		return false, err
	}

	return true, nil
}

// mergeNewTaskState carries CreatedAt and TaskName over from any previously
// stored state; a lookup failure is treated as "no previous state".
func (b *Backend) mergeNewTaskState(conn redis.Conn, newState *tasks.TaskState) {
	state, err := b.getState(conn, newState.TaskUUID)
	if err == nil {
		newState.CreatedAt = state.CreatedAt
		newState.TaskName = state.TaskName
	}
}

// SetStatePending updates task state to PENDING
func (b *Backend) SetStatePending(signature *tasks.Signature) error {
	conn := b.open()
	defer conn.Close()

	taskState := tasks.NewPendingTaskState(signature)
	return b.updateState(conn, taskState)
}

// SetStateReceived updates task state to RECEIVED
func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
	conn := b.open()
	defer conn.Close()

	taskState := tasks.NewReceivedTaskState(signature)
	b.mergeNewTaskState(conn, taskState)
	return b.updateState(conn, taskState)
}

// SetStateStarted updates task state to STARTED
func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
	conn := b.open()
	defer conn.Close()

	taskState := tasks.NewStartedTaskState(signature)
	b.mergeNewTaskState(conn, taskState)
	return b.updateState(conn, taskState)
}

// SetStateRetry updates task state to RETRY
func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
	conn := b.open()
	defer conn.Close()

	taskState := tasks.NewRetryTaskState(signature)
	b.mergeNewTaskState(conn, taskState)
	return b.updateState(conn, taskState)
}

// SetStateSuccess updates task state to SUCCESS
func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
	conn := b.open()
	defer conn.Close()

	taskState := tasks.NewSuccessTaskState(signature, results)
	b.mergeNewTaskState(conn, taskState)
	return b.updateState(conn, taskState)
}

// SetStateFailure updates task state to FAILURE
func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
	conn := b.open()
	defer conn.Close()

	taskState := tasks.NewFailureTaskState(signature, err)
	b.mergeNewTaskState(conn, taskState)
	return b.updateState(conn, taskState)
}

// GetState returns the latest task state
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
	conn := b.open()
	defer conn.Close()

	return b.getState(conn, taskUUID)
}

// getState reads and decodes a single task state on an existing connection.
// A missing key propagates redigo's "redigo: nil returned" error.
func (b *Backend) getState(conn redis.Conn, taskUUID string) (*tasks.TaskState, error) {
	item, err := redis.Bytes(conn.Do("GET", taskUUID))
	if err != nil {
		return nil, err
	}
	state := new(tasks.TaskState)
	// UseNumber keeps numeric results as json.Number instead of float64.
	decoder := json.NewDecoder(bytes.NewReader(item))
	decoder.UseNumber()
	if err := decoder.Decode(state); err != nil {
		return nil, err
	}

	return state, nil
}

// PurgeState deletes stored task state
func (b *Backend) PurgeState(taskUUID string) error {
	conn := b.open()
	defer conn.Close()

	_, err := conn.Do("DEL", taskUUID)
	if err != nil {
		return err
	}

	return nil
}

// PurgeGroupMeta deletes stored group meta data
func (b *Backend) PurgeGroupMeta(groupUUID string) error {
	conn := b.open()
	defer conn.Close()

	_, err := conn.Do("DEL", groupUUID)
	if err != nil {
		return err
	}

	return nil
}

// getGroupMeta retrieves group meta data, convenience function to avoid repetition
func (b *Backend) getGroupMeta(conn redis.Conn, groupUUID string) (*tasks.GroupMeta, error) {

	item, err := redis.Bytes(conn.Do("GET", groupUUID))
	if err != nil {
		return nil, err
	}

	groupMeta := new(tasks.GroupMeta)
	decoder := json.NewDecoder(bytes.NewReader(item))
	decoder.UseNumber()
	if err := decoder.Decode(groupMeta); err != nil {
		return nil, err
	}

	return groupMeta, nil
}

// getStates returns multiple task states via a single MGET, in input order.
// A missing key comes back as a non-[]byte reply and aborts with an error.
func (b *Backend) getStates(conn redis.Conn, taskUUIDs ...string) ([]*tasks.TaskState, error) {
	taskStates := make([]*tasks.TaskState, len(taskUUIDs))

	// conn.Do requires []interface{}... can't pass []string unfortunately
	taskUUIDInterfaces := make([]interface{}, len(taskUUIDs))
	for i, taskUUID := range taskUUIDs {
		taskUUIDInterfaces[i] = interface{}(taskUUID)
	}

	reply, err := redis.Values(conn.Do("MGET", taskUUIDInterfaces...))
	if err != nil {
		return taskStates, err
	}

	for i, value := range reply {
		stateBytes, ok := value.([]byte)
		if !ok {
			return taskStates, fmt.Errorf("Expected byte array, instead got: %v", value)
		}

		taskState := new(tasks.TaskState)
		decoder := json.NewDecoder(bytes.NewReader(stateBytes))
		decoder.UseNumber()
		if err := decoder.Decode(taskState); err != nil {
			log.ERROR.Print(err)
			return taskStates, err
		}

		taskStates[i] = taskState
	}

	return taskStates, nil
}

// updateState saves current task state
func (b *Backend) updateState(conn redis.Conn, taskState *tasks.TaskState) error {
	encoded, err := json.Marshal(taskState)
	if err != nil {
		return err
	}

	expiration := int64(b.getExpiration().Seconds())
	_, err = conn.Do("SET", taskState.TaskUUID, encoded, "EX", expiration)
	if err != nil {
		return err
	}

	return nil
}

// getExpiration returns expiration for a stored task state
func (b *Backend) getExpiration() time.Duration {
	expiresIn := b.GetConfig().ResultsExpireIn
	if expiresIn == 0 {
		// expire results after 1 hour by default
		expiresIn = config.DefaultResultsExpireIn
	}

	return time.Duration(expiresIn) * time.Second
}

// open returns or creates instance of Redis connection. The pool and the
// redsync factory are created exactly once (redisOnce); every caller must
// Close() the returned connection to hand it back to the pool.
func (b *Backend) open() redis.Conn {
	b.redisOnce.Do(func() {
		b.pool = b.NewPool(b.socketPath, b.host, b.password, b.db, b.GetConfig().Redis, b.GetConfig().TLSConfig)
		b.redsync = redsync.New(redsyncredis.NewPool(b.pool))
	})
	return b.pool.Get()
}
+import ( + "os" + "testing" + + "github.com/RichardKnop/machinery/v2/backends/redis" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestGroupCompleted(t *testing.T) { + redisURL := os.Getenv("REDIS_URL") + redisPassword := os.Getenv("REDIS_PASSWORD") + if redisURL == "" { + t.Skip("REDIS_URL is not defined") + } + + groupUUID := "testGroupUUID" + task1 := &tasks.Signature{ + UUID: "testTaskUUID1", + GroupUUID: groupUUID, + } + task2 := &tasks.Signature{ + UUID: "testTaskUUID2", + GroupUUID: groupUUID, + } + + backend := redis.New(new(config.Config), redisURL, redisPassword, "", 0) + + // Cleanup before the test + backend.PurgeState(task1.UUID) + backend.PurgeState(task2.UUID) + backend.PurgeGroupMeta(groupUUID) + + groupCompleted, err := backend.GroupCompleted(groupUUID, 2) + if assert.Error(t, err) { + assert.False(t, groupCompleted) + assert.Equal(t, "redigo: nil returned", err.Error()) + } + + backend.InitGroup(groupUUID, []string{task1.UUID, task2.UUID}) + + groupCompleted, err = backend.GroupCompleted(groupUUID, 2) + if assert.Error(t, err) { + assert.False(t, groupCompleted) + assert.Equal(t, "Expected byte array, instead got: ", err.Error()) + } + + backend.SetStatePending(task1) + backend.SetStateStarted(task2) + groupCompleted, err = backend.GroupCompleted(groupUUID, 2) + if assert.NoError(t, err) { + assert.False(t, groupCompleted) + } + + taskResults := []*tasks.TaskResult{new(tasks.TaskResult)} + backend.SetStateStarted(task1) + backend.SetStateSuccess(task2, taskResults) + groupCompleted, err = backend.GroupCompleted(groupUUID, 2) + if assert.NoError(t, err) { + assert.False(t, groupCompleted) + } + + backend.SetStateFailure(task1, "Some error") + groupCompleted, err = backend.GroupCompleted(groupUUID, 2) + if assert.NoError(t, err) { + assert.True(t, groupCompleted) + } +} + +func TestGetState(t *testing.T) { + redisURL := os.Getenv("REDIS_URL") + 
redisPassword := os.Getenv("REDIS_PASSWORD") + if redisURL == "" { + return + } + + signature := &tasks.Signature{ + UUID: "testTaskUUID", + GroupUUID: "testGroupUUID", + } + + backend := redis.New(new(config.Config), redisURL, redisPassword, "", 0) + + backend.PurgeState("testTaskUUID") + + var ( + taskState *tasks.TaskState + err error + ) + + taskState, err = backend.GetState(signature.UUID) + assert.Equal(t, "redigo: nil returned", err.Error()) + assert.Nil(t, taskState) + + //Pending State + backend.SetStatePending(signature) + taskState, err = backend.GetState(signature.UUID) + assert.NoError(t, err) + assert.Equal(t, signature.Name, taskState.TaskName) + createdAt := taskState.CreatedAt + + //Received State + backend.SetStateReceived(signature) + taskState, err = backend.GetState(signature.UUID) + assert.NoError(t, err) + assert.Equal(t, signature.Name, taskState.TaskName) + assert.Equal(t, createdAt, taskState.CreatedAt) + + //Started State + backend.SetStateStarted(signature) + taskState, err = backend.GetState(signature.UUID) + assert.NoError(t, err) + assert.Equal(t, signature.Name, taskState.TaskName) + assert.Equal(t, createdAt, taskState.CreatedAt) + + //Success State + taskResults := []*tasks.TaskResult{ + { + Type: "float64", + Value: 2, + }, + } + backend.SetStateSuccess(signature, taskResults) + taskState, err = backend.GetState(signature.UUID) + assert.NoError(t, err) + assert.Equal(t, signature.Name, taskState.TaskName) + assert.Equal(t, createdAt, taskState.CreatedAt) + assert.NotNil(t, taskState.Results) +} + +func TestPurgeState(t *testing.T) { + redisURL := os.Getenv("REDIS_URL") + redisPassword := os.Getenv("REDIS_PASSWORD") + if redisURL == "" { + return + } + + signature := &tasks.Signature{ + UUID: "testTaskUUID", + GroupUUID: "testGroupUUID", + } + + backend := redis.New(new(config.Config), redisURL, redisPassword, "", 0) + + backend.SetStatePending(signature) + taskState, err := backend.GetState(signature.UUID) + assert.NotNil(t, 
taskState) + assert.NoError(t, err) + + backend.PurgeState(taskState.TaskUUID) + taskState, err = backend.GetState(signature.UUID) + assert.Nil(t, taskState) + assert.Error(t, err) +} diff --git a/v2/backends/result/async_result.go b/v2/backends/result/async_result.go new file mode 100644 index 000000000..7eaaf1371 --- /dev/null +++ b/v2/backends/result/async_result.go @@ -0,0 +1,256 @@ +package result + +import ( + "errors" + "reflect" + "time" + + "github.com/RichardKnop/machinery/v2/backends/iface" + "github.com/RichardKnop/machinery/v2/tasks" +) + +var ( + // ErrBackendNotConfigured ... + ErrBackendNotConfigured = errors.New("Result backend not configured") + // ErrTimeoutReached ... + ErrTimeoutReached = errors.New("Timeout reached") +) + +// AsyncResult represents a task result +type AsyncResult struct { + Signature *tasks.Signature + taskState *tasks.TaskState + backend iface.Backend +} + +// ChordAsyncResult represents a result of a chord +type ChordAsyncResult struct { + groupAsyncResults []*AsyncResult + chordAsyncResult *AsyncResult + backend iface.Backend +} + +// ChainAsyncResult represents a result of a chain of tasks +type ChainAsyncResult struct { + asyncResults []*AsyncResult + backend iface.Backend +} + +// NewAsyncResult creates AsyncResult instance +func NewAsyncResult(signature *tasks.Signature, backend iface.Backend) *AsyncResult { + return &AsyncResult{ + Signature: signature, + taskState: new(tasks.TaskState), + backend: backend, + } +} + +// NewChordAsyncResult creates ChordAsyncResult instance +func NewChordAsyncResult(groupTasks []*tasks.Signature, chordCallback *tasks.Signature, backend iface.Backend) *ChordAsyncResult { + asyncResults := make([]*AsyncResult, len(groupTasks)) + for i, task := range groupTasks { + asyncResults[i] = NewAsyncResult(task, backend) + } + return &ChordAsyncResult{ + groupAsyncResults: asyncResults, + chordAsyncResult: NewAsyncResult(chordCallback, backend), + backend: backend, + } +} + +// NewChainAsyncResult 
creates ChainAsyncResult instance +func NewChainAsyncResult(tasks []*tasks.Signature, backend iface.Backend) *ChainAsyncResult { + asyncResults := make([]*AsyncResult, len(tasks)) + for i, task := range tasks { + asyncResults[i] = NewAsyncResult(task, backend) + } + return &ChainAsyncResult{ + asyncResults: asyncResults, + backend: backend, + } +} + +// Touch the state and don't wait +func (asyncResult *AsyncResult) Touch() ([]reflect.Value, error) { + if asyncResult.backend == nil { + return nil, ErrBackendNotConfigured + } + + asyncResult.GetState() + + // Purge state if we are using AMQP backend + if asyncResult.backend.IsAMQP() && asyncResult.taskState.IsCompleted() { + asyncResult.backend.PurgeState(asyncResult.taskState.TaskUUID) + } + + if asyncResult.taskState.IsFailure() { + return nil, errors.New(asyncResult.taskState.Error) + } + + if asyncResult.taskState.IsSuccess() { + return tasks.ReflectTaskResults(asyncResult.taskState.Results) + } + + return nil, nil +} + +// Get returns task results (synchronous blocking call) +func (asyncResult *AsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) { + for { + results, err := asyncResult.Touch() + + if results == nil && err == nil { + time.Sleep(sleepDuration) + } else { + return results, err + } + } +} + +// GetWithTimeout returns task results with a timeout (synchronous blocking call) +func (asyncResult *AsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) { + timeout := time.NewTimer(timeoutDuration) + + for { + select { + case <-timeout.C: + return nil, ErrTimeoutReached + default: + results, err := asyncResult.Touch() + + if results == nil && err == nil { + time.Sleep(sleepDuration) + } else { + return results, err + } + } + } +} + +// GetState returns latest task state +func (asyncResult *AsyncResult) GetState() *tasks.TaskState { + if asyncResult.taskState.IsCompleted() { + return asyncResult.taskState + } + + taskState, err := 
asyncResult.backend.GetState(asyncResult.Signature.UUID) + if err == nil { + asyncResult.taskState = taskState + } + + return asyncResult.taskState +} + +// Get returns results of a chain of tasks (synchronous blocking call) +func (chainAsyncResult *ChainAsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) { + if chainAsyncResult.backend == nil { + return nil, ErrBackendNotConfigured + } + + var ( + results []reflect.Value + err error + ) + + for _, asyncResult := range chainAsyncResult.asyncResults { + results, err = asyncResult.Get(sleepDuration) + if err != nil { + return nil, err + } + } + + return results, err +} + +// Get returns result of a chord (synchronous blocking call) +func (chordAsyncResult *ChordAsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) { + if chordAsyncResult.backend == nil { + return nil, ErrBackendNotConfigured + } + + var err error + for _, asyncResult := range chordAsyncResult.groupAsyncResults { + _, err = asyncResult.Get(sleepDuration) + if err != nil { + return nil, err + } + } + + return chordAsyncResult.chordAsyncResult.Get(sleepDuration) +} + +// GetWithTimeout returns results of a chain of tasks with timeout (synchronous blocking call) +func (chainAsyncResult *ChainAsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) { + if chainAsyncResult.backend == nil { + return nil, ErrBackendNotConfigured + } + + var ( + results []reflect.Value + err error + ) + + timeout := time.NewTimer(timeoutDuration) + ln := len(chainAsyncResult.asyncResults) + lastResult := chainAsyncResult.asyncResults[ln-1] + + for { + select { + case <-timeout.C: + return nil, ErrTimeoutReached + default: + + for _, asyncResult := range chainAsyncResult.asyncResults { + _, err = asyncResult.Touch() + if err != nil { + return nil, err + } + } + + results, err = lastResult.Touch() + if err != nil { + return nil, err + } + if results != nil { + return results, err + } + 
time.Sleep(sleepDuration) + } + } +} + +// GetWithTimeout returns result of a chord with a timeout (synchronous blocking call) +func (chordAsyncResult *ChordAsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) { + if chordAsyncResult.backend == nil { + return nil, ErrBackendNotConfigured + } + + var ( + results []reflect.Value + err error + ) + + timeout := time.NewTimer(timeoutDuration) + for { + select { + case <-timeout.C: + return nil, ErrTimeoutReached + default: + for _, asyncResult := range chordAsyncResult.groupAsyncResults { + _, errcur := asyncResult.Touch() + if errcur != nil { + return nil, err + } + } + + results, err = chordAsyncResult.chordAsyncResult.Touch() + if err != nil { + return nil, nil + } + if results != nil { + return results, err + } + time.Sleep(sleepDuration) + } + } +} diff --git a/v2/brokers/amqp/amqp.go b/v2/brokers/amqp/amqp.go new file mode 100644 index 000000000..b26229ca8 --- /dev/null +++ b/v2/brokers/amqp/amqp.go @@ -0,0 +1,492 @@ +package amqp + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/RichardKnop/machinery/v2/brokers/errs" + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/pkg/errors" + "github.com/streadway/amqp" +) + +type AMQPConnection struct { + queueName string + connection *amqp.Connection + channel *amqp.Channel + queue amqp.Queue + confirmation <-chan amqp.Confirmation + errorchan <-chan *amqp.Error + cleanup chan struct{} +} + +// Broker represents an AMQP broker +type Broker struct { + common.Broker + common.AMQPConnector + processingWG sync.WaitGroup // use wait group to make sure task processing completes on interrupt signal + + connections map[string]*AMQPConnection + connectionsMutex sync.RWMutex +} + +// New creates 
new Broker instance +func New(cnf *config.Config) iface.Broker { + return &Broker{Broker: common.NewBroker(cnf), AMQPConnector: common.AMQPConnector{}, connections: make(map[string]*AMQPConnection)} +} + +// StartConsuming enters a loop and waits for incoming messages +func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) { + b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor) + + queueName := taskProcessor.CustomQueue() + if queueName == "" { + queueName = b.GetConfig().DefaultQueue + } + + conn, channel, queue, _, amqpCloseChan, err := b.Connect( + b.GetConfig().Broker, + b.GetConfig().MultipleBrokerSeparator, + b.GetConfig().TLSConfig, + b.GetConfig().AMQP.Exchange, // exchange name + b.GetConfig().AMQP.ExchangeType, // exchange type + queueName, // queue name + true, // queue durable + false, // queue delete when unused + b.GetConfig().AMQP.BindingKey, // queue binding key + nil, // exchange declare args + amqp.Table(b.GetConfig().AMQP.QueueDeclareArgs), // queue declare args + amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args + ) + if err != nil { + b.GetRetryFunc()(b.GetRetryStopChan()) + return b.GetRetry(), err + } + defer b.Close(channel, conn) + + if err = channel.Qos( + b.GetConfig().AMQP.PrefetchCount, + 0, // prefetch size + false, // global + ); err != nil { + return b.GetRetry(), fmt.Errorf("Channel qos error: %s", err) + } + + deliveries, err := channel.Consume( + queue.Name, // queue + consumerTag, // consumer tag + false, // auto-ack + false, // exclusive + false, // no-local + false, // no-wait + nil, // arguments + ) + if err != nil { + return b.GetRetry(), fmt.Errorf("Queue consume error: %s", err) + } + + log.INFO.Print("[*] Waiting for messages. 
To exit press CTRL+C") + + if err := b.consume(deliveries, concurrency, taskProcessor, amqpCloseChan); err != nil { + return b.GetRetry(), err + } + + // Waiting for any tasks being processed to finish + b.processingWG.Wait() + + return b.GetRetry(), nil +} + +// StopConsuming quits the loop +func (b *Broker) StopConsuming() { + b.Broker.StopConsuming() + + // Waiting for any tasks being processed to finish + b.processingWG.Wait() +} + +// GetOrOpenConnection will return a connection on a particular queue name. Open connections +// are saved to avoid having to reopen connection for multiple queues +func (b *Broker) GetOrOpenConnection(queueName string, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*AMQPConnection, error) { + var err error + + b.connectionsMutex.Lock() + defer b.connectionsMutex.Unlock() + + conn, ok := b.connections[queueName] + if !ok { + conn = &AMQPConnection{ + queueName: queueName, + cleanup: make(chan struct{}), + } + conn.connection, conn.channel, conn.queue, conn.confirmation, conn.errorchan, err = b.Connect( + b.GetConfig().Broker, + b.GetConfig().MultipleBrokerSeparator, + b.GetConfig().TLSConfig, + b.GetConfig().AMQP.Exchange, // exchange name + b.GetConfig().AMQP.ExchangeType, // exchange type + queueName, // queue name + true, // queue durable + false, // queue delete when unused + queueBindingKey, // queue binding key + exchangeDeclareArgs, // exchange declare args + queueDeclareArgs, // queue declare args + queueBindingArgs, // queue binding args + ) + if err != nil { + return nil, errors.Wrapf(err, "Failed to connect to queue %s", queueName) + } + + // Reconnect to the channel if it disconnects/errors out + go func() { + select { + case err = <-conn.errorchan: + log.INFO.Printf("Error occurred on queue: %s. 
Reconnecting", queueName) + b.connectionsMutex.Lock() + delete(b.connections, queueName) + b.connectionsMutex.Unlock() + _, err := b.GetOrOpenConnection(queueName, queueBindingKey, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs) + if err != nil { + log.ERROR.Printf("Failed to reopen queue: %s.", queueName) + } + case <-conn.cleanup: + return + } + }() + b.connections[queueName] = conn + } + return conn, nil +} + +func (b *Broker) CloseConnections() error { + b.connectionsMutex.Lock() + defer b.connectionsMutex.Unlock() + + for key, conn := range b.connections { + if err := b.Close(conn.channel, conn.connection); err != nil { + log.ERROR.Print("Failed to close channel") + return nil + } + close(conn.cleanup) + delete(b.connections, key) + } + return nil +} + +// Publish places a new message on the default queue +func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error { + // Adjust routing key (this decides which queue the message will be published to) + b.AdjustRoutingKey(signature) + + msg, err := json.Marshal(signature) + if err != nil { + return fmt.Errorf("JSON marshal error: %s", err) + } + + // Check the ETA signature field, if it is set and it is in the future, + // delay the task + if signature.ETA != nil { + now := time.Now().UTC() + + if signature.ETA.After(now) { + delayMs := int64(signature.ETA.Sub(now) / time.Millisecond) + + return b.delay(signature, delayMs) + } + } + + queue := b.GetConfig().DefaultQueue + bindingKey := b.GetConfig().AMQP.BindingKey // queue binding key + if b.isDirectExchange() { + queue = signature.RoutingKey + bindingKey = signature.RoutingKey + } + + connection, err := b.GetOrOpenConnection( + queue, + bindingKey, // queue binding key + nil, // exchange declare args + amqp.Table(b.GetConfig().AMQP.QueueDeclareArgs), // queue declare args + amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args + ) + if err != nil { + return errors.Wrapf(err, "Failed to get a connection for queue 
%s", queue) + } + + channel := connection.channel + confirmsChan := connection.confirmation + + if err := channel.Publish( + b.GetConfig().AMQP.Exchange, // exchange name + signature.RoutingKey, // routing key + false, // mandatory + false, // immediate + amqp.Publishing{ + Headers: amqp.Table(signature.Headers), + ContentType: "application/json", + Body: msg, + Priority: signature.Priority, + DeliveryMode: amqp.Persistent, + }, + ); err != nil { + return errors.Wrap(err, "Failed to publish task") + } + + confirmed := <-confirmsChan + + if confirmed.Ack { + return nil + } + + return fmt.Errorf("Failed delivery of delivery tag: %v", confirmed.DeliveryTag) +} + +// consume takes delivered messages from the channel and manages a worker pool +// to process tasks concurrently +func (b *Broker) consume(deliveries <-chan amqp.Delivery, concurrency int, taskProcessor iface.TaskProcessor, amqpCloseChan <-chan *amqp.Error) error { + pool := make(chan struct{}, concurrency) + + // initialize worker pool with maxWorkers workers + go func() { + for i := 0; i < concurrency; i++ { + pool <- struct{}{} + } + }() + + // make channel with a capacity makes it become a buffered channel so that a worker which wants to + // push an error to `errorsChan` doesn't need to be blocked while the for-loop is blocked waiting + // a worker, that is, it avoids a possible deadlock + errorsChan := make(chan error, 1) + + for { + select { + case amqpErr := <-amqpCloseChan: + return amqpErr + case err := <-errorsChan: + return err + case d := <-deliveries: + if concurrency > 0 { + // get worker from pool (blocks until one is available) + <-pool + } + + b.processingWG.Add(1) + + // Consume the task inside a gotourine so multiple tasks + // can be processed concurrently + go func() { + if err := b.consumeOne(d, taskProcessor, true); err != nil { + errorsChan <- err + } + + b.processingWG.Done() + + if concurrency > 0 { + // give worker back to pool + pool <- struct{}{} + } + }() + case 
<-b.GetStopChan(): + return nil + } + } +} + +// consumeOne processes a single message using TaskProcessor +func (b *Broker) consumeOne(delivery amqp.Delivery, taskProcessor iface.TaskProcessor, ack bool) error { + if len(delivery.Body) == 0 { + delivery.Nack(true, false) // multiple, requeue + return errors.New("Received an empty message") // RabbitMQ down? + } + + var multiple, requeue = false, false + + // Unmarshal message body into signature struct + signature := new(tasks.Signature) + decoder := json.NewDecoder(bytes.NewReader(delivery.Body)) + decoder.UseNumber() + if err := decoder.Decode(signature); err != nil { + delivery.Nack(multiple, requeue) + return errs.NewErrCouldNotUnmarshalTaskSignature(delivery.Body, err) + } + + // If the task is not registered, we nack it and requeue, + // there might be different workers for processing specific tasks + if !b.IsTaskRegistered(signature.Name) { + requeue = true + log.INFO.Printf("Task not registered with this worker. Requeing message: %s", delivery.Body) + + if !signature.IgnoreWhenTaskNotRegistered { + delivery.Nack(multiple, requeue) + } + + return nil + } + + log.DEBUG.Printf("Received new message: %s", delivery.Body) + + err := taskProcessor.Process(signature) + if ack { + delivery.Ack(multiple) + } + return err +} + +// delay a task by delayDuration miliseconds, the way it works is a new queue +// is created without any consumers, the message is then published to this queue +// with appropriate ttl expiration headers, after the expiration, it is sent to +// the proper queue with consumers +func (b *Broker) delay(signature *tasks.Signature, delayMs int64) error { + if delayMs <= 0 { + return errors.New("Cannot delay task by 0ms") + } + + message, err := json.Marshal(signature) + if err != nil { + return fmt.Errorf("JSON marshal error: %s", err) + } + + // It's necessary to redeclare the queue each time (to zero its TTL timer). 
+ queueName := fmt.Sprintf( + "delay.%d.%s.%s", + delayMs, // delay duration in mileseconds + b.GetConfig().AMQP.Exchange, + signature.RoutingKey, // routing key + ) + declareQueueArgs := amqp.Table{ + // Exchange where to send messages after TTL expiration. + "x-dead-letter-exchange": b.GetConfig().AMQP.Exchange, + // Routing key which use when resending expired messages. + "x-dead-letter-routing-key": signature.RoutingKey, + // Time in milliseconds + // after that message will expire and be sent to destination. + "x-message-ttl": delayMs, + // Time after that the queue will be deleted. + "x-expires": delayMs * 2, + } + conn, channel, _, _, _, err := b.Connect( + b.GetConfig().Broker, + b.GetConfig().MultipleBrokerSeparator, + b.GetConfig().TLSConfig, + b.GetConfig().AMQP.Exchange, // exchange name + b.GetConfig().AMQP.ExchangeType, // exchange type + queueName, // queue name + true, // queue durable + b.GetConfig().AMQP.AutoDelete, // queue delete when unused + queueName, // queue binding key + nil, // exchange declare args + declareQueueArgs, // queue declare args + amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args + ) + if err != nil { + return err + } + + defer b.Close(channel, conn) + + if err := channel.Publish( + b.GetConfig().AMQP.Exchange, // exchange + queueName, // routing key + false, // mandatory + false, // immediate + amqp.Publishing{ + Headers: amqp.Table(signature.Headers), + ContentType: "application/json", + Body: message, + DeliveryMode: amqp.Persistent, + }, + ); err != nil { + return err + } + + return nil +} + +func (b *Broker) isDirectExchange() bool { + return b.GetConfig().AMQP != nil && b.GetConfig().AMQP.ExchangeType == "direct" +} + +// AdjustRoutingKey makes sure the routing key is correct. 
+// If the routing key is an empty string: +// a) set it to binding key for direct exchange type +// b) set it to default queue name +func (b *Broker) AdjustRoutingKey(s *tasks.Signature) { + if s.RoutingKey != "" { + return + } + + if b.isDirectExchange() { + // The routing algorithm behind a direct exchange is simple - a message goes + // to the queues whose binding key exactly matches the routing key of the message. + s.RoutingKey = b.GetConfig().AMQP.BindingKey + return + } + + s.RoutingKey = b.GetConfig().DefaultQueue +} + +// Helper type for GetPendingTasks to accumulate signatures +type sigDumper struct { + customQueue string + Signatures []*tasks.Signature +} + +func (s *sigDumper) Process(sig *tasks.Signature) error { + s.Signatures = append(s.Signatures, sig) + return nil +} + +func (s *sigDumper) CustomQueue() string { + return s.customQueue +} + +func (_ *sigDumper) PreConsumeHandler() bool { + return true +} + +func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) { + if queue == "" { + queue = b.GetConfig().DefaultQueue + } + + bindingKey := b.GetConfig().AMQP.BindingKey // queue binding key + conn, err := b.GetOrOpenConnection( + queue, + bindingKey, // queue binding key + nil, // exchange declare args + amqp.Table(b.GetConfig().AMQP.QueueDeclareArgs), // queue declare args + amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args + ) + if err != nil { + return nil, errors.Wrapf(err, "Failed to get a connection for queue %s", queue) + } + + channel := conn.channel + queueInfo, err := channel.QueueInspect(queue) + if err != nil { + return nil, errors.Wrapf(err, "Failed to get info for queue %s", queue) + } + + var tag uint64 + defer channel.Nack(tag, true, true) // multiple, requeue + + dumper := &sigDumper{customQueue: queue} + for i := 0; i < queueInfo.Messages; i++ { + d, _, err := channel.Get(queue, false) + if err != nil { + return nil, errors.Wrap(err, "Failed to get from queue") + } + tag = d.DeliveryTag + 
b.consumeOne(d, dumper, false) + } + + return dumper.Signatures, nil +} diff --git a/v2/brokers/amqp/amqp_concurrence_test.go b/v2/brokers/amqp/amqp_concurrence_test.go new file mode 100644 index 000000000..67c44fd1e --- /dev/null +++ b/v2/brokers/amqp/amqp_concurrence_test.go @@ -0,0 +1,60 @@ +package amqp + +import ( + "fmt" + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/streadway/amqp" + "testing" + "time" +) + +type doNothingProcessor struct{} + +func (_ doNothingProcessor) Process(signature *tasks.Signature) error { + return fmt.Errorf("failed") +} + +func (_ doNothingProcessor) CustomQueue() string { + return "oops" +} + +func (_ doNothingProcessor) PreConsumeHandler() bool { + return true +} + +func TestConsume(t *testing.T) { + var ( + iBroker iface.Broker + deliveries = make(chan amqp.Delivery, 3) + closeChan chan *amqp.Error + processor doNothingProcessor + ) + + t.Run("with deliveries more than the number of concurrency", func(t *testing.T) { + iBroker = New(&config.Config{}) + broker, _ := iBroker.(*Broker) + errChan := make(chan error) + + // simulate that there are too much deliveries + go func() { + for i := 0; i < 3; i++ { + deliveries <- amqp.Delivery{} // broker.consumeOne() will complain this error: Received an empty message + } + }() + + go func() { + err := broker.consume(deliveries, 2, processor, closeChan) + if err != nil { + errChan <- err + } + }() + + select { + case <-errChan: + case <-time.After(1 * time.Second): + t.Error("Maybe deadlock") + } + }) +} diff --git a/v2/brokers/amqp/amqp_test.go b/v2/brokers/amqp/amqp_test.go new file mode 100644 index 000000000..8b4b48011 --- /dev/null +++ b/v2/brokers/amqp/amqp_test.go @@ -0,0 +1,46 @@ +package amqp_test + +import ( + "testing" + + "github.com/RichardKnop/machinery/v2/brokers/amqp" + "github.com/RichardKnop/machinery/v2/brokers/iface" + 
"github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestAdjustRoutingKey(t *testing.T) { + t.Parallel() + + var ( + s *tasks.Signature + broker iface.Broker + ) + + t.Run("with routing and binding keys", func(t *testing.T) { + s := &tasks.Signature{RoutingKey: "routing_key"} + broker = amqp.New(&config.Config{ + DefaultQueue: "queue", + AMQP: &config.AMQPConfig{ + ExchangeType: "direct", + BindingKey: "binding_key", + }, + }) + broker.AdjustRoutingKey(s) + assert.Equal(t, "routing_key", s.RoutingKey) + }) + + t.Run("with binding key", func(t *testing.T) { + s = new(tasks.Signature) + broker = amqp.New(&config.Config{ + DefaultQueue: "queue", + AMQP: &config.AMQPConfig{ + ExchangeType: "direct", + BindingKey: "binding_key", + }, + }) + broker.AdjustRoutingKey(s) + assert.Equal(t, "binding_key", s.RoutingKey) + }) +} diff --git a/v2/brokers/eager/eager.go b/v2/brokers/eager/eager.go new file mode 100644 index 000000000..49a6bcbf7 --- /dev/null +++ b/v2/brokers/eager/eager.go @@ -0,0 +1,68 @@ +package eager + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/tasks" +) + +// Broker represents an "eager" in-memory broker +type Broker struct { + worker iface.TaskProcessor + common.Broker +} + +// New creates new Broker instance +func New() iface.Broker { + return new(Broker) +} + +// Mode interface with methods specific for this broker +type Mode interface { + AssignWorker(p iface.TaskProcessor) +} + +// StartConsuming enters a loop and waits for incoming messages +func (eagerBroker *Broker) StartConsuming(consumerTag string, concurrency int, p iface.TaskProcessor) (bool, error) { + return true, nil +} + +// StopConsuming quits the loop +func (eagerBroker *Broker) StopConsuming() { + // do nothing +} + +// Publish places a 
new message on the default queue +func (eagerBroker *Broker) Publish(ctx context.Context, task *tasks.Signature) error { + if eagerBroker.worker == nil { + return errors.New("worker is not assigned in eager-mode") + } + + // faking the behavior to marshal input into json + // and unmarshal it back + message, err := json.Marshal(task) + if err != nil { + return fmt.Errorf("JSON marshal error: %s", err) + } + + signature := new(tasks.Signature) + decoder := json.NewDecoder(bytes.NewReader(message)) + decoder.UseNumber() + if err := decoder.Decode(signature); err != nil { + return fmt.Errorf("JSON unmarshal error: %s", err) + } + + // blocking call to the task directly + return eagerBroker.worker.Process(signature) +} + +// AssignWorker assigns a worker to the eager broker +func (eagerBroker *Broker) AssignWorker(w iface.TaskProcessor) { + eagerBroker.worker = w +} diff --git a/v2/brokers/errs/errors.go b/v2/brokers/errs/errors.go new file mode 100644 index 000000000..8c0f677c2 --- /dev/null +++ b/v2/brokers/errs/errors.go @@ -0,0 +1,28 @@ +package errs + +import ( + "errors" + "fmt" +) + +// ErrCouldNotUnmarshalTaskSignature ... +type ErrCouldNotUnmarshalTaskSignature struct { + msg []byte + reason string +} + +// Error implements the error interface +func (e ErrCouldNotUnmarshalTaskSignature) Error() string { + return fmt.Sprintf("Could not unmarshal '%s' into a task signature: %v", e.msg, e.reason) +} + +// NewErrCouldNotUnmarshalTaskSignature returns new ErrCouldNotUnmarshalTaskSignature instance +func NewErrCouldNotUnmarshalTaskSignature(msg []byte, err error) ErrCouldNotUnmarshalTaskSignature { + return ErrCouldNotUnmarshalTaskSignature{msg: msg, reason: err.Error()} +} + +// ErrConsumerStopped indicates that the operation is now illegal because of the consumer being stopped. 
+var ErrConsumerStopped = errors.New("the server has been stopped") + +// ErrStopTaskDeletion indicates that the task should not be deleted from source after task failure +var ErrStopTaskDeletion = errors.New("task should not be deleted") diff --git a/v2/brokers/gcppubsub/gcp_pubsub.go b/v2/brokers/gcppubsub/gcp_pubsub.go new file mode 100644 index 000000000..a10e68ecc --- /dev/null +++ b/v2/brokers/gcppubsub/gcp_pubsub.go @@ -0,0 +1,196 @@ +package gcppubsub + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "time" + + "cloud.google.com/go/pubsub" + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" +) + +// Broker represents an Google Cloud Pub/Sub broker +type Broker struct { + common.Broker + + service *pubsub.Client + subscriptionName string + MaxExtension time.Duration + + stopDone chan struct{} +} + +// New creates new Broker instance +func New(cnf *config.Config, projectID, subscriptionName string) (iface.Broker, error) { + b := &Broker{Broker: common.NewBroker(cnf), stopDone: make(chan struct{})} + b.subscriptionName = subscriptionName + + ctx := context.Background() + + if cnf.GCPPubSub != nil { + b.MaxExtension = cnf.GCPPubSub.MaxExtension + } + + if cnf.GCPPubSub != nil && cnf.GCPPubSub.Client != nil { + b.service = cnf.GCPPubSub.Client + } else { + pubsubClient, err := pubsub.NewClient(ctx, projectID) + if err != nil { + return nil, err + } + b.service = pubsubClient + cnf.GCPPubSub = &config.GCPPubSubConfig{ + Client: pubsubClient, + } + } + + // Validate topic exists + defaultQueue := b.GetConfig().DefaultQueue + topic := b.service.Topic(defaultQueue) + defer topic.Stop() + + topicExists, err := topic.Exists(ctx) + if err != nil { + return nil, err + } + if !topicExists { + return nil, fmt.Errorf("topic does not exist, instead got %s", defaultQueue) + } + + 
// Validate subscription exists + sub := b.service.Subscription(b.subscriptionName) + + if b.MaxExtension != 0 { + sub.ReceiveSettings.MaxExtension = b.MaxExtension + } + + subscriptionExists, err := sub.Exists(ctx) + if err != nil { + return nil, err + } + if !subscriptionExists { + return nil, fmt.Errorf("subscription does not exist, instead got %s", b.subscriptionName) + } + + return b, nil +} + +// StartConsuming enters a loop and waits for incoming messages +func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) { + b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor) + + sub := b.service.Subscription(b.subscriptionName) + + if b.MaxExtension != 0 { + sub.ReceiveSettings.MaxExtension = b.MaxExtension + } + + sub.ReceiveSettings.NumGoroutines = concurrency + log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C") + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-b.GetStopChan() + cancel() + }() + + for { + err := sub.Receive(ctx, func(_ctx context.Context, msg *pubsub.Message) { + b.consumeOne(msg, taskProcessor) + }) + if err == nil { + break + } + + log.ERROR.Printf("Error when receiving messages. 
Error: %v", err)
+		continue
+	}
+
+	close(b.stopDone)
+
+	return b.GetRetry(), nil
+}
+
+// StopConsuming quits the loop
+func (b *Broker) StopConsuming() {
+	b.Broker.StopConsuming()
+
+	// Waiting for any tasks being processed to finish
+	<-b.stopDone
+}
+
+// Publish places a new message on the default queue or the queue pointed to
+// by the routing key
+func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
+	// Adjust routing key (this decides which queue the message will be published to)
+	b.AdjustRoutingKey(signature)
+
+	msg, err := json.Marshal(signature)
+	if err != nil {
+		return fmt.Errorf("JSON marshal error: %s", err)
+	}
+
+	topic := b.service.Topic(signature.RoutingKey)
+	defer topic.Stop()
+
+	// Check the ETA signature field, if it is set and it is in the future,
+	// delay the task
+	if signature.ETA != nil {
+		now := time.Now().UTC()
+
+		if signature.ETA.After(now) {
+			topic.PublishSettings.DelayThreshold = signature.ETA.Sub(now)
+		}
+	}
+
+	result := topic.Publish(ctx, &pubsub.Message{
+		Data: msg,
+	})
+
+	id, err := result.Get(ctx)
+	if err != nil {
+		log.ERROR.Printf("Error when sending a message: %v", err)
+		return err
+	}
+
+	log.INFO.Printf("Sending a message successfully, server-generated message ID %v", id)
+	return nil
+}
+
+// consumeOne processes a single message using TaskProcessor.
+// On any failure the delivery is Nacked and the function returns immediately,
+// so a Nacked message is never subsequently Acked.
+func (b *Broker) consumeOne(delivery *pubsub.Message, taskProcessor iface.TaskProcessor) {
+	if len(delivery.Data) == 0 {
+		delivery.Nack()
+		log.ERROR.Printf("received an empty message, the delivery was %v", delivery)
+		// BUGFIX: previously fell through and tried to decode an empty payload
+		return
+	}
+
+	sig := new(tasks.Signature)
+	decoder := json.NewDecoder(bytes.NewBuffer(delivery.Data))
+	decoder.UseNumber()
+	if err := decoder.Decode(sig); err != nil {
+		delivery.Nack()
+		log.ERROR.Printf("unmarshal error. the delivery is %v", delivery)
+		// BUGFIX: previously fell through and processed a partially decoded signature
+		return
+	}
+
+	// If the task is not registered, Nack so the message stays in the queue
+	// for a worker that does have it registered
+	if !b.IsTaskRegistered(sig.Name) {
+		delivery.Nack()
+		log.ERROR.Printf("task %s is not registered", sig.Name)
+		// BUGFIX: previously fell through and Acked an unregistered task
+		return
+	}
+
+	if err := taskProcessor.Process(sig); err != nil {
+		delivery.Nack()
+		// BUGFIX: the format string was missing a verb for err
+		log.ERROR.Printf("Failed process of task: %v", err)
+		return
+	}
+
+	// Call Ack() after successfully consuming and processing the message
+	delivery.Ack()
+}
diff --git a/v2/brokers/iface/interfaces.go b/v2/brokers/iface/interfaces.go
new file mode 100644
index 000000000..95fafbb5e
--- /dev/null
+++ b/v2/brokers/iface/interfaces.go
@@ -0,0 +1,29 @@
+package iface
+
+import (
+	"context"
+
+	"github.com/RichardKnop/machinery/v2/config"
+	"github.com/RichardKnop/machinery/v2/tasks"
+)
+
+// Broker - a common interface for all brokers
+type Broker interface {
+	GetConfig() *config.Config
+	SetRegisteredTaskNames(names []string)
+	IsTaskRegistered(name string) bool
+	StartConsuming(consumerTag string, concurrency int, p TaskProcessor) (bool, error)
+	StopConsuming()
+	Publish(ctx context.Context, task *tasks.Signature) error
+	GetPendingTasks(queue string) ([]*tasks.Signature, error)
+	GetDelayedTasks() ([]*tasks.Signature, error)
+	AdjustRoutingKey(s *tasks.Signature)
+}
+
+// TaskProcessor - can process a delivered task
+// This will probably always be a worker instance
+type TaskProcessor interface {
+	Process(signature *tasks.Signature) error
+	CustomQueue() string
+	PreConsumeHandler() bool
+}
diff --git a/v2/brokers/package.go b/v2/brokers/package.go
new file mode 100644
index 000000000..589519a6b
--- /dev/null
+++ b/v2/brokers/package.go
@@ -0,0 +1 @@
+package brokers
diff --git a/v2/brokers/redis/goredis.go b/v2/brokers/redis/goredis.go
new file mode 100644
index 000000000..69fe87acc
--- /dev/null
+++ b/v2/brokers/redis/goredis.go
@@ -0,0 +1,422 @@
+package redis
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"runtime"
+	"strconv"
+	"strings"
+ "sync" + "time" + + "github.com/go-redis/redis/v8" + "github.com/go-redsync/redsync/v4" + + "github.com/RichardKnop/machinery/v2/brokers/errs" + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" +) + +// BrokerGR represents a Redis broker +type BrokerGR struct { + common.Broker + rclient redis.UniversalClient + consumingWG sync.WaitGroup // wait group to make sure whole consumption completes + processingWG sync.WaitGroup // use wait group to make sure task processing completes + delayedWG sync.WaitGroup + // If set, path to a socket file overrides hostname + socketPath string + redsync *redsync.Redsync + redisOnce sync.Once + redisDelayedTasksKey string +} + +// NewGR creates new Broker instance +func NewGR(cnf *config.Config, addrs []string, db int) iface.Broker { + b := &BrokerGR{Broker: common.NewBroker(cnf)} + + var password string + parts := strings.Split(addrs[0], "@") + if len(parts) == 2 { + // with password + password = parts[0] + addrs[0] = parts[1] + } + + ropt := &redis.UniversalOptions{ + Addrs: addrs, + DB: db, + Password: password, + } + if cnf.Redis != nil { + ropt.MasterName = cnf.Redis.MasterName + } + + b.rclient = redis.NewUniversalClient(ropt) + if cnf.Redis.DelayedTasksKey != "" { + b.redisDelayedTasksKey = cnf.Redis.DelayedTasksKey + } else { + b.redisDelayedTasksKey = defaultRedisDelayedTasksKey + } + return b +} + +// StartConsuming enters a loop and waits for incoming messages +func (b *BrokerGR) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) { + b.consumingWG.Add(1) + defer b.consumingWG.Done() + + if concurrency < 1 { + concurrency = runtime.NumCPU() * 2 + } + + b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor) + + // Ping the server to make sure connection is live + _, err := 
b.rclient.Ping(context.Background()).Result()
+	if err != nil {
+		b.GetRetryFunc()(b.GetRetryStopChan())
+
+		// Return err if retry is still true.
+		// If retry is false, broker.StopConsuming() has been called and
+		// therefore Redis might have been stopped. Return nil exit
+		// StartConsuming()
+		if b.GetRetry() {
+			return b.GetRetry(), err
+		}
+		return b.GetRetry(), errs.ErrConsumerStopped
+	}
+
+	// Channel to which we will push tasks ready for processing by worker
+	deliveries := make(chan []byte, concurrency)
+	pool := make(chan struct{}, concurrency)
+
+	// initialize worker pool with maxWorkers workers
+	for i := 0; i < concurrency; i++ {
+		pool <- struct{}{}
+	}
+
+	// A receiving goroutine keeps popping messages from the queue by BLPOP
+	// If the message is valid and can be unmarshaled into a proper structure
+	// we send it to the deliveries channel
+	go func() {
+
+		log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")
+
+		for {
+			select {
+			// A way to stop this goroutine from b.StopConsuming
+			case <-b.GetStopChan():
+				close(deliveries)
+				return
+			case <-pool:
+				task, _ := b.nextTask(getQueueGR(b.GetConfig(), taskProcessor))
+				//TODO: should this error be ignored?
+				if len(task) > 0 {
+					deliveries <- task
+				}
+
+				pool <- struct{}{}
+			}
+		}
+	}()
+
+	// A goroutine to watch for delayed tasks and push them to deliveries
+	// channel for consumption by the worker
+	b.delayedWG.Add(1)
+	go func() {
+		defer b.delayedWG.Done()
+
+		for {
+			select {
+			// A way to stop this goroutine from b.StopConsuming
+			case <-b.GetStopChan():
+				return
+			default:
+				task, err := b.nextDelayedTask(b.redisDelayedTasksKey)
+				if err != nil {
+					continue
+				}
+
+				signature := new(tasks.Signature)
+				decoder := json.NewDecoder(bytes.NewReader(task))
+				decoder.UseNumber()
+				if err := decoder.Decode(signature); err != nil {
+					log.ERROR.Print(errs.NewErrCouldNotUnmarshalTaskSignature(task, err))
+					// BUGFIX: previously fell through and published a partially
+					// decoded signature despite the unmarshal error
+					continue
+				}
+
+				if err := b.Publish(context.Background(), signature); err != nil {
+					log.ERROR.Print(err)
+				}
+			}
+		}
+	}()
+
+	if err := b.consume(deliveries, concurrency, taskProcessor); err != nil {
+		return b.GetRetry(), err
+	}
+
+	// Waiting for any tasks being processed to finish
+	b.processingWG.Wait()
+
+	return b.GetRetry(), nil
+}
+
+// StopConsuming quits the loop
+func (b *BrokerGR) StopConsuming() {
+	b.Broker.StopConsuming()
+	// Waiting for the delayed tasks goroutine to have stopped
+	b.delayedWG.Wait()
+	// Waiting for consumption to finish
+	b.consumingWG.Wait()
+
+	b.rclient.Close()
+}
+
+// Publish places a new message on the default queue
+func (b *BrokerGR) Publish(ctx context.Context, signature *tasks.Signature) error {
+	// Adjust routing key (this decides which queue the message will be published to)
+	b.Broker.AdjustRoutingKey(signature)
+
+	msg, err := json.Marshal(signature)
+	if err != nil {
+		return fmt.Errorf("JSON marshal error: %s", err)
+	}
+
+	// Check the ETA signature field, if it is set and it is in the future,
+	// delay the task
+	if signature.ETA != nil {
+		now := time.Now().UTC()
+
+		if signature.ETA.After(now) {
+			score := signature.ETA.UnixNano()
+			// BUGFIX: honour the caller-supplied ctx instead of context.Background()
+			err = b.rclient.ZAdd(ctx, b.redisDelayedTasksKey, &redis.Z{Score: 
float64(score), Member: msg}).Err() + return err + } + } + + err = b.rclient.RPush(context.Background(), signature.RoutingKey, msg).Err() + return err +} + +// GetPendingTasks returns a slice of task signatures waiting in the queue +func (b *BrokerGR) GetPendingTasks(queue string) ([]*tasks.Signature, error) { + + if queue == "" { + queue = b.GetConfig().DefaultQueue + } + results, err := b.rclient.LRange(context.Background(), queue, 0, -1).Result() + if err != nil { + return nil, err + } + + taskSignatures := make([]*tasks.Signature, len(results)) + for i, result := range results { + signature := new(tasks.Signature) + decoder := json.NewDecoder(strings.NewReader(result)) + decoder.UseNumber() + if err := decoder.Decode(signature); err != nil { + return nil, err + } + taskSignatures[i] = signature + } + return taskSignatures, nil +} + +// GetDelayedTasks returns a slice of task signatures that are scheduled, but not yet in the queue +func (b *BrokerGR) GetDelayedTasks() ([]*tasks.Signature, error) { + results, err := b.rclient.ZRange(context.Background(), b.redisDelayedTasksKey, 0, -1).Result() + if err != nil { + return nil, err + } + + taskSignatures := make([]*tasks.Signature, len(results)) + for i, result := range results { + signature := new(tasks.Signature) + decoder := json.NewDecoder(strings.NewReader(result)) + decoder.UseNumber() + if err := decoder.Decode(signature); err != nil { + return nil, err + } + taskSignatures[i] = signature + } + return taskSignatures, nil +} + +// consume takes delivered messages from the channel and manages a worker pool +// to process tasks concurrently +func (b *BrokerGR) consume(deliveries <-chan []byte, concurrency int, taskProcessor iface.TaskProcessor) error { + errorsChan := make(chan error, concurrency*2) + pool := make(chan struct{}, concurrency) + + // init pool for Worker tasks execution, as many slots as Worker concurrency param + go func() { + for i := 0; i < concurrency; i++ { + pool <- struct{}{} + } + }() + + 
for { + select { + case err := <-errorsChan: + return err + case d, open := <-deliveries: + if !open { + return nil + } + if concurrency > 0 { + // get execution slot from pool (blocks until one is available) + <-pool + } + + b.processingWG.Add(1) + + // Consume the task inside a goroutine so multiple tasks + // can be processed concurrently + go func() { + if err := b.consumeOne(d, taskProcessor); err != nil { + errorsChan <- err + } + + b.processingWG.Done() + + if concurrency > 0 { + // give slot back to pool + pool <- struct{}{} + } + }() + } + } +} + +// consumeOne processes a single message using TaskProcessor +func (b *BrokerGR) consumeOne(delivery []byte, taskProcessor iface.TaskProcessor) error { + signature := new(tasks.Signature) + decoder := json.NewDecoder(bytes.NewReader(delivery)) + decoder.UseNumber() + if err := decoder.Decode(signature); err != nil { + return errs.NewErrCouldNotUnmarshalTaskSignature(delivery, err) + } + + // If the task is not registered, we requeue it, + // there might be different workers for processing specific tasks + if !b.IsTaskRegistered(signature.Name) { + log.INFO.Printf("Task not registered with this worker. 
Requeuing message: %s", delivery) + + b.rclient.RPush(context.Background(), getQueueGR(b.GetConfig(), taskProcessor), delivery) + return nil + } + + log.DEBUG.Printf("Received new message: %s", delivery) + + return taskProcessor.Process(signature) +} + +// nextTask pops next available task from the default queue +func (b *BrokerGR) nextTask(queue string) (result []byte, err error) { + + pollPeriodMilliseconds := 1000 // default poll period for normal tasks + if b.GetConfig().Redis != nil { + configuredPollPeriod := b.GetConfig().Redis.NormalTasksPollPeriod + if configuredPollPeriod > 0 { + pollPeriodMilliseconds = configuredPollPeriod + } + } + pollPeriod := time.Duration(pollPeriodMilliseconds) * time.Millisecond + + items, err := b.rclient.BLPop(context.Background(), pollPeriod, queue).Result() + if err != nil { + return []byte{}, err + } + + // items[0] - the name of the key where an element was popped + // items[1] - the value of the popped element + if len(items) != 2 { + return []byte{}, redis.Nil + } + + result = []byte(items[1]) + + return result, nil +} + +// nextDelayedTask pops a value from the ZSET key using WATCH/MULTI/EXEC commands. +func (b *BrokerGR) nextDelayedTask(key string) (result []byte, err error) { + + //pipe := b.rclient.Pipeline() + // + //defer func() { + // // Return connection to normal state on error. 
+ // // https://redis.io/commands/discard + // if err != nil { + // pipe.Discard() + // } + //}() + + var ( + items []string + ) + + pollPeriod := 500 // default poll period for delayed tasks + if b.GetConfig().Redis != nil { + configuredPollPeriod := b.GetConfig().Redis.DelayedTasksPollPeriod + // the default period is 0, which bombards redis with requests, despite + // our intention of doing the opposite + if configuredPollPeriod > 0 { + pollPeriod = configuredPollPeriod + } + } + + for { + // Space out queries to ZSET so we don't bombard redis + // server with relentless ZRANGEBYSCOREs + time.Sleep(time.Duration(pollPeriod) * time.Millisecond) + watchFunc := func(tx *redis.Tx) error { + + now := time.Now().UTC().UnixNano() + + // https://redis.io/commands/zrangebyscore + ctx := context.Background() + items, err = tx.ZRevRangeByScore(ctx, key, &redis.ZRangeBy{ + Min: "0", Max: strconv.FormatInt(now, 10), Offset: 0, Count: 1, + }).Result() + if err != nil { + return err + } + if len(items) != 1 { + return redis.Nil + } + + // only return the first zrange value if there are no other changes in this key + // to make sure a delayed task would only be consumed once + _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.ZRem(ctx, key, items[0]) + result = []byte(items[0]) + return nil + }) + + return err + } + + if err = b.rclient.Watch(context.Background(), watchFunc, key); err != nil { + return + } else { + break + } + } + + return +} + +func getQueueGR(config *config.Config, taskProcessor iface.TaskProcessor) string { + customQueue := taskProcessor.CustomQueue() + if customQueue == "" { + return config.DefaultQueue + } + return customQueue +} diff --git a/v2/brokers/redis/redis.go b/v2/brokers/redis/redis.go new file mode 100644 index 000000000..eeb3ca78e --- /dev/null +++ b/v2/brokers/redis/redis.go @@ -0,0 +1,487 @@ +package redis + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math" + "runtime" + "sync" + "time" + + 
"github.com/go-redsync/redsync/v4" + redsyncredis "github.com/go-redsync/redsync/v4/redis/redigo" + "github.com/gomodule/redigo/redis" + + "github.com/RichardKnop/machinery/v2/brokers/errs" + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" +) + +const defaultRedisDelayedTasksKey = "delayed_tasks" + +// Broker represents a Redis broker +type Broker struct { + common.Broker + common.RedisConnector + host string + password string + db int + pool *redis.Pool + consumingWG sync.WaitGroup // wait group to make sure whole consumption completes + processingWG sync.WaitGroup // use wait group to make sure task processing completes + delayedWG sync.WaitGroup + // If set, path to a socket file overrides hostname + socketPath string + redsync *redsync.Redsync + redisOnce sync.Once + redisDelayedTasksKey string +} + +// New creates new Broker instance +func New(cnf *config.Config, host, password, socketPath string, db int) iface.Broker { + b := &Broker{Broker: common.NewBroker(cnf)} + b.host = host + b.db = db + b.password = password + b.socketPath = socketPath + + if cnf.Redis != nil && cnf.Redis.DelayedTasksKey != "" { + b.redisDelayedTasksKey = cnf.Redis.DelayedTasksKey + } else { + b.redisDelayedTasksKey = defaultRedisDelayedTasksKey + } + + return b +} + +// StartConsuming enters a loop and waits for incoming messages +func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) { + b.consumingWG.Add(1) + defer b.consumingWG.Done() + + if concurrency < 1 { + concurrency = runtime.NumCPU() * 2 + } + + b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor) + + conn := b.open() + defer conn.Close() + + // Ping the server to make sure connection is live + _, err := conn.Do("PING") + if err != nil { + 
b.GetRetryFunc()(b.GetRetryStopChan())
+
+		// Return err if retry is still true.
+		// If retry is false, broker.StopConsuming() has been called and
+		// therefore Redis might have been stopped. Return nil exit
+		// StartConsuming()
+		if b.GetRetry() {
+			return b.GetRetry(), err
+		}
+		return b.GetRetry(), errs.ErrConsumerStopped
+	}
+
+	// Channel to which we will push tasks ready for processing by worker
+	deliveries := make(chan []byte, concurrency)
+	pool := make(chan struct{}, concurrency)
+
+	// initialize worker pool with maxWorkers workers
+	for i := 0; i < concurrency; i++ {
+		pool <- struct{}{}
+	}
+
+	// A receiving goroutine keeps popping messages from the queue by BLPOP
+	// If the message is valid and can be unmarshaled into a proper structure
+	// we send it to the deliveries channel
+	go func() {
+
+		log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")
+
+		for {
+			select {
+			// A way to stop this goroutine from b.StopConsuming
+			case <-b.GetStopChan():
+				close(deliveries)
+				return
+			case <-pool:
+				select {
+				case <-b.GetStopChan():
+					close(deliveries)
+					return
+				default:
+				}
+
+				if taskProcessor.PreConsumeHandler() {
+					task, _ := b.nextTask(getQueue(b.GetConfig(), taskProcessor))
+					//TODO: should this error be ignored?
+					if len(task) > 0 {
+						deliveries <- task
+					}
+				}
+
+				pool <- struct{}{}
+			}
+		}
+	}()
+
+	// A goroutine to watch for delayed tasks and push them to deliveries
+	// channel for consumption by the worker
+	b.delayedWG.Add(1)
+	go func() {
+		defer b.delayedWG.Done()
+
+		for {
+			select {
+			// A way to stop this goroutine from b.StopConsuming
+			case <-b.GetStopChan():
+				return
+			default:
+				task, err := b.nextDelayedTask(b.redisDelayedTasksKey)
+				if err != nil {
+					continue
+				}
+
+				signature := new(tasks.Signature)
+				decoder := json.NewDecoder(bytes.NewReader(task))
+				decoder.UseNumber()
+				if err := decoder.Decode(signature); err != nil {
+					log.ERROR.Print(errs.NewErrCouldNotUnmarshalTaskSignature(task, err))
+					// BUGFIX: previously fell through and published a partially
+					// decoded signature despite the unmarshal error
+					continue
+				}
+
+				if err := b.Publish(context.Background(), signature); err != nil {
+					log.ERROR.Print(err)
+				}
+			}
+		}
+	}()
+
+	if err := b.consume(deliveries, concurrency, taskProcessor); err != nil {
+		return b.GetRetry(), err
+	}
+
+	// Waiting for any tasks being processed to finish
+	b.processingWG.Wait()
+
+	return b.GetRetry(), nil
+}
+
+// StopConsuming quits the loop
+func (b *Broker) StopConsuming() {
+	b.Broker.StopConsuming()
+	// Waiting for the delayed tasks goroutine to have stopped
+	b.delayedWG.Wait()
+	// Waiting for consumption to finish
+	b.consumingWG.Wait()
+	// Wait for currently processing tasks to finish as well. 
+ b.processingWG.Wait() + + if b.pool != nil { + b.pool.Close() + } +} + +// Publish places a new message on the default queue +func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error { + // Adjust routing key (this decides which queue the message will be published to) + b.Broker.AdjustRoutingKey(signature) + + msg, err := json.Marshal(signature) + if err != nil { + return fmt.Errorf("JSON marshal error: %s", err) + } + + conn := b.open() + defer conn.Close() + + // Check the ETA signature field, if it is set and it is in the future, + // delay the task + if signature.ETA != nil { + now := time.Now().UTC() + + if signature.ETA.After(now) { + score := signature.ETA.UnixNano() + _, err = conn.Do("ZADD", b.redisDelayedTasksKey, score, msg) + return err + } + } + + _, err = conn.Do("RPUSH", signature.RoutingKey, msg) + return err +} + +// GetPendingTasks returns a slice of task signatures waiting in the queue +func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) { + conn := b.open() + defer conn.Close() + + if queue == "" { + queue = b.GetConfig().DefaultQueue + } + dataBytes, err := conn.Do("LRANGE", queue, 0, -1) + if err != nil { + return nil, err + } + results, err := redis.ByteSlices(dataBytes, err) + if err != nil { + return nil, err + } + + taskSignatures := make([]*tasks.Signature, len(results)) + for i, result := range results { + signature := new(tasks.Signature) + decoder := json.NewDecoder(bytes.NewReader(result)) + decoder.UseNumber() + if err := decoder.Decode(signature); err != nil { + return nil, err + } + taskSignatures[i] = signature + } + return taskSignatures, nil +} + +// GetDelayedTasks returns a slice of task signatures that are scheduled, but not yet in the queue +func (b *Broker) GetDelayedTasks() ([]*tasks.Signature, error) { + conn := b.open() + defer conn.Close() + + dataBytes, err := conn.Do("ZRANGE", b.redisDelayedTasksKey, 0, -1) + if err != nil { + return nil, err + } + results, err := 
redis.ByteSlices(dataBytes, err) + if err != nil { + return nil, err + } + + taskSignatures := make([]*tasks.Signature, len(results)) + for i, result := range results { + signature := new(tasks.Signature) + decoder := json.NewDecoder(bytes.NewReader(result)) + decoder.UseNumber() + if err := decoder.Decode(signature); err != nil { + return nil, err + } + taskSignatures[i] = signature + } + return taskSignatures, nil +} + +// consume takes delivered messages from the channel and manages a worker pool +// to process tasks concurrently +func (b *Broker) consume(deliveries <-chan []byte, concurrency int, taskProcessor iface.TaskProcessor) error { + errorsChan := make(chan error, concurrency*2) + pool := make(chan struct{}, concurrency) + + // init pool for Worker tasks execution, as many slots as Worker concurrency param + go func() { + for i := 0; i < concurrency; i++ { + pool <- struct{}{} + } + }() + + for { + select { + case err := <-errorsChan: + return err + case d, open := <-deliveries: + if !open { + return nil + } + if concurrency > 0 { + // get execution slot from pool (blocks until one is available) + select { + case <-b.GetStopChan(): + b.requeueMessage(d, taskProcessor) + continue + case <-pool: + } + } + + b.processingWG.Add(1) + + // Consume the task inside a goroutine so multiple tasks + // can be processed concurrently + go func() { + if err := b.consumeOne(d, taskProcessor); err != nil { + errorsChan <- err + } + + b.processingWG.Done() + + if concurrency > 0 { + // give slot back to pool + pool <- struct{}{} + } + }() + } + } +} + +// consumeOne processes a single message using TaskProcessor +func (b *Broker) consumeOne(delivery []byte, taskProcessor iface.TaskProcessor) error { + signature := new(tasks.Signature) + decoder := json.NewDecoder(bytes.NewReader(delivery)) + decoder.UseNumber() + if err := decoder.Decode(signature); err != nil { + return errs.NewErrCouldNotUnmarshalTaskSignature(delivery, err) + } + + // If the task is not registered, we 
requeue it, + // there might be different workers for processing specific tasks + if !b.IsTaskRegistered(signature.Name) { + if signature.IgnoreWhenTaskNotRegistered { + return nil + } + log.INFO.Printf("Task not registered with this worker. Requeuing message: %s", delivery) + b.requeueMessage(delivery, taskProcessor) + return nil + } + + log.DEBUG.Printf("Received new message: %s", delivery) + + return taskProcessor.Process(signature) +} + +// nextTask pops next available task from the default queue +func (b *Broker) nextTask(queue string) (result []byte, err error) { + conn := b.open() + defer conn.Close() + + pollPeriodMilliseconds := 1000 // default poll period for normal tasks + if b.GetConfig().Redis != nil { + configuredPollPeriod := b.GetConfig().Redis.NormalTasksPollPeriod + if configuredPollPeriod > 0 { + pollPeriodMilliseconds = configuredPollPeriod + } + } + pollPeriod := time.Duration(pollPeriodMilliseconds) * time.Millisecond + + // Issue 548: BLPOP expects an integer timeout expresses in seconds. + // The call will if the value is a float. Convert to integer using + // math.Ceil(): + // math.Ceil(0.0) --> 0 (block indefinitely) + // math.Ceil(0.2) --> 1 (timeout after 1 second) + pollPeriodSeconds := math.Ceil(pollPeriod.Seconds()) + + items, err := redis.ByteSlices(conn.Do("BLPOP", queue, pollPeriodSeconds)) + if err != nil { + return []byte{}, err + } + + // items[0] - the name of the key where an element was popped + // items[1] - the value of the popped element + if len(items) != 2 { + return []byte{}, redis.ErrNil + } + + result = items[1] + + return result, nil +} + +// nextDelayedTask pops a value from the ZSET key using WATCH/MULTI/EXEC commands. +// https://github.com/gomodule/redigo/blob/master/redis/zpop_example_test.go +func (b *Broker) nextDelayedTask(key string) (result []byte, err error) { + conn := b.open() + defer conn.Close() + + defer func() { + // Return connection to normal state on error. 
+ // https://redis.io/commands/discard + // https://redis.io/commands/unwatch + if err == redis.ErrNil { + conn.Do("UNWATCH") + } else if err != nil { + conn.Do("DISCARD") + } + }() + + var ( + items [][]byte + reply interface{} + ) + + pollPeriod := 500 // default poll period for delayed tasks + if b.GetConfig().Redis != nil { + configuredPollPeriod := b.GetConfig().Redis.DelayedTasksPollPeriod + // the default period is 0, which bombards redis with requests, despite + // our intention of doing the opposite + if configuredPollPeriod > 0 { + pollPeriod = configuredPollPeriod + } + } + + for { + // Space out queries to ZSET so we don't bombard redis + // server with relentless ZRANGEBYSCOREs + time.Sleep(time.Duration(pollPeriod) * time.Millisecond) + if _, err = conn.Do("WATCH", key); err != nil { + return + } + + now := time.Now().UTC().UnixNano() + + // https://redis.io/commands/zrangebyscore + items, err = redis.ByteSlices(conn.Do( + "ZRANGEBYSCORE", + key, + 0, + now, + "LIMIT", + 0, + 1, + )) + if err != nil { + return + } + if len(items) != 1 { + err = redis.ErrNil + return + } + + _ = conn.Send("MULTI") + _ = conn.Send("ZREM", key, items[0]) + reply, err = conn.Do("EXEC") + if err != nil { + return + } + + if reply != nil { + result = items[0] + break + } + } + + return +} + +// open returns or creates instance of Redis connection +func (b *Broker) open() redis.Conn { + b.redisOnce.Do(func() { + b.pool = b.NewPool(b.socketPath, b.host, b.password, b.db, b.GetConfig().Redis, b.GetConfig().TLSConfig) + b.redsync = redsync.New(redsyncredis.NewPool(b.pool)) + }) + + return b.pool.Get() +} + +func getQueue(config *config.Config, taskProcessor iface.TaskProcessor) string { + customQueue := taskProcessor.CustomQueue() + if customQueue == "" { + return config.DefaultQueue + } + return customQueue +} + +func (b *Broker) requeueMessage(delivery []byte, taskProcessor iface.TaskProcessor) { + conn := b.open() + defer conn.Close() + conn.Do("RPUSH", 
getQueue(b.GetConfig(), taskProcessor), delivery) +} diff --git a/v2/brokers/sqs/sqs.go b/v2/brokers/sqs/sqs.go new file mode 100644 index 000000000..b8cdb0457 --- /dev/null +++ b/v2/brokers/sqs/sqs.go @@ -0,0 +1,368 @@ +package sqs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/RichardKnop/machinery/v2/brokers/errs" + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sqs/sqsiface" + + awssqs "github.com/aws/aws-sdk-go/service/sqs" +) + +const ( + maxAWSSQSDelay = time.Minute * 15 // Max supported SQS delay is 15 min: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html +) + +// Broker represents a AWS SQS broker +// There are examples on: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sqs-example-create-queue.html +type Broker struct { + common.Broker + processingWG sync.WaitGroup // use wait group to make sure task processing completes on interrupt signal + receivingWG sync.WaitGroup + stopReceivingChan chan int + sess *session.Session + service sqsiface.SQSAPI + queueUrl *string +} + +// New creates new Broker instance +func New(cnf *config.Config) iface.Broker { + b := &Broker{Broker: common.NewBroker(cnf)} + if cnf.SQS != nil && cnf.SQS.Client != nil { + // Use provided *SQS client + b.service = cnf.SQS.Client + } else { + // Initialize a session that the SDK will use to load credentials from the shared credentials file, ~/.aws/credentials. 
+ // See details on: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html + // Also, env AWS_REGION is also required + b.sess = session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) + b.service = awssqs.New(b.sess) + } + + return b +} + +// StartConsuming enters a loop and waits for incoming messages +func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) { + b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor) + qURL := b.getQueueURL(taskProcessor) + //save it so that it can be used later when attempting to delete task + b.queueUrl = qURL + + deliveries := make(chan *awssqs.ReceiveMessageOutput, concurrency) + pool := make(chan struct{}, concurrency) + + // initialize worker pool with maxWorkers workers + for i := 0; i < concurrency; i++ { + pool <- struct{}{} + } + b.stopReceivingChan = make(chan int) + b.receivingWG.Add(1) + + go func() { + defer b.receivingWG.Done() + + log.INFO.Printf("[*] Waiting for messages on queue: %s. 
To exit press CTRL+C\n", *qURL) + + for { + select { + // A way to stop this goroutine from b.StopConsuming + case <-b.stopReceivingChan: + close(deliveries) + return + case <-pool: + output, err := b.receiveMessage(qURL) + if err == nil && len(output.Messages) > 0 { + deliveries <- output + + } else { + //return back to pool right away + pool <- struct{}{} + if err != nil { + log.ERROR.Printf("Queue consume error: %s", err) + } + + } + } + + } + }() + + if err := b.consume(deliveries, concurrency, taskProcessor, pool); err != nil { + return b.GetRetry(), err + } + + return b.GetRetry(), nil +} + +// StopConsuming quits the loop +func (b *Broker) StopConsuming() { + b.Broker.StopConsuming() + + b.stopReceiving() + + // Waiting for any tasks being processed to finish + b.processingWG.Wait() + + // Waiting for the receiving goroutine to have stopped + b.receivingWG.Wait() +} + +// Publish places a new message on the default queue +func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error { + msg, err := json.Marshal(signature) + if err != nil { + return fmt.Errorf("JSON marshal error: %s", err) + } + + // Check that signature.RoutingKey is set, if not switch to DefaultQueue + b.AdjustRoutingKey(signature) + + MsgInput := &awssqs.SendMessageInput{ + MessageBody: aws.String(string(msg)), + QueueUrl: aws.String(b.GetConfig().Broker + "/" + signature.RoutingKey), + } + + // if this is a fifo queue, there needs to be some additional parameters. + if strings.HasSuffix(signature.RoutingKey, ".fifo") { + // Use Machinery's signature Task UUID as SQS Message Group ID. 
+ MsgDedupID := signature.UUID + MsgInput.MessageDeduplicationId = aws.String(MsgDedupID) + + // Do not Use Machinery's signature Group UUID as SQS Message Group ID, instead use BrokerMessageGroupId + MsgGroupID := signature.BrokerMessageGroupId + if MsgGroupID == "" { + return fmt.Errorf("please specify BrokerMessageGroupId attribute for task Signature when submitting a task to FIFO queue") + } + MsgInput.MessageGroupId = aws.String(MsgGroupID) + } + + // Check the ETA signature field, if it is set and it is in the future, + // and is not a fifo queue, set a delay in seconds for the task. + if signature.ETA != nil && !strings.HasSuffix(signature.RoutingKey, ".fifo") { + now := time.Now().UTC() + delay := signature.ETA.Sub(now) + if delay > 0 { + if delay > maxAWSSQSDelay { + return errors.New("Max AWS SQS delay exceeded") + } + MsgInput.DelaySeconds = aws.Int64(int64(delay.Seconds())) + } + } + + result, err := b.service.SendMessageWithContext(ctx, MsgInput) + + if err != nil { + log.ERROR.Printf("Error when sending a message: %v", err) + return err + + } + log.INFO.Printf("Sending a message successfully, the messageId is %v", *result.MessageId) + return nil + +} + +// consume is a method which keeps consuming deliveries from a channel, until there is an error or a stop signal +func (b *Broker) consume(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}) error { + + errorsChan := make(chan error) + + for { + whetherContinue, err := b.consumeDeliveries(deliveries, concurrency, taskProcessor, pool, errorsChan) + if err != nil { + return err + } + if whetherContinue == false { + return nil + } + } +} + +// consumeOne is a method that consumes a delivery. 
If a delivery was consumed successfully, it will be deleted from AWS SQS +func (b *Broker) consumeOne(delivery *awssqs.ReceiveMessageOutput, taskProcessor iface.TaskProcessor) error { + if len(delivery.Messages) == 0 { + log.ERROR.Printf("received an empty message, the delivery was %v", delivery) + return errors.New("received empty message, the delivery is " + delivery.GoString()) + } + + sig := new(tasks.Signature) + decoder := json.NewDecoder(strings.NewReader(*delivery.Messages[0].Body)) + decoder.UseNumber() + if err := decoder.Decode(sig); err != nil { + log.ERROR.Printf("unmarshal error. the delivery is %v", delivery) + // if the unmarshal fails, remove the delivery from the queue + if delErr := b.deleteOne(delivery); delErr != nil { + log.ERROR.Printf("error when deleting the delivery. delivery is %v, Error=%s", delivery, delErr) + } + return err + } + if delivery.Messages[0].ReceiptHandle != nil { + sig.SQSReceiptHandle = *delivery.Messages[0].ReceiptHandle + } + + // If the task is not registered return an error + // and leave the message in the queue + if !b.IsTaskRegistered(sig.Name) { + if sig.IgnoreWhenTaskNotRegistered { + b.deleteOne(delivery) + } + return fmt.Errorf("task %s is not registered", sig.Name) + } + + err := taskProcessor.Process(sig) + if err != nil { + // stop task deletion in case we want to send messages to dlq in sqs + if err == errs.ErrStopTaskDeletion { + return nil + } + return err + } + // Delete message after successfully consuming and processing the message + if err = b.deleteOne(delivery); err != nil { + log.ERROR.Printf("error when deleting the delivery. 
delivery is %v, Error=%s", delivery, err) + } + return err +} + +// deleteOne is a method that deletes a delivery from AWS SQS +func (b *Broker) deleteOne(delivery *awssqs.ReceiveMessageOutput) error { + qURL := b.defaultQueueURL() + _, err := b.service.DeleteMessage(&awssqs.DeleteMessageInput{ + QueueUrl: qURL, + ReceiptHandle: delivery.Messages[0].ReceiptHandle, + }) + + if err != nil { + return err + } + return nil +} + +// defaultQueueURL is a method that returns the default queue URL +func (b *Broker) defaultQueueURL() *string { + if b.queueUrl != nil { + return b.queueUrl + } else { + return aws.String(b.GetConfig().Broker + "/" + b.GetConfig().DefaultQueue) + } + +} + +// receiveMessage is a method that receives a message from the specified queue URL +func (b *Broker) receiveMessage(qURL *string) (*awssqs.ReceiveMessageOutput, error) { + var waitTimeSeconds int + var visibilityTimeout *int + if b.GetConfig().SQS != nil { + waitTimeSeconds = b.GetConfig().SQS.WaitTimeSeconds + visibilityTimeout = b.GetConfig().SQS.VisibilityTimeout + } else { + waitTimeSeconds = 0 + } + input := &awssqs.ReceiveMessageInput{ + AttributeNames: []*string{ + aws.String(awssqs.MessageSystemAttributeNameSentTimestamp), + }, + MessageAttributeNames: []*string{ + aws.String(awssqs.QueueAttributeNameAll), + }, + QueueUrl: qURL, + MaxNumberOfMessages: aws.Int64(1), + WaitTimeSeconds: aws.Int64(int64(waitTimeSeconds)), + } + if visibilityTimeout != nil { + input.VisibilityTimeout = aws.Int64(int64(*visibilityTimeout)) + } + result, err := b.service.ReceiveMessage(input) + if err != nil { + return nil, err + } + return result, err +} + +// initializePool is a method which initializes concurrency pool +func (b *Broker) initializePool(pool chan struct{}, concurrency int) { + for i := 0; i < concurrency; i++ { + pool <- struct{}{} + } +} + +// consumeDeliveries is a method that consumes deliveries from the deliveries channel +func (b *Broker) consumeDeliveries(deliveries <-chan *awssqs.ReceiveMessageOutput, 
concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}, errorsChan chan error) (bool, error) { + select { + case err := <-errorsChan: + return false, err + case d := <-deliveries: + + b.processingWG.Add(1) + + // Consume the task inside a goroutine so multiple tasks + // can be processed concurrently + go func() { + + if err := b.consumeOne(d, taskProcessor); err != nil { + errorsChan <- err + } + + b.processingWG.Done() + + if concurrency > 0 { + // give worker back to pool + pool <- struct{}{} + } + }() + case <-b.GetStopChan(): + return false, nil + } + return true, nil +} + +// continueReceivingMessages is a method that returns a continue signal +func (b *Broker) continueReceivingMessages(qURL *string, deliveries chan *awssqs.ReceiveMessageOutput) (bool, error) { + select { + // A way to stop this goroutine from b.StopConsuming + case <-b.stopReceivingChan: + return false, nil + default: + output, err := b.receiveMessage(qURL) + if err != nil { + return true, err + } + if len(output.Messages) == 0 { + return true, nil + } + go func() { deliveries <- output }() + } + return true, nil +} + +// stopReceiving is a method sending a signal to stopReceivingChan +func (b *Broker) stopReceiving() { + // Stop the receiving goroutine + b.stopReceivingChan <- 1 +} + +// getQueueURL is a method that returns the queue URL, first by checking if a custom queue was set and using it, +// otherwise using default queueName from config +func (b *Broker) getQueueURL(taskProcessor iface.TaskProcessor) *string { + queueName := b.GetConfig().DefaultQueue + if taskProcessor.CustomQueue() != "" { + queueName = taskProcessor.CustomQueue() + } + + return aws.String(b.GetConfig().Broker + "/" + queueName) +} diff --git a/v2/brokers/sqs/sqs_export_test.go b/v2/brokers/sqs/sqs_export_test.go new file mode 100644 index 000000000..8bcd8d626 --- /dev/null +++ b/v2/brokers/sqs/sqs_export_test.go @@ -0,0 +1,211 @@ +package sqs + +import ( + "encoding/json" + "errors" + "fmt" + "os" + 
"sync" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sqs/sqsiface" + + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + + awssqs "github.com/aws/aws-sdk-go/service/sqs" +) + +var ( + ReceiveMessageOutput *awssqs.ReceiveMessageOutput +) + +type FakeSQS struct { + sqsiface.SQSAPI +} + +func (f *FakeSQS) SendMessage(*awssqs.SendMessageInput) (*awssqs.SendMessageOutput, error) { + output := awssqs.SendMessageOutput{ + MD5OfMessageAttributes: aws.String("d25a6aea97eb8f585bfa92d314504a92"), + MD5OfMessageBody: aws.String("bbdc5fdb8be7251f5c910905db994bab"), + MessageId: aws.String("47f8b355-5115-4b45-b33a-439016400411"), + } + return &output, nil +} + +func (f *FakeSQS) ReceiveMessage(*awssqs.ReceiveMessageInput) (*awssqs.ReceiveMessageOutput, error) { + return ReceiveMessageOutput, nil +} + +func (f *FakeSQS) DeleteMessage(*awssqs.DeleteMessageInput) (*awssqs.DeleteMessageOutput, error) { + return &awssqs.DeleteMessageOutput{}, nil +} + +type ErrorSQS struct { + sqsiface.SQSAPI +} + +func (e *ErrorSQS) SendMessage(*awssqs.SendMessageInput) (*awssqs.SendMessageOutput, error) { + err := errors.New("this is an error") + return nil, err +} + +func (e *ErrorSQS) ReceiveMessage(*awssqs.ReceiveMessageInput) (*awssqs.ReceiveMessageOutput, error) { + err := errors.New("this is an error") + return nil, err +} + +func (e *ErrorSQS) DeleteMessage(*awssqs.DeleteMessageInput) (*awssqs.DeleteMessageOutput, error) { + err := errors.New("this is an error") + return nil, err +} + +func init() { + // TODO: change message body to signature example + messageBody, _ := json.Marshal(map[string]int{"apple": 5, "lettuce": 7}) + ReceiveMessageOutput = &awssqs.ReceiveMessageOutput{ + Messages: []*awssqs.Message{ + { + Attributes: map[string]*string{ + "SentTimestamp": aws.String("1512962021537"), + }, + Body: 
aws.String(string(messageBody)), + MD5OfBody: aws.String("bbdc5fdb8be7251f5c910905db994bab"), + MD5OfMessageAttributes: aws.String("d25a6aea97eb8f585bfa92d314504a92"), + MessageAttributes: map[string]*awssqs.MessageAttributeValue{ + "Title": { + DataType: aws.String("String"), + StringValue: aws.String("The Whistler"), + }, + "Author": { + DataType: aws.String("String"), + StringValue: aws.String("John Grisham"), + }, + "WeeksOn": { + DataType: aws.String("Number"), + StringValue: aws.String("6"), + }, + }, + MessageId: aws.String("47f8b355-5115-4b45-b33a-439016400411"), + ReceiptHandle: aws.String("AQEBGhTR/nhq+pDPAunCDgLpwQuCq0JkD2dtv7pAcPF5DA/XaoPAjHfgn/PZ5DeG3YiQdTjCUj+rvFq5b79DTq+hK6r1Niuds02l+jdIk3u2JiL01Dsd203pW1lLUNryd74QAcn462eXzv7/hVDagXTn+KtOzox3X0vmPkCSQkWXWxtc23oa5+5Q7HWDmRm743L0zza1579rQ2R2B0TrdlTMpNsdjQlDmybNu+aDq8bazD/Wew539tIvUyYADuhVyKyS1L2QQuyXll73/DixulPNmvGPRHNoB1GIo+Ex929OHFchXoKonoFJnurX4VNNl1p/Byp2IYBi6nkTRzeJUFCrFq0WMAHKLwuxciezJSlLD7g3bbU8kgEer8+jTz1DBriUlDGsARr0s7mnlsd02cb46K/j+u1oPfA69vIVc0FaRtA="), + }, + }, + } +} + +func NewTestConfig() *config.Config { + + redisURL := os.Getenv("REDIS_URL") + if redisURL == "" { + redisURL = "eager" + } + brokerURL := "https://sqs.foo.amazonaws.com.cn" + return &config.Config{ + Broker: brokerURL, + DefaultQueue: "test_queue", + ResultBackend: fmt.Sprintf("redis://%v", redisURL), + Lock: fmt.Sprintf("redis://%v", redisURL), + } +} + +func NewTestBroker() *Broker { + + cnf := NewTestConfig() + sess := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) + + svc := new(FakeSQS) + return &Broker{ + Broker: common.NewBroker(cnf), + sess: sess, + service: svc, + processingWG: sync.WaitGroup{}, + receivingWG: sync.WaitGroup{}, + stopReceivingChan: make(chan int), + } +} + +func NewTestErrorBroker() *Broker { + + cnf := NewTestConfig() + sess := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: 
session.SharedConfigEnable, + })) + + errSvc := new(ErrorSQS) + return &Broker{ + Broker: common.NewBroker(cnf), + sess: sess, + service: errSvc, + processingWG: sync.WaitGroup{}, + receivingWG: sync.WaitGroup{}, + stopReceivingChan: make(chan int), + } +} + +func (b *Broker) ConsumeForTest(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}) error { + return b.consume(deliveries, concurrency, taskProcessor, pool) +} + +func (b *Broker) ConsumeOneForTest(delivery *awssqs.ReceiveMessageOutput, taskProcessor iface.TaskProcessor) error { + return b.consumeOne(delivery, taskProcessor) +} + +func (b *Broker) DeleteOneForTest(delivery *awssqs.ReceiveMessageOutput) error { + return b.deleteOne(delivery) +} + +func (b *Broker) DefaultQueueURLForTest() *string { + return b.defaultQueueURL() +} + +func (b *Broker) ReceiveMessageForTest(qURL *string) (*awssqs.ReceiveMessageOutput, error) { + return b.receiveMessage(qURL) +} + +func (b *Broker) InitializePoolForTest(pool chan struct{}, concurrency int) { + b.initializePool(pool, concurrency) +} + +func (b *Broker) ConsumeDeliveriesForTest(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}, errorsChan chan error) (bool, error) { + return b.consumeDeliveries(deliveries, concurrency, taskProcessor, pool, errorsChan) +} + +func (b *Broker) ContinueReceivingMessagesForTest(qURL *string, deliveries chan *awssqs.ReceiveMessageOutput) (bool, error) { + return b.continueReceivingMessages(qURL, deliveries) +} + +func (b *Broker) StopReceivingForTest() { + b.stopReceiving() +} + +func (b *Broker) GetStopReceivingChanForTest() chan int { + return b.stopReceivingChan +} + +func (b *Broker) StartConsumingForTest(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) { + b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor) +} + +func (b *Broker) GetRetryFuncForTest() func(chan 
int) { + return b.GetRetryFunc() +} + +func (b *Broker) GetStopChanForTest() chan int { + return b.GetStopChan() +} + +func (b *Broker) GetRetryStopChanForTest() chan int { + return b.GetRetryStopChan() +} + +func (b *Broker) GetQueueURLForTest(taskProcessor iface.TaskProcessor) *string { + return b.getQueueURL(taskProcessor) +} + +func (b *Broker) GetCustomQueueURL(customQueue string) *string { + return aws.String(b.GetConfig().Broker + "/" + customQueue) +} diff --git a/v2/brokers/sqs/sqs_test.go b/v2/brokers/sqs/sqs_test.go new file mode 100644 index 000000000..797c5de28 --- /dev/null +++ b/v2/brokers/sqs/sqs_test.go @@ -0,0 +1,340 @@ +package sqs_test + +import ( + "context" + "errors" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" + + "github.com/RichardKnop/machinery/v2" + "github.com/RichardKnop/machinery/v2/brokers/sqs" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/retry" + + awssqs "github.com/aws/aws-sdk-go/service/sqs" +) + +var ( + cnf *config.Config + receiveMessageOutput *awssqs.ReceiveMessageOutput +) + +func init() { + cnf = sqs.NewTestConfig() + receiveMessageOutput = sqs.ReceiveMessageOutput +} + +func TestNewAWSSQSBroker(t *testing.T) { + t.Parallel() + + broker := sqs.NewTestBroker() + + assert.IsType(t, broker, sqs.New(cnf)) +} + +func TestPrivateFunc_continueReceivingMessages(t *testing.T) { + + broker := sqs.NewTestBroker() + errorBroker := sqs.NewTestErrorBroker() + + qURL := broker.DefaultQueueURLForTest() + deliveries := make(chan *awssqs.ReceiveMessageOutput) + firstStep := make(chan int) + nextStep := make(chan int) + go func() { + stopReceivingChan := broker.GetStopReceivingChanForTest() + firstStep <- 1 + stopReceivingChan <- 1 + }() + + var ( + whetherContinue bool + err error + ) + <-firstStep + // Test the case that a signal was received from stopReceivingChan + go func() { + whetherContinue, err = 
broker.ContinueReceivingMessagesForTest(qURL, deliveries) + nextStep <- 1 + }() + <-nextStep + assert.False(t, whetherContinue) + assert.Nil(t, err) + + // Test the default condition + whetherContinue, err = broker.ContinueReceivingMessagesForTest(qURL, deliveries) + assert.True(t, whetherContinue) + assert.Nil(t, err) + + // Test the error + whetherContinue, err = errorBroker.ContinueReceivingMessagesForTest(qURL, deliveries) + assert.True(t, whetherContinue) + assert.NotNil(t, err) + + // Test when there is no message + outputCopy := *receiveMessageOutput + receiveMessageOutput.Messages = []*awssqs.Message{} + whetherContinue, err = broker.ContinueReceivingMessagesForTest(qURL, deliveries) + assert.True(t, whetherContinue) + assert.Nil(t, err) + // recover original value + *receiveMessageOutput = outputCopy +} + +func TestPrivateFunc_consume(t *testing.T) { + + server1, err := machinery.NewServer(cnf) + if err != nil { + t.Fatal(err) + } + pool := make(chan struct{}) + wk := server1.NewWorker("sms_worker", 0) + deliveries := make(chan *awssqs.ReceiveMessageOutput) + outputCopy := *receiveMessageOutput + outputCopy.Messages = []*awssqs.Message{} + go func() { deliveries <- &outputCopy }() + + broker := sqs.NewTestBroker() + + // an infinite loop will be executed only when there is no error + err = broker.ConsumeForTest(deliveries, 0, wk, pool) + assert.NotNil(t, err) +} + +func TestPrivateFunc_consumeOne(t *testing.T) { + + server1, err := machinery.NewServer(cnf) + if err != nil { + t.Fatal(err) + } + wk := server1.NewWorker("sms_worker", 0) + broker := sqs.NewTestBroker() + + err = broker.ConsumeOneForTest(receiveMessageOutput, wk) + assert.NotNil(t, err) + + outputCopy := *receiveMessageOutput + outputCopy.Messages = []*awssqs.Message{} + err = broker.ConsumeOneForTest(&outputCopy, wk) + assert.NotNil(t, err) + + outputCopy.Messages = []*awssqs.Message{ + { + Body: aws.String("foo message"), + }, + } + err = broker.ConsumeOneForTest(&outputCopy, wk) + 
assert.NotNil(t, err) +} + +func TestPrivateFunc_initializePool(t *testing.T) { + + broker := sqs.NewTestBroker() + + concurrency := 9 + pool := make(chan struct{}, concurrency) + broker.InitializePoolForTest(pool, concurrency) + assert.Len(t, pool, concurrency) +} + +func TestPrivateFunc_startConsuming(t *testing.T) { + + server1, err := machinery.NewServer(cnf) + if err != nil { + t.Fatal(err) + } + + wk := server1.NewWorker("sms_worker", 0) + broker := sqs.NewTestBroker() + + retryFunc := broker.GetRetryFuncForTest() + stopChan := broker.GetStopChanForTest() + retryStopChan := broker.GetRetryStopChanForTest() + assert.Nil(t, retryFunc) + + broker.StartConsumingForTest("fooTag", 1, wk) + assert.IsType(t, retryFunc, retry.Closure()) + assert.Equal(t, len(stopChan), 0) + assert.Equal(t, len(retryStopChan), 0) +} + +func TestPrivateFuncDefaultQueueURL(t *testing.T) { + + broker := sqs.NewTestBroker() + + qURL := broker.DefaultQueueURLForTest() + + assert.EqualValues(t, *qURL, "https://sqs.foo.amazonaws.com.cn/test_queue") +} + +func TestPrivateFunc_stopReceiving(t *testing.T) { + + broker := sqs.NewTestBroker() + + go broker.StopReceivingForTest() + + stopReceivingChan := broker.GetStopReceivingChanForTest() + assert.NotNil(t, <-stopReceivingChan) +} + +func TestPrivateFunc_receiveMessage(t *testing.T) { + + broker := sqs.NewTestBroker() + + qURL := broker.DefaultQueueURLForTest() + output, err := broker.ReceiveMessageForTest(qURL) + assert.Nil(t, err) + assert.Equal(t, receiveMessageOutput, output) +} + +func TestPrivateFunc_consumeDeliveries(t *testing.T) { + + concurrency := 0 + pool := make(chan struct{}, concurrency) + errorsChan := make(chan error) + deliveries := make(chan *awssqs.ReceiveMessageOutput) + server1, err := machinery.NewServer(cnf) + if err != nil { + t.Fatal(err) + } + + wk := server1.NewWorker("sms_worker", 0) + broker := sqs.NewTestBroker() + + go func() { deliveries <- receiveMessageOutput }() + whetherContinue, err := 
broker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan) + assert.True(t, whetherContinue) + assert.Nil(t, err) + + go func() { errorsChan <- errors.New("foo error") }() + whetherContinue, err = broker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan) + assert.False(t, whetherContinue) + assert.NotNil(t, err) + + go func() { broker.GetStopChanForTest() <- 1 }() + whetherContinue, err = broker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan) + assert.False(t, whetherContinue) + assert.Nil(t, err) + + outputCopy := *receiveMessageOutput + outputCopy.Messages = []*awssqs.Message{} + go func() { deliveries <- &outputCopy }() + whetherContinue, err = broker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan) + e := <-errorsChan + assert.True(t, whetherContinue) + assert.NotNil(t, e) + assert.Nil(t, err) + + // using a wait group and a channel to fix the racing problem + var wg sync.WaitGroup + wg.Add(1) + nextStep := make(chan bool, 1) + go func() { + defer wg.Done() + // nextStep <- true runs after defer wg.Done(), to make sure the next go routine runs after this go routine + nextStep <- true + deliveries <- receiveMessageOutput + }() + if <-nextStep { + // <-pool will block the routine in the following steps, so pool <- struct{}{} will be executed for sure + go func() { wg.Wait(); pool <- struct{}{} }() + } + whetherContinue, err = broker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan) + // the pool shouldn't be consumed + p := <-pool + assert.True(t, whetherContinue) + assert.NotNil(t, p) + assert.Nil(t, err) +} + +func TestPrivateFunc_deleteOne(t *testing.T) { + + broker := sqs.NewTestBroker() + errorBroker := sqs.NewTestErrorBroker() + + err := broker.DeleteOneForTest(receiveMessageOutput) + assert.Nil(t, err) + + err = errorBroker.DeleteOneForTest(receiveMessageOutput) + assert.NotNil(t, err) +} + +func Test_CustomQueueName(t *testing.T) { + + server1, 
err := machinery.NewServer(cnf) + if err != nil { + t.Fatal(err) + } + + broker := sqs.NewTestBroker() + + wk := server1.NewWorker("test-worker", 0) + qURL := broker.GetQueueURLForTest(wk) + assert.Equal(t, qURL, broker.DefaultQueueURLForTest(), "") + + wk2 := server1.NewCustomQueueWorker("test-worker", 0, "my-custom-queue") + qURL2 := broker.GetQueueURLForTest(wk2) + assert.Equal(t, qURL2, broker.GetCustomQueueURL("my-custom-queue"), "") +} + +func TestPrivateFunc_consumeWithConcurrency(t *testing.T) { + + msg := `{ + "UUID": "uuid-dummy-task", + "Name": "test-task", + "RoutingKey": "dummy-routing" + } + ` + + testResp := "47f8b355-5115-4b45-b33a-439016400411" + output := make(chan string) // The output channel + + cnf.ResultBackend = "eager" + server1, err := machinery.NewServer(cnf) + if err != nil { + t.Fatal(err) + } + err = server1.RegisterTask("test-task", func(ctx context.Context) error { + output <- testResp + + return nil + }) + + broker := sqs.NewTestBroker() + + broker.SetRegisteredTaskNames([]string{"test-task"}) + assert.NoError(t, err) + pool := make(chan struct{}, 1) + pool <- struct{}{} + wk := server1.NewWorker("sms_worker", 1) + deliveries := make(chan *awssqs.ReceiveMessageOutput) + outputCopy := *receiveMessageOutput + outputCopy.Messages = []*awssqs.Message{ + { + MessageId: aws.String("test-sqs-msg1"), + Body: aws.String(msg), + }, + } + + go func() { + deliveries <- &outputCopy + + }() + + go func() { + err = broker.ConsumeForTest(deliveries, 1, wk, pool) + }() + + select { + case resp := <-output: + assert.Equal(t, testResp, resp) + + case <-time.After(10 * time.Second): + // call timed out + t.Fatal("task not processed in 10 seconds") + } +} diff --git a/v2/common/amqp.go b/v2/common/amqp.go new file mode 100644 index 000000000..21e8eb034 --- /dev/null +++ b/v2/common/amqp.go @@ -0,0 +1,147 @@ +package common + +import ( + "crypto/tls" + "fmt" + "strings" + + "github.com/streadway/amqp" +) + +// AMQPConnector ... 
+type AMQPConnector struct{} + +// Connect opens a connection to RabbitMQ, declares an exchange, opens a channel, +// declares and binds the queue and enables publish notifications +func (ac *AMQPConnector) Connect(urls string, urlSeparator string, tlsConfig *tls.Config, exchange, exchangeType, queueName string, queueDurable, queueDelete bool, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*amqp.Connection, *amqp.Channel, amqp.Queue, <-chan amqp.Confirmation, <-chan *amqp.Error, error) { + urlsList := []string{urls} + if urlSeparator != "" { + urlsList = strings.Split(urls, urlSeparator) + } + + var conn *amqp.Connection + var channel *amqp.Channel + var err error + + for _, url := range urlsList { + // Connect to server + conn, channel, err = ac.Open(url, tlsConfig) + if err != nil { + continue + } else { + break + } + } + + if err != nil { + return nil, nil, amqp.Queue{}, nil, nil, err + } + + if exchange != "" { + // Declare an exchange + if err = channel.ExchangeDeclare( + exchange, // name of the exchange + exchangeType, // type + true, // durable + false, // delete when complete + false, // internal + false, // noWait + exchangeDeclareArgs, // arguments + ); err != nil { + return conn, channel, amqp.Queue{}, nil, nil, fmt.Errorf("Exchange declare error: %s", err) + } + } + + var queue amqp.Queue + if queueName != "" { + // Declare a queue + queue, err = channel.QueueDeclare( + queueName, // name + queueDurable, // durable + queueDelete, // delete when unused + false, // exclusive + false, // no-wait + queueDeclareArgs, // arguments + ) + if err != nil { + return conn, channel, amqp.Queue{}, nil, nil, fmt.Errorf("Queue declare error: %s", err) + } + + // Bind the queue + if err = channel.QueueBind( + queue.Name, // name of the queue + queueBindingKey, // binding key + exchange, // source exchange + false, // noWait + queueBindingArgs, // arguments + ); err != nil { + return conn, channel, queue, nil, nil, 
fmt.Errorf("Queue bind error: %s", err) + } + } + + // Enable publish confirmations + if err = channel.Confirm(false); err != nil { + return conn, channel, queue, nil, nil, fmt.Errorf("Channel could not be put into confirm mode: %s", err) + } + + return conn, channel, queue, channel.NotifyPublish(make(chan amqp.Confirmation, 1)), conn.NotifyClose(make(chan *amqp.Error, 1)), nil +} + +// DeleteQueue deletes a queue by name +func (ac *AMQPConnector) DeleteQueue(channel *amqp.Channel, queueName string) error { + // First return value is number of messages removed + _, err := channel.QueueDelete( + queueName, // name + false, // ifUnused + false, // ifEmpty + false, // noWait + ) + + return err +} + +// InspectQueue provides information about a specific queue +func (*AMQPConnector) InspectQueue(channel *amqp.Channel, queueName string) (*amqp.Queue, error) { + queueState, err := channel.QueueInspect(queueName) + if err != nil { + return nil, fmt.Errorf("Queue inspect error: %s", err) + } + + return &queueState, nil +} + +// Open new RabbitMQ connection +func (ac *AMQPConnector) Open(url string, tlsConfig *tls.Config) (*amqp.Connection, *amqp.Channel, error) { + // Connect + // From amqp docs: DialTLS will use the provided tls.Config when it encounters an amqps:// scheme + // and will dial a plain connection when it encounters an amqp:// scheme. 
+ conn, err := amqp.DialTLS(url, tlsConfig) + if err != nil { + return nil, nil, fmt.Errorf("Dial error: %s", err) + } + + // Open a channel + channel, err := conn.Channel() + if err != nil { + return nil, nil, fmt.Errorf("Open channel error: %s", err) + } + + return conn, channel, nil +} + +// Close connection +func (ac *AMQPConnector) Close(channel *amqp.Channel, conn *amqp.Connection) error { + if channel != nil { + if err := channel.Close(); err != nil { + return fmt.Errorf("Close channel error: %s", err) + } + } + + if conn != nil { + if err := conn.Close(); err != nil { + return fmt.Errorf("Close connection error: %s", err) + } + } + + return nil +} diff --git a/v2/common/backend.go b/v2/common/backend.go new file mode 100644 index 000000000..3ab094a75 --- /dev/null +++ b/v2/common/backend.go @@ -0,0 +1,25 @@ +package common + +import ( + "github.com/RichardKnop/machinery/v2/config" +) + +// Backend represents a base backend structure +type Backend struct { + cnf *config.Config +} + +// NewBackend creates new Backend instance +func NewBackend(cnf *config.Config) Backend { + return Backend{cnf: cnf} +} + +// GetConfig returns config +func (b *Backend) GetConfig() *config.Config { + return b.cnf +} + +// IsAMQP ... 
+func (b *Backend) IsAMQP() bool { + return false +} diff --git a/v2/common/broker.go b/v2/common/broker.go new file mode 100644 index 000000000..d139ab559 --- /dev/null +++ b/v2/common/broker.go @@ -0,0 +1,139 @@ +package common + +import ( + "errors" + "sync" + + "github.com/RichardKnop/machinery/v2/brokers/iface" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/retry" + "github.com/RichardKnop/machinery/v2/tasks" +) + +type registeredTaskNames struct { + sync.RWMutex + items []string +} + +// Broker represents a base broker structure +type Broker struct { + cnf *config.Config + registeredTaskNames registeredTaskNames + retry bool + retryFunc func(chan int) + retryStopChan chan int + stopChan chan int +} + +// NewBroker creates new Broker instance +func NewBroker(cnf *config.Config) Broker { + return Broker{ + cnf: cnf, + retry: true, + stopChan: make(chan int), + retryStopChan: make(chan int), + } +} + +// GetConfig returns config +func (b *Broker) GetConfig() *config.Config { + return b.cnf +} + +// GetRetry ... +func (b *Broker) GetRetry() bool { + return b.retry +} + +// GetRetryFunc ... +func (b *Broker) GetRetryFunc() func(chan int) { + return b.retryFunc +} + +// GetRetryStopChan ... +func (b *Broker) GetRetryStopChan() chan int { + return b.retryStopChan +} + +// GetStopChan ... 
+func (b *Broker) GetStopChan() chan int { + return b.stopChan +} + +// Publish places a new message on the default queue +func (b *Broker) Publish(signature *tasks.Signature) error { + return errors.New("Not implemented") +} + +// SetRegisteredTaskNames sets registered task names +func (b *Broker) SetRegisteredTaskNames(names []string) { + b.registeredTaskNames.Lock() + defer b.registeredTaskNames.Unlock() + b.registeredTaskNames.items = names +} + +// IsTaskRegistered returns true if the task is registered with this broker +func (b *Broker) IsTaskRegistered(name string) bool { + b.registeredTaskNames.RLock() + defer b.registeredTaskNames.RUnlock() + for _, registeredTaskName := range b.registeredTaskNames.items { + if registeredTaskName == name { + return true + } + } + return false +} + +// GetPendingTasks returns a slice of task.Signatures waiting in the queue +func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) { + return nil, errors.New("Not implemented") +} + +// GetDelayedTasks returns a slice of task.Signatures that are scheduled, but not yet in the queue +func (b *Broker) GetDelayedTasks() ([]*tasks.Signature, error) { + return nil, errors.New("Not implemented") +} + +// StartConsuming is a common part of StartConsuming method +func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) { + if b.retryFunc == nil { + b.retryFunc = retry.Closure() + } + +} + +// StopConsuming is a common part of StopConsuming +func (b *Broker) StopConsuming() { + // Do not retry from now on + b.retry = false + // Stop the retry closure earlier + select { + case b.retryStopChan <- 1: + log.WARNING.Print("Stopping retry closure.") + default: + } + // Notifying the stop channel stops consuming of messages + close(b.stopChan) + log.WARNING.Print("Stop channel") +} + +// GetRegisteredTaskNames returns registered tasks names +func (b *Broker) GetRegisteredTaskNames() []string { + b.registeredTaskNames.RLock() + 
defer b.registeredTaskNames.RUnlock() + items := b.registeredTaskNames.items + return items +} + +// AdjustRoutingKey makes sure the routing key is correct. +// If the routing key is an empty string: +// a) set it to binding key for direct exchange type +// b) set it to default queue name +func (b *Broker) AdjustRoutingKey(s *tasks.Signature) { + if s.RoutingKey != "" { + return + } + + s.RoutingKey = b.GetConfig().DefaultQueue +} diff --git a/v2/common/broker_test.go b/v2/common/broker_test.go new file mode 100644 index 000000000..78f6c1da2 --- /dev/null +++ b/v2/common/broker_test.go @@ -0,0 +1,74 @@ +package common_test + +import ( + "testing" + + "github.com/RichardKnop/machinery/v2" + "github.com/RichardKnop/machinery/v2/common" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestIsTaskRegistered(t *testing.T) { + t.Parallel() + + broker := common.NewBroker(new(config.Config)) + broker.SetRegisteredTaskNames([]string{"foo", "bar"}) + + assert.True(t, broker.IsTaskRegistered("foo")) + assert.False(t, broker.IsTaskRegistered("bogus")) +} + +func TestAdjustRoutingKey(t *testing.T) { + t.Parallel() + + var ( + s *tasks.Signature + broker common.Broker + ) + + t.Run("with routing key", func(t *testing.T) { + s = &tasks.Signature{RoutingKey: "routing_key"} + broker = common.NewBroker(&config.Config{ + DefaultQueue: "queue", + }) + broker.AdjustRoutingKey(s) + assert.Equal(t, "routing_key", s.RoutingKey) + }) + + t.Run("without routing key", func(t *testing.T) { + s = new(tasks.Signature) + broker = common.NewBroker(&config.Config{ + DefaultQueue: "queue", + }) + broker.AdjustRoutingKey(s) + assert.Equal(t, "queue", s.RoutingKey) + }) +} + +func TestGetRegisteredTaskNames(t *testing.T) { + t.Parallel() + + broker := common.NewBroker(new(config.Config)) + fooTasks := []string{"foo", "bar", "baz"} + broker.SetRegisteredTaskNames(fooTasks) + assert.Equal(t, fooTasks, 
broker.GetRegisteredTaskNames()) +} + +func TestStopConsuming(t *testing.T) { + t.Parallel() + + t.Run("stop consuming", func(t *testing.T) { + broker := common.NewBroker(&config.Config{ + DefaultQueue: "queue", + }) + broker.StartConsuming("", 1, &machinery.Worker{}) + broker.StopConsuming() + select { + case <-broker.GetStopChan(): + default: + assert.Fail(t, "still blocking") + } + }) +} diff --git a/v2/common/redis.go b/v2/common/redis.go new file mode 100644 index 000000000..6aa7494dc --- /dev/null +++ b/v2/common/redis.go @@ -0,0 +1,87 @@ +package common + +import ( + "crypto/tls" + "time" + + "github.com/gomodule/redigo/redis" + + "github.com/RichardKnop/machinery/v2/config" +) + +var ( + defaultConfig = &config.RedisConfig{ + MaxIdle: 10, + MaxActive: 100, + IdleTimeout: 300, + Wait: true, + ReadTimeout: 15, + WriteTimeout: 15, + ConnectTimeout: 15, + NormalTasksPollPeriod: 1000, + DelayedTasksPollPeriod: 20, + } +) + +// RedisConnector ... +type RedisConnector struct{} + +// NewPool returns a new pool of Redis connections +func (rc *RedisConnector) NewPool(socketPath, host, password string, db int, cnf *config.RedisConfig, tlsConfig *tls.Config) *redis.Pool { + if cnf == nil { + cnf = defaultConfig + } + return &redis.Pool{ + MaxIdle: cnf.MaxIdle, + IdleTimeout: time.Duration(cnf.IdleTimeout) * time.Second, + MaxActive: cnf.MaxActive, + Wait: cnf.Wait, + Dial: func() (redis.Conn, error) { + c, err := rc.open(socketPath, host, password, db, cnf, tlsConfig) + if err != nil { + return nil, err + } + + if db != 0 { + _, err = c.Do("SELECT", db) + if err != nil { + return nil, err + } + } + + return c, err + }, + // PINGs connections that have been idle more than 10 seconds + TestOnBorrow: func(c redis.Conn, t time.Time) error { + if time.Since(t) < time.Duration(10*time.Second) { + return nil + } + _, err := c.Do("PING") + return err + }, + } +} + +// Open a new Redis connection +func (rc *RedisConnector) open(socketPath, host, password string, db int, cnf 
*config.RedisConfig, tlsConfig *tls.Config) (redis.Conn, error) { + var opts = []redis.DialOption{ + redis.DialDatabase(db), + redis.DialReadTimeout(time.Duration(cnf.ReadTimeout) * time.Second), + redis.DialWriteTimeout(time.Duration(cnf.WriteTimeout) * time.Second), + redis.DialConnectTimeout(time.Duration(cnf.ConnectTimeout) * time.Second), + } + + if tlsConfig != nil { + opts = append(opts, redis.DialTLSConfig(tlsConfig), redis.DialUseTLS(true)) + } + + if password != "" { + opts = append(opts, redis.DialPassword(password)) + } + + if socketPath != "" { + return redis.Dial("unix", socketPath, opts...) + } + + return redis.Dial("tcp", host, opts...) +} diff --git a/v2/config/config.go b/v2/config/config.go new file mode 100644 index 000000000..f2a326cb1 --- /dev/null +++ b/v2/config/config.go @@ -0,0 +1,180 @@ +package config + +import ( + "crypto/tls" + "fmt" + "strings" + "time" + + "cloud.google.com/go/pubsub" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/sqs" + "go.mongodb.org/mongo-driver/mongo" +) + +const ( + // DefaultResultsExpireIn is a default time used to expire task states and group metadata from the backend + DefaultResultsExpireIn = 3600 +) + +var ( + // Start with sensible default values + defaultCnf = &Config{ + Broker: "amqp://guest:guest@localhost:5672/", + DefaultQueue: "machinery_tasks", + ResultBackend: "amqp://guest:guest@localhost:5672/", + ResultsExpireIn: DefaultResultsExpireIn, + AMQP: &AMQPConfig{ + Exchange: "machinery_exchange", + ExchangeType: "direct", + BindingKey: "machinery_task", + PrefetchCount: 3, + }, + DynamoDB: &DynamoDBConfig{ + TaskStatesTable: "task_states", + GroupMetasTable: "group_metas", + }, + Redis: &RedisConfig{ + MaxIdle: 3, + IdleTimeout: 240, + ReadTimeout: 15, + WriteTimeout: 15, + ConnectTimeout: 15, + NormalTasksPollPeriod: 1000, + DelayedTasksPollPeriod: 500, + }, + GCPPubSub: &GCPPubSubConfig{ + Client: nil, + }, + } + + reloadDelay = time.Second * 10 +) + +// Config 
holds all configuration for our program +type Config struct { + Broker string `yaml:"broker" envconfig:"BROKER"` + Lock string `yaml:"lock" envconfig:"LOCK"` + MultipleBrokerSeparator string `yaml:"multiple_broker_separator" envconfig:"MULTIPLE_BROKEN_SEPARATOR"` + DefaultQueue string `yaml:"default_queue" envconfig:"DEFAULT_QUEUE"` + ResultBackend string `yaml:"result_backend" envconfig:"RESULT_BACKEND"` + ResultsExpireIn int `yaml:"results_expire_in" envconfig:"RESULTS_EXPIRE_IN"` + AMQP *AMQPConfig `yaml:"amqp"` + SQS *SQSConfig `yaml:"sqs"` + Redis *RedisConfig `yaml:"redis"` + GCPPubSub *GCPPubSubConfig `yaml:"-" ignored:"true"` + MongoDB *MongoDBConfig `yaml:"-" ignored:"true"` + TLSConfig *tls.Config + // NoUnixSignals - when set disables signal handling in machinery + NoUnixSignals bool `yaml:"no_unix_signals" envconfig:"NO_UNIX_SIGNALS"` + DynamoDB *DynamoDBConfig `yaml:"dynamodb"` +} + +// QueueBindingArgs arguments which are used when binding to the exchange +type QueueBindingArgs map[string]interface{} + +// QueueDeclareArgs arguments which are used when declaring a queue +type QueueDeclareArgs map[string]interface{} + +// AMQPConfig wraps RabbitMQ related configuration +type AMQPConfig struct { + Exchange string `yaml:"exchange" envconfig:"AMQP_EXCHANGE"` + ExchangeType string `yaml:"exchange_type" envconfig:"AMQP_EXCHANGE_TYPE"` + QueueDeclareArgs QueueDeclareArgs `yaml:"queue_declare_args" envconfig:"AMQP_QUEUE_DECLARE_ARGS"` + QueueBindingArgs QueueBindingArgs `yaml:"queue_binding_args" envconfig:"AMQP_QUEUE_BINDING_ARGS"` + BindingKey string `yaml:"binding_key" envconfig:"AMQP_BINDING_KEY"` + PrefetchCount int `yaml:"prefetch_count" envconfig:"AMQP_PREFETCH_COUNT"` + AutoDelete bool `yaml:"auto_delete" envconfig:"AMQP_AUTO_DELETE"` +} + +// DynamoDBConfig wraps DynamoDB related configuration +type DynamoDBConfig struct { + Client *dynamodb.DynamoDB + TaskStatesTable string `yaml:"task_states_table" envconfig:"TASK_STATES_TABLE"` + GroupMetasTable 
string `yaml:"group_metas_table" envconfig:"GROUP_METAS_TABLE"` +} + +// SQSConfig wraps SQS related configuration +type SQSConfig struct { + Client *sqs.SQS + WaitTimeSeconds int `yaml:"receive_wait_time_seconds" envconfig:"SQS_WAIT_TIME_SECONDS"` + // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html + // visibility timeout should default to nil to use the overall visibility timeout for the queue + VisibilityTimeout *int `yaml:"receive_visibility_timeout" envconfig:"SQS_VISIBILITY_TIMEOUT"` +} + +// RedisConfig ... +type RedisConfig struct { + // Maximum number of idle connections in the pool. + // Default: 10 + MaxIdle int `yaml:"max_idle" envconfig:"REDIS_MAX_IDLE"` + + // Maximum number of connections allocated by the pool at a given time. + // When zero, there is no limit on the number of connections in the pool. + // Default: 100 + MaxActive int `yaml:"max_active" envconfig:"REDIS_MAX_ACTIVE"` + + // Close connections after remaining idle for this duration in seconds. If the value + // is zero, then idle connections are not closed. Applications should set + // the timeout to a value less than the server's timeout. + // Default: 300 + IdleTimeout int `yaml:"max_idle_timeout" envconfig:"REDIS_IDLE_TIMEOUT"` + + // If Wait is true and the pool is at the MaxActive limit, then Get() waits + // for a connection to be returned to the pool before returning. + // Default: true + Wait bool `yaml:"wait" envconfig:"REDIS_WAIT"` + + // ReadTimeout specifies the timeout in seconds for reading a single command reply. + // Default: 15 + ReadTimeout int `yaml:"read_timeout" envconfig:"REDIS_READ_TIMEOUT"` + + // WriteTimeout specifies the timeout in seconds for writing a single command. + // Default: 15 + WriteTimeout int `yaml:"write_timeout" envconfig:"REDIS_WRITE_TIMEOUT"` + + // ConnectTimeout specifies the timeout in seconds for connecting to the Redis server when + // no DialNetDial option is specified. 
+ // Default: 15 + ConnectTimeout int `yaml:"connect_timeout" envconfig:"REDIS_CONNECT_TIMEOUT"` + + // NormalTasksPollPeriod specifies the period in milliseconds when polling redis for normal tasks + // Default: 1000 + NormalTasksPollPeriod int `yaml:"normal_tasks_poll_period" envconfig:"REDIS_NORMAL_TASKS_POLL_PERIOD"` + + // DelayedTasksPollPeriod specifies the period in milliseconds when polling redis for delayed tasks + // Default: 20 + DelayedTasksPollPeriod int `yaml:"delayed_tasks_poll_period" envconfig:"REDIS_DELAYED_TASKS_POLL_PERIOD"` + DelayedTasksKey string `yaml:"delayed_tasks_key" envconfig:"REDIS_DELAYED_TASKS_KEY"` + + // MasterName specifies a redis master name in order to configure a sentinel-backed redis FailoverClient + MasterName string `yaml:"master_name" envconfig:"REDIS_MASTER_NAME"` +} + +// GCPPubSubConfig wraps GCP PubSub related configuration +type GCPPubSubConfig struct { + Client *pubsub.Client + MaxExtension time.Duration +} + +// MongoDBConfig ... +type MongoDBConfig struct { + Client *mongo.Client + Database string +} + +// Decode from yaml to map (any field whose type or pointer-to-type implements +// envconfig.Decoder can control its own deserialization) +func (args *QueueBindingArgs) Decode(value string) error { + pairs := strings.Split(value, ",") + mp := make(map[string]interface{}, len(pairs)) + for _, pair := range pairs { + kvpair := strings.Split(pair, ":") + if len(kvpair) != 2 { + return fmt.Errorf("invalid map item: %q", pair) + } + mp[kvpair[0]] = kvpair[1] + } + *args = QueueBindingArgs(mp) + return nil +} diff --git a/v2/config/env.go b/v2/config/env.go new file mode 100644 index 000000000..a0c935bdd --- /dev/null +++ b/v2/config/env.go @@ -0,0 +1,37 @@ +package config + +import ( + "github.com/kelseyhightower/envconfig" + + "github.com/RichardKnop/machinery/v2/log" +) + +// NewFromEnvironment creates a config object from environment variables +func NewFromEnvironment() (*Config, error) { + cnf, err := 
fromEnvironment() + if err != nil { + return nil, err + } + + log.INFO.Print("Successfully loaded config from the environment") + + return cnf, nil +} + +func fromEnvironment() (*Config, error) { + loadedCnf, cnf := new(Config), new(Config) + *cnf = *defaultCnf + + if err := envconfig.Process("", cnf); err != nil { + return nil, err + } + if err := envconfig.Process("", loadedCnf); err != nil { + return nil, err + } + + if loadedCnf.AMQP == nil { + cnf.AMQP = nil + } + + return cnf, nil +} diff --git a/v2/config/env_test.go b/v2/config/env_test.go new file mode 100644 index 000000000..799e8f2b4 --- /dev/null +++ b/v2/config/env_test.go @@ -0,0 +1,46 @@ +package config_test + +import ( + "bufio" + "os" + "strings" + "testing" + + "github.com/RichardKnop/machinery/v2/config" + "github.com/stretchr/testify/assert" +) + +func TestNewFromEnvironment(t *testing.T) { + t.Parallel() + + file, err := os.Open("test.env") + if err != nil { + t.Fatal(err) + } + reader := bufio.NewReader(file) + scanner := bufio.NewScanner(reader) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + parts := strings.Split(scanner.Text(), "=") + if len(parts) != 2 { + continue + } + os.Setenv(parts[0], parts[1]) + } + + cnf, err := config.NewFromEnvironment() + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "broker", cnf.Broker) + assert.Equal(t, "default_queue", cnf.DefaultQueue) + assert.Equal(t, "result_backend", cnf.ResultBackend) + assert.Equal(t, 123456, cnf.ResultsExpireIn) + assert.Equal(t, "exchange", cnf.AMQP.Exchange) + assert.Equal(t, "exchange_type", cnf.AMQP.ExchangeType) + assert.Equal(t, "binding_key", cnf.AMQP.BindingKey) + assert.Equal(t, "any", cnf.AMQP.QueueBindingArgs["x-match"]) + assert.Equal(t, "png", cnf.AMQP.QueueBindingArgs["image-type"]) + assert.Equal(t, 123, cnf.AMQP.PrefetchCount) +} diff --git a/v2/config/file.go b/v2/config/file.go new file mode 100644 index 000000000..acc190348 --- /dev/null +++ b/v2/config/file.go @@ -0,0 +1,82 @@ +package config + 
+import ( + "fmt" + "os" + "time" + + "github.com/RichardKnop/machinery/v2/log" + "gopkg.in/yaml.v2" +) + +// NewFromYaml creates a config object from YAML file +func NewFromYaml(cnfPath string, keepReloading bool) (*Config, error) { + cnf, err := fromFile(cnfPath) + if err != nil { + return nil, err + } + + log.INFO.Printf("Successfully loaded config from file %s", cnfPath) + + if keepReloading { + // Open a goroutine to watch remote changes forever + go func() { + for { + // Delay after each request + time.Sleep(reloadDelay) + + // Attempt to reload the config + newCnf, newErr := fromFile(cnfPath) + if newErr != nil { + log.WARNING.Printf("Failed to reload config from file %s: %v", cnfPath, newErr) + continue + } + + *cnf = *newCnf + } + }() + } + + return cnf, nil +} + +// ReadFromFile reads data from a file +func ReadFromFile(cnfPath string) ([]byte, error) { + file, err := os.Open(cnfPath) + + // Config file not found + if err != nil { + return nil, fmt.Errorf("Open file error: %s", err) + } + + // Config file found, let's try to read it + data := make([]byte, 1000) + count, err := file.Read(data) + if err != nil { + return nil, fmt.Errorf("Read from file error: %s", err) + } + + return data[:count], nil +} + +func fromFile(cnfPath string) (*Config, error) { + loadedCnf, cnf := new(Config), new(Config) + *cnf = *defaultCnf + + data, err := ReadFromFile(cnfPath) + if err != nil { + return nil, err + } + + if err := yaml.Unmarshal(data, cnf); err != nil { + return nil, fmt.Errorf("Unmarshal YAML error: %s", err) + } + if err := yaml.Unmarshal(data, loadedCnf); err != nil { + return nil, fmt.Errorf("Unmarshal YAML error: %s", err) + } + if loadedCnf.AMQP == nil { + cnf.AMQP = nil + } + + return cnf, nil +} diff --git a/v2/config/file_test.go b/v2/config/file_test.go new file mode 100644 index 000000000..8922e3b83 --- /dev/null +++ b/v2/config/file_test.go @@ -0,0 +1,97 @@ +package config_test + +import ( + "testing" + + 
"github.com/RichardKnop/machinery/v2/config" + "github.com/stretchr/testify/assert" +) + +var configYAMLData = `--- +broker: broker +default_queue: default_queue +result_backend: result_backend +results_expire_in: 123456 +amqp: + binding_key: binding_key + exchange: exchange + exchange_type: exchange_type + prefetch_count: 123 + queue_declare_args: + x-max-priority: 10 + queue_binding_args: + image-type: png + x-match: any +sqs: + receive_wait_time_seconds: 123 + receive_visibility_timeout: 456 +redis: + max_idle: 12 + max_active: 123 + max_idle_timeout: 456 + wait: false + read_timeout: 17 + write_timeout: 19 + connect_timeout: 21 + normal_tasks_poll_period: 1001 + delayed_tasks_poll_period: 23 + delayed_tasks_key: delayed_tasks_key + master_name: master_name +no_unix_signals: true +dynamodb: + task_states_table: task_states_table + group_metas_table: group_metas_table +` + +func TestReadFromFile(t *testing.T) { + t.Parallel() + + data, err := config.ReadFromFile("testconfig.yml") + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, configYAMLData, string(data)) +} + +func TestNewFromYaml(t *testing.T) { + t.Parallel() + + cnf, err := config.NewFromYaml("testconfig.yml", false) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "broker", cnf.Broker) + assert.Equal(t, "default_queue", cnf.DefaultQueue) + assert.Equal(t, "result_backend", cnf.ResultBackend) + assert.Equal(t, 123456, cnf.ResultsExpireIn) + + assert.Equal(t, "exchange", cnf.AMQP.Exchange) + assert.Equal(t, "exchange_type", cnf.AMQP.ExchangeType) + assert.Equal(t, "binding_key", cnf.AMQP.BindingKey) + assert.Equal(t, 10, cnf.AMQP.QueueDeclareArgs["x-max-priority"]) + assert.Equal(t, "any", cnf.AMQP.QueueBindingArgs["x-match"]) + assert.Equal(t, "png", cnf.AMQP.QueueBindingArgs["image-type"]) + assert.Equal(t, 123, cnf.AMQP.PrefetchCount) + + assert.Equal(t, 123, cnf.SQS.WaitTimeSeconds) + assert.Equal(t, 456, *cnf.SQS.VisibilityTimeout) + + assert.Equal(t, 12, cnf.Redis.MaxIdle) + 
assert.Equal(t, 123, cnf.Redis.MaxActive) + assert.Equal(t, 456, cnf.Redis.IdleTimeout) + assert.Equal(t, false, cnf.Redis.Wait) + assert.Equal(t, 17, cnf.Redis.ReadTimeout) + assert.Equal(t, 19, cnf.Redis.WriteTimeout) + assert.Equal(t, 21, cnf.Redis.ConnectTimeout) + assert.Equal(t, 1001, cnf.Redis.NormalTasksPollPeriod) + assert.Equal(t, 23, cnf.Redis.DelayedTasksPollPeriod) + assert.Equal(t, "delayed_tasks_key", cnf.Redis.DelayedTasksKey) + assert.Equal(t, "master_name", cnf.Redis.MasterName) + + assert.Equal(t, true, cnf.NoUnixSignals) + + assert.Equal(t, "task_states_table", cnf.DynamoDB.TaskStatesTable) + assert.Equal(t, "group_metas_table", cnf.DynamoDB.GroupMetasTable) +} diff --git a/v2/config/test.env b/v2/config/test.env new file mode 100644 index 000000000..cfbb935a2 --- /dev/null +++ b/v2/config/test.env @@ -0,0 +1,9 @@ +BROKER=broker +DEFAULT_QUEUE=default_queue +RESULT_BACKEND=result_backend +RESULTS_EXPIRE_IN=123456 +AMQP_BINDING_KEY=binding_key +AMQP_EXCHANGE=exchange +AMQP_EXCHANGE_TYPE=exchange_type +AMQP_PREFETCH_COUNT=123 +AMQP_QUEUE_BINDING_ARGS=image-type:png,x-match:any diff --git a/v2/config/testconfig.yml b/v2/config/testconfig.yml new file mode 100644 index 000000000..4fd83ecbd --- /dev/null +++ b/v2/config/testconfig.yml @@ -0,0 +1,34 @@ +--- +broker: broker +default_queue: default_queue +result_backend: result_backend +results_expire_in: 123456 +amqp: + binding_key: binding_key + exchange: exchange + exchange_type: exchange_type + prefetch_count: 123 + queue_declare_args: + x-max-priority: 10 + queue_binding_args: + image-type: png + x-match: any +sqs: + receive_wait_time_seconds: 123 + receive_visibility_timeout: 456 +redis: + max_idle: 12 + max_active: 123 + max_idle_timeout: 456 + wait: false + read_timeout: 17 + write_timeout: 19 + connect_timeout: 21 + normal_tasks_poll_period: 1001 + delayed_tasks_poll_period: 23 + delayed_tasks_key: delayed_tasks_key + master_name: master_name +no_unix_signals: true +dynamodb: + 
task_states_table: task_states_table + group_metas_table: group_metas_table diff --git a/example/v2/amqp/main.go b/v2/example/amqp/main.go similarity index 96% rename from example/v2/amqp/main.go rename to v2/example/amqp/main.go index d7e0fd66c..01427cf6a 100644 --- a/example/v2/amqp/main.go +++ b/v2/example/amqp/main.go @@ -10,16 +10,16 @@ import ( "github.com/google/uuid" "github.com/urfave/cli" - "github.com/RichardKnop/machinery/v1/config" - "github.com/RichardKnop/machinery/v1/log" - "github.com/RichardKnop/machinery/v1/tasks" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" "github.com/RichardKnop/machinery/v2" - exampletasks "github.com/RichardKnop/machinery/example/tasks" - tracers "github.com/RichardKnop/machinery/example/tracers" - amqpbackend "github.com/RichardKnop/machinery/v1/backends/amqp" - amqpbroker "github.com/RichardKnop/machinery/v1/brokers/amqp" - eagerlock "github.com/RichardKnop/machinery/v1/locks/eager" + exampletasks "github.com/RichardKnop/machinery/v2/example/tasks" + tracers "github.com/RichardKnop/machinery/v2/example/tracers" + amqpbackend "github.com/RichardKnop/machinery/v2/backends/amqp" + amqpbroker "github.com/RichardKnop/machinery/v2/brokers/amqp" + eagerlock "github.com/RichardKnop/machinery/v2/locks/eager" opentracing "github.com/opentracing/opentracing-go" opentracing_log "github.com/opentracing/opentracing-go/log" ) diff --git a/example/v2/go-redis/main.go b/v2/example/go-redis/main.go similarity index 96% rename from example/v2/go-redis/main.go rename to v2/example/go-redis/main.go index 9ebfe7ac6..2a5859e76 100644 --- a/example/v2/go-redis/main.go +++ b/v2/example/go-redis/main.go @@ -10,16 +10,16 @@ import ( "github.com/google/uuid" "github.com/urfave/cli" - "github.com/RichardKnop/machinery/v1/config" - "github.com/RichardKnop/machinery/v1/log" - "github.com/RichardKnop/machinery/v1/tasks" + 
"github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" "github.com/RichardKnop/machinery/v2" - exampletasks "github.com/RichardKnop/machinery/example/tasks" - tracers "github.com/RichardKnop/machinery/example/tracers" - redisbackend "github.com/RichardKnop/machinery/v1/backends/redis" - redisbroker "github.com/RichardKnop/machinery/v1/brokers/redis" - eagerlock "github.com/RichardKnop/machinery/v1/locks/eager" + exampletasks "github.com/RichardKnop/machinery/v2/example/tasks" + tracers "github.com/RichardKnop/machinery/v2/example/tracers" + redisbackend "github.com/RichardKnop/machinery/v2/backends/redis" + redisbroker "github.com/RichardKnop/machinery/v2/brokers/redis" + eagerlock "github.com/RichardKnop/machinery/v2/locks/eager" opentracing "github.com/opentracing/opentracing-go" opentracing_log "github.com/opentracing/opentracing-go/log" ) diff --git a/example/v2/redigo/main.go b/v2/example/redigo/main.go similarity index 96% rename from example/v2/redigo/main.go rename to v2/example/redigo/main.go index f57197040..9718564ce 100644 --- a/example/v2/redigo/main.go +++ b/v2/example/redigo/main.go @@ -10,16 +10,16 @@ import ( "github.com/google/uuid" "github.com/urfave/cli" - "github.com/RichardKnop/machinery/v1/config" - "github.com/RichardKnop/machinery/v1/log" - "github.com/RichardKnop/machinery/v1/tasks" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" "github.com/RichardKnop/machinery/v2" - exampletasks "github.com/RichardKnop/machinery/example/tasks" - tracers "github.com/RichardKnop/machinery/example/tracers" - redisbackend "github.com/RichardKnop/machinery/v1/backends/redis" - redisbroker "github.com/RichardKnop/machinery/v1/brokers/redis" - eagerlock "github.com/RichardKnop/machinery/v1/locks/eager" + exampletasks "github.com/RichardKnop/machinery/v2/example/tasks" + tracers 
"github.com/RichardKnop/machinery/v2/example/tracers" + redisbackend "github.com/RichardKnop/machinery/v2/backends/redis" + redisbroker "github.com/RichardKnop/machinery/v2/brokers/redis" + eagerlock "github.com/RichardKnop/machinery/v2/locks/eager" opentracing "github.com/opentracing/opentracing-go" opentracing_log "github.com/opentracing/opentracing-go/log" ) diff --git a/v2/example/tasks/tasks.go b/v2/example/tasks/tasks.go new file mode 100644 index 000000000..750b82635 --- /dev/null +++ b/v2/example/tasks/tasks.go @@ -0,0 +1,75 @@ +package exampletasks + +import ( + "errors" + "strings" + "time" + + "github.com/RichardKnop/machinery/v2/log" +) + +// Add ... +func Add(args ...int64) (int64, error) { + sum := int64(0) + for _, arg := range args { + sum += arg + } + return sum, nil +} + +// Multiply ... +func Multiply(args ...int64) (int64, error) { + sum := int64(1) + for _, arg := range args { + sum *= arg + } + return sum, nil +} + +// SumInts ... +func SumInts(numbers []int64) (int64, error) { + var sum int64 + for _, num := range numbers { + sum += num + } + return sum, nil +} + +// SumFloats ... +func SumFloats(numbers []float64) (float64, error) { + var sum float64 + for _, num := range numbers { + sum += num + } + return sum, nil +} + +// Concat ... +func Concat(strs []string) (string, error) { + var res string + for _, s := range strs { + res += s + } + return res, nil +} + +// Split ... +func Split(str string) ([]string, error) { + return strings.Split(str, ""), nil +} + +// PanicTask ... +func PanicTask() (string, error) { + panic(errors.New("oops")) +} + +// LongRunningTask ... 
+func LongRunningTask() error { + log.INFO.Print("Long running task started") + for i := 0; i < 10; i++ { + log.INFO.Print(10 - i) + time.Sleep(1 * time.Second) + } + log.INFO.Print("Long running task finished") + return nil +} diff --git a/v2/example/tracers/jaeger.go b/v2/example/tracers/jaeger.go new file mode 100644 index 000000000..6824d8244 --- /dev/null +++ b/v2/example/tracers/jaeger.go @@ -0,0 +1,41 @@ +package tracers + +// Uncomment the import statement for the jaeger tracer. +// make sure you run dep ensure to pull in the jaeger client +// +// import ( +// jaeger "github.com/uber/jaeger-client-go" +// jaegercfg "github.com/uber/jaeger-client-go/config" +// ) + +// SetupTracer is the place where you'd setup your specific tracer. +// The jaeger tracer is given as an example. +// To capture the jaeger traces you should run the jaeger backend. +// This can be done using the following docker command: +// +// `docker run -ti --rm -p6831:6831/udp -p16686:16686 jaegertracing/all-in-one:latest` +// +// The collector will be listening on localhost:6831 +// and the query UI is reachable on localhost:16686. 
+func SetupTracer(serviceName string) (func(), error) { + + // Jaeger setup code + // + // config := jaegercfg.Configuration{ + // Sampler: &jaegercfg.SamplerConfig{ + // Type: jaeger.SamplerTypeConst, + // Param: 1, + // }, + // } + + // closer, err := config.InitGlobalTracer(serviceName) + // if err != nil { + // return nil, err + // } + + cleanupFunc := func() { + // closer.Close() + } + + return cleanupFunc, nil +} diff --git a/v2/go.mod b/v2/go.mod new file mode 100644 index 000000000..929eed489 --- /dev/null +++ b/v2/go.mod @@ -0,0 +1,25 @@ +module github.com/RichardKnop/machinery/v2 + +go 1.15 + +require ( + cloud.google.com/go/pubsub v1.10.0 + github.com/RichardKnop/logging v0.0.0-20190827224416-1a693bdd4fae + github.com/aws/aws-sdk-go v1.37.16 + github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b + github.com/go-redis/redis/v8 v8.6.0 + github.com/go-redsync/redsync/v4 v4.0.4 + github.com/gomodule/redigo v2.0.0+incompatible + github.com/google/uuid v1.2.0 + github.com/kelseyhightower/envconfig v1.4.0 + github.com/opentracing/opentracing-go v1.2.0 + github.com/pkg/errors v0.9.1 + github.com/robfig/cron/v3 v3.0.1 + github.com/streadway/amqp v1.0.0 + github.com/stretchr/testify v1.7.0 + github.com/urfave/cli v1.22.5 + go.mongodb.org/mongo-driver v1.4.6 + gopkg.in/yaml.v2 v2.4.0 +) + +replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 diff --git a/v2/go.sum b/v2/go.sum new file mode 100644 index 000000000..56fb09837 --- /dev/null +++ b/v2/go.sum @@ -0,0 +1,559 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go 
v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= 
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.10.0/go.mod h1:eNpTrkOy7dCpkNyaSNetMa6udbgecJMd0ZsTJS/cuNo= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/RichardKnop/logging v0.0.0-20190827224416-1a693bdd4fae/go.mod h1:rJJ84PyA/Wlmw1hO+xTzV2wsSUon6J5ktg0g8BF2PuU= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.37.16/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-redis/redis/v8 v8.1.1/go.mod h1:ysgGY09J/QeDYbu3HikWEIPCwaeOkuNoTgKayTEaEOw= +github.com/go-redis/redis/v8 v8.6.0/go.mod h1:DQ9q4Rk2HtwkrwVrdgmphoOQDMfpvcd/nHEwRsicg8s= +github.com/go-redsync/redsync/v4 v4.0.4/go.mod h1:QBOJAs1k8O6Eyrre4a++pxQgHe5eQ+HF56KuTVv+8Bs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod 
h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod 
h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod 
h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.1/go.mod 
h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= 
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.mongodb.org/mongo-driver v1.4.6/go.mod 
h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= +go.opentelemetry.io/otel v0.17.0/go.mod h1:Oqtdxmf7UtEvL037ohlgnaYa1h7GtMh0NcSd9eqkC9s= +go.opentelemetry.io/otel/metric v0.17.0/go.mod h1:hUz9lH1rNXyEwWAhIWCMFWKhYtpASgSnObJFnU26dJ0= +go.opentelemetry.io/otel/oteltest v0.17.0/go.mod h1:JT/LGFxPwpN+nlsTiinSYjdIx3hZIGqHCpChcIZmdoE= +go.opentelemetry.io/otel/trace v0.17.0/go.mod h1:bIujpqg6ZL6xUTubIUgziI1jSaUPthmabA/ygf/6Cfg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod 
h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/v2/integration-tests/amqp_amqp_test.go b/v2/integration-tests/amqp_amqp_test.go new file mode 100644 index 000000000..4c8491627 --- /dev/null +++ b/v2/integration-tests/amqp_amqp_test.go @@ -0,0 +1,59 @@ +package integration_test + +import ( + "os" + "testing" + + "github.com/RichardKnop/machinery/v2" + "github.com/RichardKnop/machinery/v2/config" + + amqpbackend "github.com/RichardKnop/machinery/v2/backends/amqp" + amqpbroker "github.com/RichardKnop/machinery/v2/brokers/amqp" + eagerlock "github.com/RichardKnop/machinery/v2/locks/eager" +) + +func TestAmqpAmqp(t *testing.T) { + amqpURL := os.Getenv("AMQP_URL") + if amqpURL == "" { + t.Skip("AMQP_URL is not defined") + } + + finalAmqpURL := amqpURL + var finalSeparator string + + amqpURLs := os.Getenv("AMQP_URLS") + if amqpURLs != "" { + separator := os.Getenv("AMQP_URLS_SEPARATOR") + if separator == "" { + return + } + finalSeparator = separator + finalAmqpURL = amqpURLs + } + + cnf := &config.Config{ + Broker: finalAmqpURL, + MultipleBrokerSeparator: finalSeparator, + DefaultQueue: "machinery_tasks", + ResultBackend: amqpURL, + ResultsExpireIn: 3600, + AMQP: 
&config.AMQPConfig{ + Exchange: "test_exchange", + ExchangeType: "direct", + BindingKey: "test_task", + PrefetchCount: 1, + }, + } + + broker := amqpbroker.New(cnf) + backend := amqpbackend.New(cnf) + lock := eagerlock.New() + server := machinery.NewServer(cnf, broker, backend, lock) + + registerTestTasks(server) + + worker := server.NewWorker("test_worker", 0) + defer worker.Quit() + go worker.Launch() + testAll(server, t) +} diff --git a/v2/integration-tests/redis_redis_test.go b/v2/integration-tests/redis_redis_test.go new file mode 100644 index 000000000..2b1da4539 --- /dev/null +++ b/v2/integration-tests/redis_redis_test.go @@ -0,0 +1,49 @@ +package integration_test + +import ( + "errors" + "fmt" + "os" + "testing" + "time" + + "github.com/RichardKnop/machinery/v2" + "github.com/RichardKnop/machinery/v2/config" + + redisbackend "github.com/RichardKnop/machinery/v2/backends/redis" + redisbroker "github.com/RichardKnop/machinery/v2/brokers/redis" + eagerlock "github.com/RichardKnop/machinery/v2/locks/eager" +) + +func TestRedisRedis_GoRedis(t *testing.T) { + redisURL := os.Getenv("REDIS_URL") + if redisURL == "" { + t.Skip("REDIS_URL is not defined") + } + + cnf := &config.Config{ + DefaultQueue: "machinery_tasks", + ResultsExpireIn: 3600, + Redis: &config.RedisConfig{ + MaxIdle: 3, + IdleTimeout: 240, + ReadTimeout: 15, + WriteTimeout: 15, + ConnectTimeout: 15, + NormalTasksPollPeriod: 1000, + DelayedTasksPollPeriod: 500, + }, + } + + broker := redisbroker.NewGR(cnf, []string{redisURL}, 0) + backend := redisbackend.NewGR(cnf, []string{redisURL}, 0) + lock := eagerlock.New() + server := machinery.NewServer(cnf, broker, backend, lock) + + registerTestTasks(server) + + worker := server.NewWorker("test_worker", 0) + defer worker.Quit() + go worker.Launch() + testAll(server, t) +} \ No newline at end of file diff --git a/v2/integration-tests/suite_test.go b/v2/integration-tests/suite_test.go new file mode 100644 index 000000000..b57458c75 --- /dev/null +++ 
b/v2/integration-tests/suite_test.go @@ -0,0 +1,488 @@ +package integration_test + +import ( + "context" + "errors" + "log" + "reflect" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/RichardKnop/machinery/v2" + "github.com/RichardKnop/machinery/v2/backends/result" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/tasks" + + brokersiface "github.com/RichardKnop/machinery/v2/brokers/iface" +) + +type ascendingInt64s []int64 + +func (a ascendingInt64s) Len() int { return len(a) } +func (a ascendingInt64s) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ascendingInt64s) Less(i, j int) bool { return a[i] < a[j] } + +type Server interface { + GetBroker() brokersiface.Broker + GetConfig() *config.Config + RegisterTasks(namedTaskFuncs map[string]interface{}) error + SendTaskWithContext(ctx context.Context, signature *tasks.Signature) (*result.AsyncResult, error) + SendTask(signature *tasks.Signature) (*result.AsyncResult, error) + SendChainWithContext(ctx context.Context, chain *tasks.Chain) (*result.ChainAsyncResult, error) + SendChain(chain *tasks.Chain) (*result.ChainAsyncResult, error) + SendGroupWithContext(ctx context.Context, group *tasks.Group, sendConcurrency int) ([]*result.AsyncResult, error) + SendGroup(group *tasks.Group, sendConcurrency int) ([]*result.AsyncResult, error) + SendChordWithContext(ctx context.Context, chord *tasks.Chord, sendConcurrency int) (*result.ChordAsyncResult, error) + SendChord(chord *tasks.Chord, sendConcurrency int) (*result.ChordAsyncResult, error) +} + +func testAll(server Server, t *testing.T) { + testSendTask(server, t) + testSendGroup(server, t, 0) // with unlimited concurrency + testSendGroup(server, t, 2) // with limited concurrency (2 parallel tasks at the most) + testSendChord(server, t) + testSendChain(server, t) + testReturnJustError(server, t) + testReturnMultipleValues(server, t) + testPanic(server, t) + testDelay(server, t) +} + +func 
testSendTask(server Server, t *testing.T) { + addTask := newAddTask(1, 1) + + asyncResult, err := server.SendTask(addTask) + if err != nil { + t.Error(err) + } + + results, err := asyncResult.Get(time.Duration(time.Millisecond * 5)) + if err != nil { + t.Error(err) + } + + if len(results) != 1 { + t.Errorf("Number of results returned = %d. Wanted %d", len(results), 1) + } + + if results[0].Interface() != int64(2) { + t.Errorf( + "result = %v(%v), want int64(2)", + results[0].Type().String(), + results[0].Interface(), + ) + } + + sumTask := newSumTask([]int64{1, 2}) + asyncResult, err = server.SendTask(sumTask) + if err != nil { + t.Error(err) + } + + results, err = asyncResult.Get(time.Duration(time.Millisecond * 5)) + if err != nil { + t.Error(err) + } + + if len(results) != 1 { + t.Errorf("Number of results returned = %d. Wanted %d", len(results), 1) + } + + if results[0].Interface() != int64(3) { + t.Errorf( + "result = %v(%v), want int64(3)", + results[0].Type().String(), + results[0].Interface(), + ) + } +} + +func testSendGroup(server Server, t *testing.T, sendConcurrency int) { + t1, t2, t3 := newAddTask(1, 1), newAddTask(2, 2), newAddTask(5, 6) + + group, err := tasks.NewGroup(t1, t2, t3) + if err != nil { + t.Fatal(err) + } + + asyncResults, err := server.SendGroup(group, sendConcurrency) + if err != nil { + t.Error(err) + } + + expectedResults := []int64{2, 4, 11} + + actualResults := make([]int64, 3) + + for i, asyncResult := range asyncResults { + results, err := asyncResult.Get(time.Duration(time.Millisecond * 5)) + if err != nil { + t.Error(err) + } + + if len(results) != 1 { + t.Errorf("Number of results returned = %d. 
Wanted %d", len(results), 1) + } + + intResult, ok := results[0].Interface().(int64) + if !ok { + t.Errorf("Could not convert %v to int64", results[0].Interface()) + } + actualResults[i] = intResult + } + + sort.Sort(ascendingInt64s(actualResults)) + + if !reflect.DeepEqual(expectedResults, actualResults) { + t.Errorf( + "expected results = %v, actual results = %v", + expectedResults, + actualResults, + ) + } +} + +func testSendChain(server Server, t *testing.T) { + t1, t2, t3 := newAddTask(2, 2), newAddTask(5, 6), newMultipleTask(4) + + chain, err := tasks.NewChain(t1, t2, t3) + if err != nil { + t.Fatal(err) + } + + chainAsyncResult, err := server.SendChain(chain) + if err != nil { + t.Error(err) + } + + results, err := chainAsyncResult.Get(time.Duration(time.Millisecond * 5)) + if err != nil { + t.Error(err) + } + + if len(results) != 1 { + t.Errorf("Number of results returned = %d. Wanted %d", len(results), 1) + } + + if results[0].Interface() != int64(60) { + t.Errorf( + "result = %v(%v), want int64(60)", + results[0].Type().String(), + results[0].Interface(), + ) + } +} + +func testSendChord(server Server, t *testing.T) { + t1, t2, t3, t4 := newAddTask(1, 1), newAddTask(2, 2), newAddTask(5, 6), newMultipleTask() + + group, err := tasks.NewGroup(t1, t2, t3) + if err != nil { + t.Fatal(err) + } + + chord, err := tasks.NewChord(group, t4) + if err != nil { + t.Fatal(err) + } + + chordAsyncResult, err := server.SendChord(chord, 10) + if err != nil { + t.Error(err) + } + + results, err := chordAsyncResult.Get(time.Duration(time.Millisecond * 5)) + if err != nil { + t.Error(err) + } + + if len(results) != 1 { + t.Errorf("Number of results returned = %d. 
Wanted %d", len(results), 1) + } + + if results[0].Interface() != int64(88) { + t.Errorf( + "result = %v(%v), want int64(88)", + results[0].Type().String(), + results[0].Interface(), + ) + } +} + +func testReturnJustError(server Server, t *testing.T) { + // Fails, returns error as the only value + task := newErrorTask("Test error", true) + asyncResult, err := server.SendTask(task) + if err != nil { + t.Error(err) + } + + results, err := asyncResult.Get(time.Duration(time.Millisecond * 5)) + if len(results) != 0 { + t.Errorf("Number of results returned = %d. Wanted %d", len(results), 0) + } + assert.Equal(t, "Test error", err.Error()) + + // Successful, returns nil as the only value + task = newErrorTask("", false) + asyncResult, err = server.SendTask(task) + if err != nil { + t.Error(err) + } + + results, err = asyncResult.Get(time.Duration(time.Millisecond * 5)) + if len(results) != 0 { + t.Errorf("Number of results returned = %d. Wanted %d", len(results), 0) + } + assert.NoError(t, err) +} + +func testReturnMultipleValues(server Server, t *testing.T) { + // Successful task with multiple return values + task := newMultipleReturnTask("foo", "bar", false) + + asyncResult, err := server.SendTask(task) + if err != nil { + t.Error(err) + } + + results, err := asyncResult.Get(time.Duration(time.Millisecond * 5)) + if err != nil { + t.Error(err) + } + + if len(results) != 2 { + t.Errorf("Number of results returned = %d. 
Wanted %d", len(results), 2) + } + + if results[0].Interface() != "foo" { + t.Errorf( + "result = %v(%v), want string(\"foo\":)", + results[0].Type().String(), + results[0].Interface(), + ) + } + + if results[1].Interface() != "bar" { + t.Errorf( + "result = %v(%v), want string(\"bar\":)", + results[1].Type().String(), + results[1].Interface(), + ) + } + + // Failed task with multiple return values + task = newMultipleReturnTask("", "", true) + + asyncResult, err = server.SendTask(task) + if err != nil { + t.Error(err) + } + + results, err = asyncResult.Get(time.Duration(time.Millisecond * 5)) + if len(results) != 0 { + t.Errorf("Number of results returned = %d. Wanted %d", len(results), 0) + } + assert.Error(t, err) +} + +func testPanic(server Server, t *testing.T) { + task := &tasks.Signature{Name: "panic"} + asyncResult, err := server.SendTask(task) + if err != nil { + t.Error(err) + } + + results, err := asyncResult.Get(time.Duration(time.Millisecond * 5)) + if len(results) != 0 { + t.Errorf("Number of results returned = %d. Wanted %d", len(results), 0) + } + assert.Equal(t, "oops", err.Error()) +} + +func testDelay(server Server, t *testing.T) { + now := time.Now().UTC() + eta := now.Add(100 * time.Millisecond) + task := newDelayTask(eta) + asyncResult, err := server.SendTask(task) + if err != nil { + t.Error(err) + } + + results, err := asyncResult.Get(time.Duration(5 * time.Millisecond)) + if err != nil { + t.Error(err) + } + + if len(results) != 1 { + t.Errorf("Number of results returned = %d. 
Wanted %d", len(results), 1) + } + + tm, ok := results[0].Interface().(int64) + if !ok { + t.Errorf( + "Could not type assert = %v(%v) to int64", + results[0].Type().String(), + results[0].Interface(), + ) + } + + if tm < eta.UnixNano() { + t.Errorf( + "result = %v(%v), want >= int64(%d)", + results[0].Type().String(), + results[0].Interface(), + eta.UnixNano(), + ) + } +} + +func registerTestTasks(server Server) { + + tasks := map[string]interface{}{ + "add": func(args ...int64) (int64, error) { + sum := int64(0) + for _, arg := range args { + sum += arg + } + return sum, nil + }, + "multiply": func(args ...int64) (int64, error) { + sum := int64(1) + for _, arg := range args { + sum *= arg + } + return sum, nil + }, + "sum": func(numbers []int64) (int64, error) { + var sum int64 + for _, num := range numbers { + sum += num + } + return sum, nil + }, + "return_just_error": func(msg string, fail bool) (err error) { + if fail { + err = errors.New(msg) + } + return err + }, + "return_multiple_values": func(arg1, arg2 string, fail bool) (r1 string, r2 string, err error) { + if fail { + err = errors.New("some error") + } else { + r1 = arg1 + r2 = arg2 + } + return r1, r2, err + }, + "panic": func() (string, error) { + panic(errors.New("oops")) + }, + "delay_test": func() (int64, error) { + return time.Now().UTC().UnixNano(), nil + }, + } + + server.RegisterTasks(tasks) +} + +func testSetup(cnf *config.Config) Server { + + server, err := machinery.NewServer(cnf) + if err != nil { + log.Fatal(err, "Could not initialize server") + } + + registerTestTasks(server) + + return server +} + +func newAddTask(a, b int) *tasks.Signature { + return &tasks.Signature{ + Name: "add", + Args: []tasks.Arg{ + { + Type: "int64", + Value: a, + }, + { + Type: "int64", + Value: b, + }, + }, + } +} + +func newMultipleTask(nums ...int) *tasks.Signature { + args := make([]tasks.Arg, len(nums)) + for i, n := range nums { + args[i] = tasks.Arg{ + Type: "int64", + Value: n, + } + } + return 
&tasks.Signature{ + Name: "multiply", + Args: args, + } +} + +func newSumTask(nums []int64) *tasks.Signature { + return &tasks.Signature{ + Name: "sum", + Args: []tasks.Arg{ + { + Type: "[]int64", + Value: nums, + }, + }, + } +} + +func newErrorTask(msg string, fail bool) *tasks.Signature { + return &tasks.Signature{ + Name: "return_just_error", + Args: []tasks.Arg{ + { + Type: "string", + Value: msg, + }, + { + Type: "bool", + Value: fail, + }, + }, + } +} + +func newMultipleReturnTask(arg1, arg2 string, fail bool) *tasks.Signature { + return &tasks.Signature{ + Name: "return_multiple_values", + Args: []tasks.Arg{ + { + Type: "string", + Value: arg1, + }, + { + Type: "string", + Value: arg2, + }, + { + Type: "bool", + Value: fail, + }, + }, + } +} + +func newDelayTask(eta time.Time) *tasks.Signature { + return &tasks.Signature{ + Name: "delay_test", + ETA: &eta, + } +} diff --git a/v2/locks/eager/eager.go b/v2/locks/eager/eager.go new file mode 100644 index 000000000..5aa8c63e7 --- /dev/null +++ b/v2/locks/eager/eager.go @@ -0,0 +1,55 @@ +package eager + +import ( + "errors" + "sync" + "time" +) + +var ( + ErrEagerLockFailed = errors.New("eager lock: failed to acquire lock") +) + +type Lock struct { + retries int + interval time.Duration + register struct { + sync.RWMutex + m map[string]int64 + } +} + +func New() *Lock { + return &Lock{ + retries: 3, + interval: 5 * time.Second, + register: struct { + sync.RWMutex + m map[string]int64 + }{m: make(map[string]int64)}, + } +} + +func (e *Lock) LockWithRetries(key string, value int64) error { + for i := 0; i <= e.retries; i++ { + err := e.Lock(key, value) + if err == nil { + // lock acquired successfully, return + return nil + } + + time.Sleep(e.interval) + } + return ErrEagerLockFailed +} + +func (e *Lock) Lock(key string, value int64) error { + // NOTE(review): must hold the write lock here — the map is mutated below; + // RLock would allow a concurrent map write (data race under -race). + e.register.Lock() + defer e.register.Unlock() + timeout, exist := e.register.m[key] + if !exist || time.Now().UnixNano() > timeout { + e.register.m[key] = value + return nil + } + return
ErrEagerLockFailed +} diff --git a/v2/locks/eager/eager_test.go b/v2/locks/eager/eager_test.go new file mode 100644 index 000000000..bdc60d348 --- /dev/null +++ b/v2/locks/eager/eager_test.go @@ -0,0 +1,42 @@ +package eager + +import ( + lockiface "github.com/RichardKnop/machinery/v2/locks/iface" + "github.com/RichardKnop/machinery/v2/utils" + "github.com/stretchr/testify/assert" + "testing" + "time" +) + +func TestLock_Lock(t *testing.T) { + lock := New() + keyName := utils.GetPureUUID() + + go func() { + err := lock.Lock(keyName, time.Now().Add(25*time.Second).UnixNano()) + assert.NoError(t, err) + }() + time.Sleep(1 * time.Second) + err := lock.Lock(keyName, time.Now().Add(25*time.Second).UnixNano()) + assert.Error(t, err) + assert.EqualError(t, err, ErrEagerLockFailed.Error()) +} + +func TestLock_LockWithRetries(t *testing.T) { + lock := New() + keyName := utils.GetPureUUID() + + go func() { + err := lock.LockWithRetries(keyName, time.Now().Add(25*time.Second).UnixNano()) + assert.NoError(t, err) + }() + time.Sleep(1 * time.Second) + err := lock.LockWithRetries(keyName, time.Now().Add(25*time.Second).UnixNano()) + assert.Error(t, err) + assert.EqualError(t, err, ErrEagerLockFailed.Error()) +} + +func TestNew(t *testing.T) { + lock := New() + assert.Implements(t, (*lockiface.Lock)(nil), lock) +} diff --git a/v2/locks/iface/interfaces.go b/v2/locks/iface/interfaces.go new file mode 100644 index 000000000..0ceb5e7fb --- /dev/null +++ b/v2/locks/iface/interfaces.go @@ -0,0 +1,13 @@ +package iface + +type Lock interface { + //Acquire the lock with retry + //key: the name of the lock, + //value: at the nanosecond timestamp that lock needs to be released automatically + LockWithRetries(key string, value int64) error + + //Acquire the lock with once + //key: the name of the lock, + //value: at the nanosecond timestamp that lock needs to be released automatically + Lock(key string, value int64) error +} diff --git a/v2/locks/redis/redis.go b/v2/locks/redis/redis.go new 
file mode 100644 index 000000000..413337af1 --- /dev/null +++ b/v2/locks/redis/redis.go @@ -0,0 +1,109 @@ +package redis + +import ( + "errors" + "strconv" + "strings" + "time" + + "github.com/RichardKnop/machinery/v2/config" + "github.com/go-redis/redis/v8" +) + +var ( + ErrRedisLockFailed = errors.New("redis lock: failed to acquire lock") +) + +type Lock struct { + rclient redis.UniversalClient + retries int + interval time.Duration +} + +func New(cnf *config.Config, addrs []string, db, retries int) Lock { + if retries <= 0 { + return Lock{} + } + lock := Lock{retries: retries, interval: time.Second} + + var password string + + parts := strings.Split(addrs[0], "@") + if len(parts) == 2 { + password = parts[0] + addrs[0] = parts[1] + } + + ropt := &redis.UniversalOptions{ + Addrs: addrs, + DB: db, + Password: password, + } + if cnf.Redis != nil { + ropt.MasterName = cnf.Redis.MasterName + } + + lock.rclient = redis.NewUniversalClient(ropt) + + return lock +} + +func (r Lock) LockWithRetries(key string, unixTsToExpireNs int64) error { + for i := 0; i <= r.retries; i++ { + err := r.Lock(key, unixTsToExpireNs) + if err == nil { + // lock acquired successfully, return + return nil + } + + time.Sleep(r.interval) + } + return ErrRedisLockFailed +} + +func (r Lock) Lock(key string, unixTsToExpireNs int64) error { + now := time.Now().UnixNano() + expiration := time.Duration(unixTsToExpireNs + 1 - now) + ctx := r.rclient.Context() + + success, err := r.rclient.SetNX(ctx, key, unixTsToExpireNs, expiration).Result() + if err != nil { + return err + } + + if !success { + v, err := r.rclient.Get(ctx, key).Result() + if err != nil { + return err + } + timeout, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + + if timeout != 0 && now > timeout { + newTimeout, err := r.rclient.GetSet(ctx, key, unixTsToExpireNs).Result() + if err != nil { + return err + } + + curTimeout, err := strconv.ParseInt(newTimeout, 10, 64) + if err != nil { + return err + } + + if now > curTimeout { + // success to acquire lock with get set + 
// set the expiration of redis key + r.rclient.Expire(ctx, key, expiration) + return nil + } + + return ErrRedisLockFailed + } + + return ErrRedisLockFailed + } + + return nil +} diff --git a/v2/log/log.go b/v2/log/log.go new file mode 100644 index 000000000..12f382c61 --- /dev/null +++ b/v2/log/log.go @@ -0,0 +1,54 @@ +package log + +import ( + "github.com/RichardKnop/logging" +) + +var ( + logger = logging.New(nil, nil, new(logging.ColouredFormatter)) + + // DEBUG ... + DEBUG = logger[logging.DEBUG] + // INFO ... + INFO = logger[logging.INFO] + // WARNING ... + WARNING = logger[logging.WARNING] + // ERROR ... + ERROR = logger[logging.ERROR] + // FATAL ... + FATAL = logger[logging.FATAL] +) + +// Set sets a custom logger for all log levels +func Set(l logging.LoggerInterface) { + DEBUG = l + INFO = l + WARNING = l + ERROR = l + FATAL = l +} + +// SetDebug sets a custom logger for DEBUG level logs +func SetDebug(l logging.LoggerInterface) { + DEBUG = l +} + +// SetInfo sets a custom logger for INFO level logs +func SetInfo(l logging.LoggerInterface) { + INFO = l +} + +// SetWarning sets a custom logger for WARNING level logs +func SetWarning(l logging.LoggerInterface) { + WARNING = l +} + +// SetError sets a custom logger for ERROR level logs +func SetError(l logging.LoggerInterface) { + ERROR = l +} + +// SetFatal sets a custom logger for FATAL level logs +func SetFatal(l logging.LoggerInterface) { + FATAL = l +} diff --git a/v2/log/log_test.go b/v2/log/log_test.go new file mode 100644 index 000000000..1e381ac8b --- /dev/null +++ b/v2/log/log_test.go @@ -0,0 +1,14 @@ +package log_test + +import ( + "testing" + + "github.com/RichardKnop/machinery/v2/log" +) + +func TestDefaultLogger(t *testing.T) { + log.INFO.Print("should not panic") + log.WARNING.Print("should not panic") + log.ERROR.Print("should not panic") + log.FATAL.Print("should not panic") +} diff --git a/v2/retry/fibonacci.go b/v2/retry/fibonacci.go new file mode 100644 index 000000000..9a7bd1bf7 --- 
/dev/null +++ b/v2/retry/fibonacci.go @@ -0,0 +1,20 @@ +package retry + +// Fibonacci returns successive Fibonacci numbers starting from 1 +func Fibonacci() func() int { + a, b := 0, 1 + return func() int { + a, b = b, a+b + return a + } +} + +// FibonacciNext returns next number in Fibonacci sequence greater than start +func FibonacciNext(start int) int { + fib := Fibonacci() + num := fib() + for num <= start { + num = fib() + } + return num +} diff --git a/v2/retry/fibonacci_test.go b/v2/retry/fibonacci_test.go new file mode 100644 index 000000000..ea46730ab --- /dev/null +++ b/v2/retry/fibonacci_test.go @@ -0,0 +1,32 @@ +package retry_test + +import ( + "testing" + + "github.com/RichardKnop/machinery/v2/retry" + "github.com/stretchr/testify/assert" +) + +func TestFibonacci(t *testing.T) { + fibonacci := retry.Fibonacci() + + sequence := []int{ + fibonacci(), + fibonacci(), + fibonacci(), + fibonacci(), + fibonacci(), + fibonacci(), + } + + assert.EqualValues(t, sequence, []int{1, 1, 2, 3, 5, 8}) +} + +func TestFibonacciNext(t *testing.T) { + assert.Equal(t, 1, retry.FibonacciNext(0)) + assert.Equal(t, 2, retry.FibonacciNext(1)) + assert.Equal(t, 5, retry.FibonacciNext(3)) + assert.Equal(t, 5, retry.FibonacciNext(4)) + assert.Equal(t, 8, retry.FibonacciNext(5)) + assert.Equal(t, 13, retry.FibonacciNext(8)) +} diff --git a/v2/retry/retry.go b/v2/retry/retry.go new file mode 100644 index 000000000..94b1e0b68 --- /dev/null +++ b/v2/retry/retry.go @@ -0,0 +1,31 @@ +package retry + +import ( + "fmt" + "time" + + "github.com/RichardKnop/machinery/v2/log" +) + +// Closure - a useful closure we can use when there is a problem +// connecting to the broker. 
It uses Fibonacci sequence to space out retry attempts +var Closure = func() func(chan int) { + retryIn := 0 + fibonacci := Fibonacci() + return func(stopChan chan int) { + if retryIn > 0 { + durationString := fmt.Sprintf("%vs", retryIn) + duration, _ := time.ParseDuration(durationString) + + log.WARNING.Printf("Retrying in %v seconds", retryIn) + + select { + case <-stopChan: + break + case <-time.After(duration): + break + } + } + retryIn = fibonacci() + } +} diff --git a/v2/server.go b/v2/server.go index 9b8f317c8..4b737a4e5 100644 --- a/v2/server.go +++ b/v2/server.go @@ -10,16 +10,16 @@ import ( "github.com/google/uuid" "github.com/robfig/cron/v3" - "github.com/RichardKnop/machinery/v1/backends/result" - "github.com/RichardKnop/machinery/v1/config" - "github.com/RichardKnop/machinery/v1/log" - "github.com/RichardKnop/machinery/v1/tasks" - "github.com/RichardKnop/machinery/v1/tracing" - "github.com/RichardKnop/machinery/v1/utils" - - backendsiface "github.com/RichardKnop/machinery/v1/backends/iface" - brokersiface "github.com/RichardKnop/machinery/v1/brokers/iface" - lockiface "github.com/RichardKnop/machinery/v1/locks/iface" + "github.com/RichardKnop/machinery/v2/backends/result" + "github.com/RichardKnop/machinery/v2/config" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/RichardKnop/machinery/v2/tracing" + "github.com/RichardKnop/machinery/v2/utils" + + backendsiface "github.com/RichardKnop/machinery/v2/backends/iface" + brokersiface "github.com/RichardKnop/machinery/v2/brokers/iface" + lockiface "github.com/RichardKnop/machinery/v2/locks/iface" opentracing "github.com/opentracing/opentracing-go" ) diff --git a/v2/server_test.go b/v2/server_test.go index 93f4191f9..4f1966f08 100644 --- a/v2/server_test.go +++ b/v2/server_test.go @@ -6,11 +6,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/RichardKnop/machinery/v2" - "github.com/RichardKnop/machinery/v1/config" + 
"github.com/RichardKnop/machinery/v2/config" - backend "github.com/RichardKnop/machinery/v1/backends/eager" - broker "github.com/RichardKnop/machinery/v1/brokers/eager" - lock "github.com/RichardKnop/machinery/v1/locks/eager" + backend "github.com/RichardKnop/machinery/v2/backends/eager" + broker "github.com/RichardKnop/machinery/v2/brokers/eager" + lock "github.com/RichardKnop/machinery/v2/locks/eager" ) func TestRegisterTasks(t *testing.T) { diff --git a/v2/tasks/errors.go b/v2/tasks/errors.go new file mode 100644 index 000000000..fa32f97a2 --- /dev/null +++ b/v2/tasks/errors.go @@ -0,0 +1,32 @@ +package tasks + +import ( + "fmt" + "time" +) + +// ErrRetryTaskLater ... +type ErrRetryTaskLater struct { + name, msg string + retryIn time.Duration +} + +// RetryIn returns time.Duration from now when task should be retried +func (e ErrRetryTaskLater) RetryIn() time.Duration { + return e.retryIn +} + +// Error implements the error interface +func (e ErrRetryTaskLater) Error() string { + return fmt.Sprintf("Task error: %s Will retry in: %s", e.msg, e.retryIn) +} + +// NewErrRetryTaskLater returns new ErrRetryTaskLater instance +func NewErrRetryTaskLater(msg string, retryIn time.Duration) ErrRetryTaskLater { + return ErrRetryTaskLater{msg: msg, retryIn: retryIn} +} + +// Retriable is interface that retriable errors should implement +type Retriable interface { + RetryIn() time.Duration +} diff --git a/v2/tasks/reflect.go b/v2/tasks/reflect.go new file mode 100644 index 000000000..53aa7eb71 --- /dev/null +++ b/v2/tasks/reflect.go @@ -0,0 +1,352 @@ +package tasks + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +var ( + typesMap = map[string]reflect.Type{ + // base types + "bool": reflect.TypeOf(true), + "int": reflect.TypeOf(int(1)), + "int8": reflect.TypeOf(int8(1)), + "int16": reflect.TypeOf(int16(1)), + "int32": reflect.TypeOf(int32(1)), + "int64": reflect.TypeOf(int64(1)), + "uint": reflect.TypeOf(uint(1)), + "uint8": 
reflect.TypeOf(uint8(1)), + "uint16": reflect.TypeOf(uint16(1)), + "uint32": reflect.TypeOf(uint32(1)), + "uint64": reflect.TypeOf(uint64(1)), + "float32": reflect.TypeOf(float32(0.5)), + "float64": reflect.TypeOf(float64(0.5)), + "string": reflect.TypeOf(string("")), + // slices + "[]bool": reflect.TypeOf(make([]bool, 0)), + "[]int": reflect.TypeOf(make([]int, 0)), + "[]int8": reflect.TypeOf(make([]int8, 0)), + "[]int16": reflect.TypeOf(make([]int16, 0)), + "[]int32": reflect.TypeOf(make([]int32, 0)), + "[]int64": reflect.TypeOf(make([]int64, 0)), + "[]uint": reflect.TypeOf(make([]uint, 0)), + "[]uint8": reflect.TypeOf(make([]uint8, 0)), + "[]uint16": reflect.TypeOf(make([]uint16, 0)), + "[]uint32": reflect.TypeOf(make([]uint32, 0)), + "[]uint64": reflect.TypeOf(make([]uint64, 0)), + "[]float32": reflect.TypeOf(make([]float32, 0)), + "[]float64": reflect.TypeOf(make([]float64, 0)), + "[]byte": reflect.TypeOf(make([]byte, 0)), + "[]string": reflect.TypeOf([]string{""}), + } + + ctxType = reflect.TypeOf((*context.Context)(nil)).Elem() + + typeConversionError = func(argValue interface{}, argTypeStr string) error { + return fmt.Errorf("%v is not %v", argValue, argTypeStr) + } +) + +// ErrUnsupportedType ... 
+type ErrUnsupportedType struct { + valueType string +} + +// NewErrUnsupportedType returns new ErrUnsupportedType +func NewErrUnsupportedType(valueType string) ErrUnsupportedType { + return ErrUnsupportedType{valueType} +} + +// Error method so we implement the error interface +func (e ErrUnsupportedType) Error() string { + return fmt.Sprintf("%v is not one of supported types", e.valueType) +} + +// ReflectValue converts interface{} to reflect.Value based on string type +func ReflectValue(valueType string, value interface{}) (reflect.Value, error) { + if strings.HasPrefix(valueType, "[]") { + return reflectValues(valueType, value) + } + + return reflectValue(valueType, value) +} + +// reflectValue converts interface{} to reflect.Value based on string type +// representing a base type (not a slice) +func reflectValue(valueType string, value interface{}) (reflect.Value, error) { + theType, ok := typesMap[valueType] + if !ok { + return reflect.Value{}, NewErrUnsupportedType(valueType) + } + theValue := reflect.New(theType) + + // Booleans + if theType.String() == "bool" { + boolValue, err := getBoolValue(theType.String(), value) + if err != nil { + return reflect.Value{}, err + } + + theValue.Elem().SetBool(boolValue) + return theValue.Elem(), nil + } + + // Integers + if strings.HasPrefix(theType.String(), "int") { + intValue, err := getIntValue(theType.String(), value) + if err != nil { + return reflect.Value{}, err + } + + theValue.Elem().SetInt(intValue) + return theValue.Elem(), err + } + + // Unsigned integers + if strings.HasPrefix(theType.String(), "uint") { + uintValue, err := getUintValue(theType.String(), value) + if err != nil { + return reflect.Value{}, err + } + + theValue.Elem().SetUint(uintValue) + return theValue.Elem(), err + } + + // Floating point numbers + if strings.HasPrefix(theType.String(), "float") { + floatValue, err := getFloatValue(theType.String(), value) + if err != nil { + return reflect.Value{}, err + } + + 
theValue.Elem().SetFloat(floatValue) + return theValue.Elem(), err + } + + // Strings + if theType.String() == "string" { + stringValue, err := getStringValue(theType.String(), value) + if err != nil { + return reflect.Value{}, err + } + + theValue.Elem().SetString(stringValue) + return theValue.Elem(), nil + } + + return reflect.Value{}, NewErrUnsupportedType(valueType) +} + +// reflectValues converts interface{} to reflect.Value based on string type +// representing a slice of values +func reflectValues(valueType string, value interface{}) (reflect.Value, error) { + theType, ok := typesMap[valueType] + if !ok { + return reflect.Value{}, NewErrUnsupportedType(valueType) + } + + // For NULL we return an empty slice + if value == nil { + return reflect.MakeSlice(theType, 0, 0), nil + } + + var theValue reflect.Value + + // Booleans + if theType.String() == "[]bool" { + bools := reflect.ValueOf(value) + + theValue = reflect.MakeSlice(theType, bools.Len(), bools.Len()) + for i := 0; i < bools.Len(); i++ { + boolValue, err := getBoolValue(strings.Split(theType.String(), "[]")[1], bools.Index(i).Interface()) + if err != nil { + return reflect.Value{}, err + } + + theValue.Index(i).SetBool(boolValue) + } + + return theValue, nil + } + + // Integers + if strings.HasPrefix(theType.String(), "[]int") { + ints := reflect.ValueOf(value) + + theValue = reflect.MakeSlice(theType, ints.Len(), ints.Len()) + for i := 0; i < ints.Len(); i++ { + intValue, err := getIntValue(strings.Split(theType.String(), "[]")[1], ints.Index(i).Interface()) + if err != nil { + return reflect.Value{}, err + } + + theValue.Index(i).SetInt(intValue) + } + + return theValue, nil + } + + // Unsigned integers + if strings.HasPrefix(theType.String(), "[]uint") || theType.String() == "[]byte" { + + // Decode the base64 string if the value type is []uint8 or it's alias []byte + // See: https://golang.org/pkg/encoding/json/#Marshal + // > Array and slice values encode as JSON arrays, except that []byte 
encodes as a base64-encoded string + if reflect.TypeOf(value).String() == "string" { + output, err := base64.StdEncoding.DecodeString(value.(string)) + if err != nil { + return reflect.Value{}, err + } + value = output + } + + uints := reflect.ValueOf(value) + + theValue = reflect.MakeSlice(theType, uints.Len(), uints.Len()) + for i := 0; i < uints.Len(); i++ { + uintValue, err := getUintValue(strings.Split(theType.String(), "[]")[1], uints.Index(i).Interface()) + if err != nil { + return reflect.Value{}, err + } + + theValue.Index(i).SetUint(uintValue) + } + + return theValue, nil + } + + // Floating point numbers + if strings.HasPrefix(theType.String(), "[]float") { + floats := reflect.ValueOf(value) + + theValue = reflect.MakeSlice(theType, floats.Len(), floats.Len()) + for i := 0; i < floats.Len(); i++ { + floatValue, err := getFloatValue(strings.Split(theType.String(), "[]")[1], floats.Index(i).Interface()) + if err != nil { + return reflect.Value{}, err + } + + theValue.Index(i).SetFloat(floatValue) + } + + return theValue, nil + } + + // Strings + if theType.String() == "[]string" { + strs := reflect.ValueOf(value) + + theValue = reflect.MakeSlice(theType, strs.Len(), strs.Len()) + for i := 0; i < strs.Len(); i++ { + strValue, err := getStringValue(strings.Split(theType.String(), "[]")[1], strs.Index(i).Interface()) + if err != nil { + return reflect.Value{}, err + } + + theValue.Index(i).SetString(strValue) + } + + return theValue, nil + } + + return reflect.Value{}, NewErrUnsupportedType(valueType) +} + +func getBoolValue(theType string, value interface{}) (bool, error) { + b, ok := value.(bool) + if !ok { + return false, typeConversionError(value, typesMap[theType].String()) + } + + return b, nil +} + +func getIntValue(theType string, value interface{}) (int64, error) { + // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures. 
+ // This is because JSON only supports 64-bit floating point numbers and we could lose precision + // when converting from float64 to signed integer + if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") { + n, ok := value.(json.Number) + if !ok { + return 0, typeConversionError(value, typesMap[theType].String()) + } + + return n.Int64() + } + + n, ok := value.(int64) + if !ok { + return 0, typeConversionError(value, typesMap[theType].String()) + } + + return n, nil +} + +func getUintValue(theType string, value interface{}) (uint64, error) { + // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures. + // This is because JSON only supports 64-bit floating point numbers and we could lose precision + // when converting from float64 to unsigned integer + if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") { + n, ok := value.(json.Number) + if !ok { + return 0, typeConversionError(value, typesMap[theType].String()) + } + + intVal, err := n.Int64() + if err != nil { + return 0, err + } + + return uint64(intVal), nil + } + + var n uint64 + switch value := value.(type) { + case uint64: + n = value + case uint8: + n = uint64(value) + default: + return 0, typeConversionError(value, typesMap[theType].String()) + } + return n, nil +} + +func getFloatValue(theType string, value interface{}) (float64, error) { + // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures. 
+ // This is because JSON only supports 64-bit floating point numbers and we could lose precision + if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") { + n, ok := value.(json.Number) + if !ok { + return 0, typeConversionError(value, typesMap[theType].String()) + } + + return n.Float64() + } + + f, ok := value.(float64) + if !ok { + return 0, typeConversionError(value, typesMap[theType].String()) + } + + return f, nil +} + +func getStringValue(theType string, value interface{}) (string, error) { + s, ok := value.(string) + if !ok { + return "", typeConversionError(value, typesMap[theType].String()) + } + + return s, nil +} + +// IsContextType checks to see if the type is a context.Context +func IsContextType(t reflect.Type) bool { + return t == ctxType +} diff --git a/v2/tasks/reflect_test.go b/v2/tasks/reflect_test.go new file mode 100644 index 000000000..3fe9e692d --- /dev/null +++ b/v2/tasks/reflect_test.go @@ -0,0 +1,243 @@ +package tasks_test + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/RichardKnop/machinery/v2/tasks" +) + +var ( + reflectValuesTestCases = []struct { + name string + value interface{} + expectedType string + expectedValue interface{} + }{ + // basic types + { + name: "bool", + value: false, + expectedType: "bool", + }, + { + name: "int", + value: json.Number("123"), + expectedType: "int", + expectedValue: int(123), + }, + { + name: "int8", + value: json.Number("123"), + expectedType: "int8", + expectedValue: int8(123), + }, + { + name: "int16", + value: json.Number("123"), + expectedType: "int16", + expectedValue: int16(123), + }, + { + name: "int32", + value: json.Number("123"), + expectedType: "int32", + expectedValue: int32(123), + }, + { + name: "int64", + value: json.Number("185135722552891243"), + expectedType: "int64", + expectedValue: int64(185135722552891243), + }, + { + name: "uint", + value: json.Number("123"), + expectedType: "uint", + expectedValue: uint(123), + }, + { + name: "uint8", + value: 
json.Number("123"), + expectedType: "uint8", + expectedValue: uint8(123), + }, + { + name: "uint16", + value: json.Number("123"), + expectedType: "uint16", + expectedValue: uint16(123), + }, + { + name: "uint32", + value: json.Number("123"), + expectedType: "uint32", + expectedValue: uint32(123), + }, + { + name: "uint64", + value: json.Number("185135722552891243"), + expectedType: "uint64", + expectedValue: uint64(185135722552891243), + }, + { + name: "float32", + value: json.Number("0.5"), + expectedType: "float32", + expectedValue: float32(0.5), + }, + { + name: "float64", + value: json.Number("0.5"), + expectedType: "float64", + expectedValue: float64(0.5), + }, + { + name: "string", + value: "123", + expectedType: "string", + expectedValue: "123", + }, + // slices + { + name: "[]bool", + value: []interface{}{false, true}, + expectedType: "[]bool", + expectedValue: []bool{false, true}, + }, + { + name: "[]int", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]int", + expectedValue: []int{1, 2}, + }, + { + name: "[]int8", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]int8", + expectedValue: []int8{1, 2}, + }, + { + name: "[]int16", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]int16", + expectedValue: []int16{1, 2}, + }, + { + name: "[]int32", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]int32", + expectedValue: []int32{1, 2}, + }, + { + name: "[]int64", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]int64", + expectedValue: []int64{1, 2}, + }, + { + name: "[]uint", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]uint", + expectedValue: []uint{1, 2}, + }, + { + name: "[]uint8", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]uint8", + expectedValue: []uint8{1, 2}, + }, + { + name: "[]uint16", + value: []interface{}{json.Number("1"), 
json.Number("2")}, + expectedType: "[]uint16", + expectedValue: []uint16{1, 2}, + }, + { + name: "[]uint32", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]uint32", + expectedValue: []uint32{1, 2}, + }, + { + name: "[]uint64", + value: []interface{}{json.Number("1"), json.Number("2")}, + expectedType: "[]uint64", + expectedValue: []uint64{1, 2}, + }, + { + name: "[]float32", + value: []interface{}{json.Number("0.5"), json.Number("1.28")}, + expectedType: "[]float32", + expectedValue: []float32{0.5, 1.28}, + }, + { + name: "[]float64", + value: []interface{}{json.Number("0.5"), json.Number("1.28")}, + expectedType: "[]float64", + expectedValue: []float64{0.5, 1.28}, + }, + { + name: "[]string", + value: []interface{}{"foo", "bar"}, + expectedType: "[]string", + expectedValue: []string{"foo", "bar"}, + }, + // empty slices from NULL + { + name: "[]bool", + value: nil, + expectedType: "[]bool", + expectedValue: []bool{}, + }, + { + name: "[]int64", + value: nil, + expectedType: "[]int64", + expectedValue: []int64{}, + }, + { + name: "[]uint64", + value: nil, + expectedType: "[]uint64", + expectedValue: []uint64{}, + }, + { + name: "[]float64", + value: nil, + expectedType: "[]float64", + expectedValue: []float64{}, + }, + { + name: "[]string", + value: nil, + expectedType: "[]string", + expectedValue: []string{}, + }, + } +) + +func TestReflectValue(t *testing.T) { + t.Parallel() + + for _, tc := range reflectValuesTestCases { + tc := tc // capture range variable + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + value, err := tasks.ReflectValue(tc.name, tc.value) + if err != nil { + t.Error(err) + } + if value.Type().String() != tc.expectedType { + t.Errorf("type is %v, want %s", value.Type().String(), tc.expectedType) + } + if tc.expectedValue != nil { + if !reflect.DeepEqual(value.Interface(), tc.expectedValue) { + t.Errorf("value is %v, want %v", value.Interface(), tc.expectedValue) + } + } + }) + } +} diff --git 
a/v2/tasks/result.go b/v2/tasks/result.go new file mode 100644 index 000000000..0beb62de7 --- /dev/null +++ b/v2/tasks/result.go @@ -0,0 +1,40 @@ +package tasks + +import ( + "fmt" + "reflect" + "strings" +) + +// TaskResult represents an actual return value of a processed task +type TaskResult struct { + Type string `bson:"type"` + Value interface{} `bson:"value"` +} + +// ReflectTaskResults ... +func ReflectTaskResults(taskResults []*TaskResult) ([]reflect.Value, error) { + resultValues := make([]reflect.Value, len(taskResults)) + for i, taskResult := range taskResults { + resultValue, err := ReflectValue(taskResult.Type, taskResult.Value) + if err != nil { + return nil, err + } + resultValues[i] = resultValue + } + return resultValues, nil +} + +// HumanReadableResults ... +func HumanReadableResults(results []reflect.Value) string { + if len(results) == 1 { + return fmt.Sprintf("%v", results[0].Interface()) + } + + readableResults := make([]string, len(results)) + for i := 0; i < len(results); i++ { + readableResults[i] = fmt.Sprintf("%v", results[i].Interface()) + } + + return fmt.Sprintf("[%s]", strings.Join(readableResults, ", ")) +} diff --git a/v2/tasks/result_test.go b/v2/tasks/result_test.go new file mode 100644 index 000000000..5d2466368 --- /dev/null +++ b/v2/tasks/result_test.go @@ -0,0 +1,27 @@ +package tasks_test + +import ( + "testing" + + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestReflectTaskResults(t *testing.T) { + t.Parallel() + + taskResults := []*tasks.TaskResult{ + { + Type: "[]string", + Value: []string{"f", "o", "o"}, + }, + } + results, err := tasks.ReflectTaskResults(taskResults) + if assert.NoError(t, err) { + assert.Equal(t, 1, len(results)) + assert.Equal(t, 3, results[0].Len()) + assert.Equal(t, "f", results[0].Index(0).String()) + assert.Equal(t, "o", results[0].Index(1).String()) + assert.Equal(t, "o", results[0].Index(2).String()) + } +} diff --git a/v2/tasks/signature.go 
b/v2/tasks/signature.go new file mode 100644 index 000000000..f639391dd --- /dev/null +++ b/v2/tasks/signature.go @@ -0,0 +1,96 @@ +package tasks + +import ( + "fmt" + "github.com/RichardKnop/machinery/v2/utils" + "time" + + "github.com/google/uuid" +) + +// Arg represents a single argument passed to invocation fo a task +type Arg struct { + Name string `bson:"name"` + Type string `bson:"type"` + Value interface{} `bson:"value"` +} + +// Headers represents the headers which should be used to direct the task +type Headers map[string]interface{} + +// Set on Headers implements opentracing.TextMapWriter for trace propagation +func (h Headers) Set(key, val string) { + h[key] = val +} + +// ForeachKey on Headers implements opentracing.TextMapReader for trace propagation. +// It is essentially the same as the opentracing.TextMapReader implementation except +// for the added casting from interface{} to string. +func (h Headers) ForeachKey(handler func(key, val string) error) error { + for k, v := range h { + // Skip any non string values + stringValue, ok := v.(string) + if !ok { + continue + } + + if err := handler(k, stringValue); err != nil { + return err + } + } + + return nil +} + +// Signature represents a single task invocation +type Signature struct { + UUID string + Name string + RoutingKey string + ETA *time.Time + GroupUUID string + GroupTaskCount int + Args []Arg + Headers Headers + Priority uint8 + Immutable bool + RetryCount int + RetryTimeout int + OnSuccess []*Signature + OnError []*Signature + ChordCallback *Signature + //MessageGroupId for Broker, e.g. 
SQS + BrokerMessageGroupId string + //ReceiptHandle of SQS Message + SQSReceiptHandle string + // StopTaskDeletionOnError used with sqs when we want to send failed messages to dlq, + // and don't want machinery to delete from source queue + StopTaskDeletionOnError bool + // IgnoreWhenTaskNotRegistered auto removes the request when there is no handeler available + // When this is true a task with no handler will be ignored and not placed back in the queue + IgnoreWhenTaskNotRegistered bool +} + +// NewSignature creates a new task signature +func NewSignature(name string, args []Arg) (*Signature, error) { + signatureID := uuid.New().String() + return &Signature{ + UUID: fmt.Sprintf("task_%v", signatureID), + Name: name, + Args: args, + }, nil +} + +func CopySignatures(signatures ...*Signature) []*Signature { + var sigs = make([]*Signature, len(signatures)) + for index, signature := range signatures { + sigs[index] = CopySignature(signature) + } + return sigs +} + +func CopySignature(signature *Signature) *Signature { + var sig = new(Signature) + _ = utils.DeepCopy(sig, signature) + return sig +} diff --git a/v2/tasks/state.go b/v2/tasks/state.go new file mode 100644 index 000000000..8bd006b54 --- /dev/null +++ b/v2/tasks/state.go @@ -0,0 +1,109 @@ +package tasks + +import "time" + +const ( + // StatePending - initial state of a task + StatePending = "PENDING" + // StateReceived - when task is received by a worker + StateReceived = "RECEIVED" + // StateStarted - when the worker starts processing the task + StateStarted = "STARTED" + // StateRetry - when failed task has been scheduled for retry + StateRetry = "RETRY" + // StateSuccess - when the task is processed successfully + StateSuccess = "SUCCESS" + // StateFailure - when processing of the task fails + StateFailure = "FAILURE" +) + +// TaskState represents a state of a task +type TaskState struct { + TaskUUID string `bson:"_id"` + TaskName string `bson:"task_name"` + State string `bson:"state"` + Results 
[]*TaskResult `bson:"results"` + Error string `bson:"error"` + CreatedAt time.Time `bson:"created_at"` + TTL int64 `bson:"ttl,omitempty"` +} + +// GroupMeta stores useful metadata about tasks within the same group +// E.g. UUIDs of all tasks which are used in order to check if all tasks +// completed successfully or not and thus whether to trigger chord callback +type GroupMeta struct { + GroupUUID string `bson:"_id"` + TaskUUIDs []string `bson:"task_uuids"` + ChordTriggered bool `bson:"chord_triggered"` + Lock bool `bson:"lock"` + CreatedAt time.Time `bson:"created_at"` + TTL int64 `bson:"ttl,omitempty"` +} + +// NewPendingTaskState ... +func NewPendingTaskState(signature *Signature) *TaskState { + return &TaskState{ + TaskUUID: signature.UUID, + TaskName: signature.Name, + State: StatePending, + CreatedAt: time.Now().UTC(), + } +} + +// NewReceivedTaskState ... +func NewReceivedTaskState(signature *Signature) *TaskState { + return &TaskState{ + TaskUUID: signature.UUID, + State: StateReceived, + } +} + +// NewStartedTaskState ... +func NewStartedTaskState(signature *Signature) *TaskState { + return &TaskState{ + TaskUUID: signature.UUID, + State: StateStarted, + } +} + +// NewSuccessTaskState ... +func NewSuccessTaskState(signature *Signature, results []*TaskResult) *TaskState { + return &TaskState{ + TaskUUID: signature.UUID, + State: StateSuccess, + Results: results, + } +} + +// NewFailureTaskState ... +func NewFailureTaskState(signature *Signature, err string) *TaskState { + return &TaskState{ + TaskUUID: signature.UUID, + State: StateFailure, + Error: err, + } +} + +// NewRetryTaskState ... +func NewRetryTaskState(signature *Signature) *TaskState { + return &TaskState{ + TaskUUID: signature.UUID, + State: StateRetry, + } +} + +// IsCompleted returns true if state is SUCCESS or FAILURE, +// i.e. the task has finished processing and either succeeded or failed. 
+func (taskState *TaskState) IsCompleted() bool { + return taskState.IsSuccess() || taskState.IsFailure() +} + +// IsSuccess returns true if state is SUCCESS +func (taskState *TaskState) IsSuccess() bool { + return taskState.State == StateSuccess +} + +// IsFailure returns true if state is FAILURE +func (taskState *TaskState) IsFailure() bool { + return taskState.State == StateFailure +} diff --git a/v2/tasks/state_test.go b/v2/tasks/state_test.go new file mode 100644 index 000000000..9ed0a1d91 --- /dev/null +++ b/v2/tasks/state_test.go @@ -0,0 +1,31 @@ +package tasks_test + +import ( + "testing" + + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestTaskStateIsCompleted(t *testing.T) { + t.Parallel() + + taskState := &tasks.TaskState{ + TaskUUID: "taskUUID", + State: tasks.StatePending, + } + + assert.False(t, taskState.IsCompleted()) + + taskState.State = tasks.StateReceived + assert.False(t, taskState.IsCompleted()) + + taskState.State = tasks.StateStarted + assert.False(t, taskState.IsCompleted()) + + taskState.State = tasks.StateSuccess + assert.True(t, taskState.IsCompleted()) + + taskState.State = tasks.StateFailure + assert.True(t, taskState.IsCompleted()) +} diff --git a/v2/tasks/task.go b/v2/tasks/task.go new file mode 100644 index 000000000..c2342fabd --- /dev/null +++ b/v2/tasks/task.go @@ -0,0 +1,201 @@ +package tasks + +import ( + "context" + "errors" + "fmt" + "reflect" + "runtime/debug" + + opentracing "github.com/opentracing/opentracing-go" + opentracing_ext "github.com/opentracing/opentracing-go/ext" + opentracing_log "github.com/opentracing/opentracing-go/log" + + "github.com/RichardKnop/machinery/v2/log" +) + +// ErrTaskPanicked ... 
+var ErrTaskPanicked = errors.New("Invoking task caused a panic") + +// Task wraps a signature and methods used to reflect task arguments and +// return values after invoking the task +type Task struct { + TaskFunc reflect.Value + UseContext bool + Context context.Context + Args []reflect.Value +} + +type signatureCtxType struct{} + +var signatureCtx signatureCtxType + +// SignatureFromContext gets the signature from the context +func SignatureFromContext(ctx context.Context) *Signature { + if ctx == nil { + return nil + } + + v := ctx.Value(signatureCtx) + if v == nil { + return nil + } + + signature, _ := v.(*Signature) + return signature +} + +// NewWithSignature is the same as New but injects the signature +func NewWithSignature(taskFunc interface{}, signature *Signature) (*Task, error) { + args := signature.Args + ctx := context.Background() + ctx = context.WithValue(ctx, signatureCtx, signature) + task := &Task{ + TaskFunc: reflect.ValueOf(taskFunc), + Context: ctx, + } + + taskFuncType := reflect.TypeOf(taskFunc) + if taskFuncType.NumIn() > 0 { + arg0Type := taskFuncType.In(0) + if IsContextType(arg0Type) { + task.UseContext = true + } + } + + if err := task.ReflectArgs(args); err != nil { + return nil, fmt.Errorf("Reflect task args error: %s", err) + } + + return task, nil +} + +// New tries to use reflection to convert the function and arguments +// into a reflect.Value and prepare it for invocation +func New(taskFunc interface{}, args []Arg) (*Task, error) { + task := &Task{ + TaskFunc: reflect.ValueOf(taskFunc), + Context: context.Background(), + } + + taskFuncType := reflect.TypeOf(taskFunc) + if taskFuncType.NumIn() > 0 { + arg0Type := taskFuncType.In(0) + if IsContextType(arg0Type) { + task.UseContext = true + } + } + + if err := task.ReflectArgs(args); err != nil { + return nil, fmt.Errorf("Reflect task args error: %s", err) + } + + return task, nil +} + +// Call attempts to call the task with the supplied arguments. 
+// +// `err` is set in the return value in two cases: +// 1. The reflected function invocation panics (e.g. due to a mismatched +// argument list). +// 2. The task func itself returns a non-nil error. +func (t *Task) Call() (taskResults []*TaskResult, err error) { + // retrieve the span from the task's context and finish it as soon as this function returns + if span := opentracing.SpanFromContext(t.Context); span != nil { + defer span.Finish() + } + + defer func() { + // Recover from panic and set err. + if e := recover(); e != nil { + switch e := e.(type) { + default: + err = ErrTaskPanicked + case error: + err = e + case string: + err = errors.New(e) + } + + // mark the span as failed and dump the error and stack trace to the span + if span := opentracing.SpanFromContext(t.Context); span != nil { + opentracing_ext.Error.Set(span, true) + span.LogFields( + opentracing_log.Error(err), + opentracing_log.Object("stack", string(debug.Stack())), + ) + } + + // Print stack trace + log.ERROR.Printf("%s", debug.Stack()) + } + }() + + args := t.Args + + if t.UseContext { + ctxValue := reflect.ValueOf(t.Context) + args = append([]reflect.Value{ctxValue}, args...) 
+ } + + // Invoke the task + results := t.TaskFunc.Call(args) + + // Task must return at least a value + if len(results) == 0 { + return nil, ErrTaskReturnsNoValue + } + + // Last returned value + lastResult := results[len(results)-1] + + // If the last returned value is not nil, it has to be of error type, if that + // is not the case, return error message, otherwise propagate the task error + // to the caller + if !lastResult.IsNil() { + // If the result implements Retriable interface, return instance of Retriable + retriableErrorInterface := reflect.TypeOf((*Retriable)(nil)).Elem() + if lastResult.Type().Implements(retriableErrorInterface) { + return nil, lastResult.Interface().(ErrRetryTaskLater) + } + + // Otherwise, check that the result implements the standard error interface, + // if not, return ErrLastReturnValueMustBeError error + errorInterface := reflect.TypeOf((*error)(nil)).Elem() + if !lastResult.Type().Implements(errorInterface) { + return nil, ErrLastReturnValueMustBeError + } + + // Return the standard error + return nil, lastResult.Interface().(error) + } + + // Convert reflect values to task results + taskResults = make([]*TaskResult, len(results)-1) + for i := 0; i < len(results)-1; i++ { + val := results[i].Interface() + typeStr := reflect.TypeOf(val).String() + taskResults[i] = &TaskResult{ + Type: typeStr, + Value: val, + } + } + + return taskResults, err +} + +// ReflectArgs converts []TaskArg to []reflect.Value +func (t *Task) ReflectArgs(args []Arg) error { + argValues := make([]reflect.Value, len(args)) + + for i, arg := range args { + argValue, err := ReflectValue(arg.Type, arg.Value) + if err != nil { + return err + } + argValues[i] = argValue + } + + t.Args = argValues + return nil +} diff --git a/v2/tasks/task_test.go b/v2/tasks/task_test.go new file mode 100644 index 000000000..525f2aaec --- /dev/null +++ b/v2/tasks/task_test.go @@ -0,0 +1,129 @@ +package tasks_test + +import ( + "context" + "errors" + "math" + "testing" + "time" + 
+ "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestTaskCallErrorTest(t *testing.T) { + t.Parallel() + + // Create test task that returns tasks.ErrRetryTaskLater error + retriable := func() error { return tasks.NewErrRetryTaskLater("some error", 4*time.Hour) } + + task, err := tasks.New(retriable, []tasks.Arg{}) + assert.NoError(t, err) + + // Invoke TryCall and validate that returned error can be cast to tasks.ErrRetryTaskLater + results, err := task.Call() + assert.Nil(t, results) + assert.NotNil(t, err) + _, ok := interface{}(err).(tasks.ErrRetryTaskLater) + assert.True(t, ok, "Error should be castable to tasks.ErrRetryTaskLater") + + // Create test task that returns a standard error + standard := func() error { return errors.New("some error") } + + task, err = tasks.New(standard, []tasks.Arg{}) + assert.NoError(t, err) + + // Invoke TryCall and validate that returned error is standard + results, err = task.Call() + assert.Nil(t, results) + assert.NotNil(t, err) + assert.Equal(t, "some error", err.Error()) +} + +func TestTaskReflectArgs(t *testing.T) { + t.Parallel() + + task := new(tasks.Task) + args := []tasks.Arg{ + { + Type: "[]int64", + Value: []int64{1, 2}, + }, + } + + err := task.ReflectArgs(args) + assert.NoError(t, err) + assert.Equal(t, 1, len(task.Args)) + assert.Equal(t, "[]int64", task.Args[0].Type().String()) +} + +func TestTaskCallInvalidArgRobustnessError(t *testing.T) { + t.Parallel() + + // Create a test task function + f := func(x int) error { return nil } + + // Construct an invalid argument list and reflect it + args := []tasks.Arg{ + {Type: "bool", Value: true}, + } + + task, err := tasks.New(f, args) + assert.NoError(t, err) + + // Invoke TryCall and validate error handling + results, err := task.Call() + assert.Equal(t, "reflect: Call using bool as type int", err.Error()) + assert.Nil(t, results) +} + +func TestTaskCallInterfaceValuedResult(t *testing.T) { + t.Parallel() + + // Create a 
test task function + f := func() (interface{}, error) { return math.Pi, nil } + + task, err := tasks.New(f, []tasks.Arg{}) + assert.NoError(t, err) + + taskResults, err := task.Call() + assert.NoError(t, err) + assert.Equal(t, "float64", taskResults[0].Type) + assert.Equal(t, math.Pi, taskResults[0].Value) +} + +func TestTaskCallWithContext(t *testing.T) { + t.Parallel() + + f := func(c context.Context) (interface{}, error) { + assert.NotNil(t, c) + assert.Nil(t, tasks.SignatureFromContext(c)) + return math.Pi, nil + } + task, err := tasks.New(f, []tasks.Arg{}) + assert.NoError(t, err) + taskResults, err := task.Call() + assert.NoError(t, err) + assert.Equal(t, "float64", taskResults[0].Type) + assert.Equal(t, math.Pi, taskResults[0].Value) +} + +func TestTaskCallWithSignatureInContext(t *testing.T) { + t.Parallel() + + f := func(c context.Context) (interface{}, error) { + assert.NotNil(t, c) + signature := tasks.SignatureFromContext(c) + assert.NotNil(t, signature) + assert.Equal(t, "foo", signature.Name) + return math.Pi, nil + } + signature, err := tasks.NewSignature("foo", []tasks.Arg{}) + assert.NoError(t, err) + task, err := tasks.NewWithSignature(f, signature) + assert.NoError(t, err) + taskResults, err := task.Call() + assert.NoError(t, err) + assert.Equal(t, "float64", taskResults[0].Type) + assert.Equal(t, math.Pi, taskResults[0].Value) +} diff --git a/v2/tasks/validate.go b/v2/tasks/validate.go new file mode 100644 index 000000000..32d11f871 --- /dev/null +++ b/v2/tasks/validate.go @@ -0,0 +1,42 @@ +package tasks + +import ( + "errors" + "reflect" +) + +var ( + // ErrTaskMustBeFunc ... + ErrTaskMustBeFunc = errors.New("Task must be a func type") + // ErrTaskReturnsNoValue ... + ErrTaskReturnsNoValue = errors.New("Task must return at least a single value") + // ErrLastReturnValueMustBeError .. 
+ ErrLastReturnValueMustBeError = errors.New("Last return value of a task must be error") +) + +// ValidateTask validates task function using reflection and makes sure +// it has a proper signature. Functions used as tasks must return at least a +// single value and the last return type must be error +func ValidateTask(task interface{}) error { + v := reflect.ValueOf(task) + t := v.Type() + + // Task must be a function + if t.Kind() != reflect.Func { + return ErrTaskMustBeFunc + } + + // Task must return at least a single value + if t.NumOut() < 1 { + return ErrTaskReturnsNoValue + } + + // Last return value must be error + lastReturnType := t.Out(t.NumOut() - 1) + errorInterface := reflect.TypeOf((*error)(nil)).Elem() + if !lastReturnType.Implements(errorInterface) { + return ErrLastReturnValueMustBeError + } + + return nil +} diff --git a/v2/tasks/validate_test.go b/v2/tasks/validate_test.go new file mode 100644 index 000000000..c47a227ac --- /dev/null +++ b/v2/tasks/validate_test.go @@ -0,0 +1,32 @@ +package tasks_test + +import ( + "testing" + + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestValidateTask(t *testing.T) { + t.Parallel() + + type someStruct struct{} + var ( + taskOfWrongType = new(someStruct) + taskWithoutReturnValue = func() {} + taskWithoutErrorAsLastReturnValue = func() int { return 0 } + validTask = func(arg string) error { return nil } + ) + + err := tasks.ValidateTask(taskOfWrongType) + assert.Equal(t, tasks.ErrTaskMustBeFunc, err) + + err = tasks.ValidateTask(taskWithoutReturnValue) + assert.Equal(t, tasks.ErrTaskReturnsNoValue, err) + + err = tasks.ValidateTask(taskWithoutErrorAsLastReturnValue) + assert.Equal(t, tasks.ErrLastReturnValueMustBeError, err) + + err = tasks.ValidateTask(validTask) + assert.NoError(t, err) +} diff --git a/v2/tasks/workflow.go b/v2/tasks/workflow.go new file mode 100644 index 000000000..38a786461 --- /dev/null +++ b/v2/tasks/workflow.go @@ -0,0 +1,95 @@ +package 
tasks + +import ( + "fmt" + + "github.com/google/uuid" +) + +// Chain creates a chain of tasks to be executed one after another +type Chain struct { + Tasks []*Signature +} + +// Group creates a set of tasks to be executed in parallel +type Group struct { + GroupUUID string + Tasks []*Signature +} + +// Chord adds an optional callback to the group to be executed +// after all tasks in the group finished +type Chord struct { + Group *Group + Callback *Signature +} + +// GetUUIDs returns slice of task UUIDS +func (group *Group) GetUUIDs() []string { + taskUUIDs := make([]string, len(group.Tasks)) + for i, signature := range group.Tasks { + taskUUIDs[i] = signature.UUID + } + return taskUUIDs +} + +// NewChain creates a new chain of tasks to be processed one by one, passing +// results unless task signatures are set to be immutable +func NewChain(signatures ...*Signature) (*Chain, error) { + // Auto generate task UUIDs if needed + for _, signature := range signatures { + if signature.UUID == "" { + signatureID := uuid.New().String() + signature.UUID = fmt.Sprintf("task_%v", signatureID) + } + } + + for i := len(signatures) - 1; i > 0; i-- { + if i > 0 { + signatures[i-1].OnSuccess = []*Signature{signatures[i]} + } + } + + chain := &Chain{Tasks: signatures} + + return chain, nil +} + +// NewGroup creates a new group of tasks to be processed in parallel +func NewGroup(signatures ...*Signature) (*Group, error) { + // Generate a group UUID + groupUUID := uuid.New().String() + groupID := fmt.Sprintf("group_%v", groupUUID) + + // Auto generate task UUIDs if needed, group tasks by common group UUID + for _, signature := range signatures { + if signature.UUID == "" { + signatureID := uuid.New().String() + signature.UUID = fmt.Sprintf("task_%v", signatureID) + } + signature.GroupUUID = groupID + signature.GroupTaskCount = len(signatures) + } + + return &Group{ + GroupUUID: groupID, + Tasks: signatures, + }, nil +} + +// NewChord creates a new chord (a group of tasks with a 
single callback +// to be executed after all tasks in the group has completed) +func NewChord(group *Group, callback *Signature) (*Chord, error) { + if callback.UUID == "" { + // Generate a UUID for the chord callback + callbackUUID := uuid.New().String() + callback.UUID = fmt.Sprintf("chord_%v", callbackUUID) + } + + // Add a chord callback to all tasks + for _, signature := range group.Tasks { + signature.ChordCallback = callback + } + + return &Chord{Group: group, Callback: callback}, nil +} diff --git a/v2/tasks/workflow_test.go b/v2/tasks/workflow_test.go new file mode 100644 index 000000000..12bb3fd1b --- /dev/null +++ b/v2/tasks/workflow_test.go @@ -0,0 +1,61 @@ +package tasks_test + +import ( + "testing" + + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/stretchr/testify/assert" +) + +func TestNewChain(t *testing.T) { + t.Parallel() + + task1 := tasks.Signature{ + Name: "foo", + Args: []tasks.Arg{ + { + Type: "float64", + Value: interface{}(1), + }, + { + Type: "float64", + Value: interface{}(1), + }, + }, + } + + task2 := tasks.Signature{ + Name: "bar", + Args: []tasks.Arg{ + { + Type: "float64", + Value: interface{}(5), + }, + { + Type: "float64", + Value: interface{}(6), + }, + }, + } + + task3 := tasks.Signature{ + Name: "qux", + Args: []tasks.Arg{ + { + Type: "float64", + Value: interface{}(4), + }, + }, + } + + chain, err := tasks.NewChain(&task1, &task2, &task3) + if err != nil { + t.Fatal(err) + } + + firstTask := chain.Tasks[0] + + assert.Equal(t, "foo", firstTask.Name) + assert.Equal(t, "bar", firstTask.OnSuccess[0].Name) + assert.Equal(t, "qux", firstTask.OnSuccess[0].OnSuccess[0].Name) +} diff --git a/v2/tracing/tracing.go b/v2/tracing/tracing.go new file mode 100644 index 000000000..3f7c59bb1 --- /dev/null +++ b/v2/tracing/tracing.go @@ -0,0 +1,141 @@ +package tracing + +import ( + "encoding/json" + + "github.com/RichardKnop/machinery/v2/tasks" + + opentracing "github.com/opentracing/opentracing-go" + opentracing_ext 
"github.com/opentracing/opentracing-go/ext" + opentracing_log "github.com/opentracing/opentracing-go/log" +) + +// opentracing tags +var ( + MachineryTag = opentracing.Tag{Key: string(opentracing_ext.Component), Value: "machinery"} + WorkflowGroupTag = opentracing.Tag{Key: "machinery.workflow", Value: "group"} + WorkflowChordTag = opentracing.Tag{Key: "machinery.workflow", Value: "chord"} + WorkflowChainTag = opentracing.Tag{Key: "machinery.workflow", Value: "chain"} +) + +// StartSpanFromHeaders will extract a span from the signature headers +// and start a new span with the given operation name. +func StartSpanFromHeaders(headers tasks.Headers, operationName string) opentracing.Span { + // Try to extract the span context from the carrier. + spanContext, err := opentracing.GlobalTracer().Extract(opentracing.TextMap, headers) + + // Create a new span from the span context if found or start a new trace with the function name. + // For clarity add the machinery component tag. + span := opentracing.StartSpan( + operationName, + ConsumerOption(spanContext), + MachineryTag, + ) + + // Log any error but don't fail + if err != nil { + span.LogFields(opentracing_log.Error(err)) + } + + return span +} + +// HeadersWithSpan will inject a span into the signature headers +func HeadersWithSpan(headers tasks.Headers, span opentracing.Span) tasks.Headers { + // check if the headers aren't nil + if headers == nil { + headers = make(tasks.Headers) + } + + if err := opentracing.GlobalTracer().Inject(span.Context(), opentracing.TextMap, headers); err != nil { + span.LogFields(opentracing_log.Error(err)) + } + + return headers +} + +type consumerOption struct { + producerContext opentracing.SpanContext +} + +func (c consumerOption) Apply(o *opentracing.StartSpanOptions) { + if c.producerContext != nil { + opentracing.FollowsFrom(c.producerContext).Apply(o) + } + opentracing_ext.SpanKindConsumer.Apply(o) +} + +// ConsumerOption ... 
+func ConsumerOption(producer opentracing.SpanContext) opentracing.StartSpanOption { + return consumerOption{producer} +} + +type producerOption struct{} + +func (p producerOption) Apply(o *opentracing.StartSpanOptions) { + opentracing_ext.SpanKindProducer.Apply(o) +} + +// ProducerOption ... +func ProducerOption() opentracing.StartSpanOption { + return producerOption{} +} + +// AnnotateSpanWithSignatureInfo ... +func AnnotateSpanWithSignatureInfo(span opentracing.Span, signature *tasks.Signature) { + // tag the span with some info about the signature + span.SetTag("signature.name", signature.Name) + span.SetTag("signature.uuid", signature.UUID) + + if signature.GroupUUID != "" { + span.SetTag("signature.group.uuid", signature.GroupUUID) + } + + if signature.ChordCallback != nil { + span.SetTag("signature.chord.callback.uuid", signature.ChordCallback.UUID) + span.SetTag("signature.chord.callback.name", signature.ChordCallback.Name) + } +} + +// AnnotateSpanWithChainInfo ... +func AnnotateSpanWithChainInfo(span opentracing.Span, chain *tasks.Chain) { + // tag the span with some info about the chain + span.SetTag("chain.tasks.length", len(chain.Tasks)) + + // inject the tracing span into the tasks signature headers + for _, signature := range chain.Tasks { + signature.Headers = HeadersWithSpan(signature.Headers, span) + } +} + +// AnnotateSpanWithGroupInfo ... 
+func AnnotateSpanWithGroupInfo(span opentracing.Span, group *tasks.Group, sendConcurrency int) { + // tag the span with some info about the group + span.SetTag("group.uuid", group.GroupUUID) + span.SetTag("group.tasks.length", len(group.Tasks)) + span.SetTag("group.concurrency", sendConcurrency) + + // encode the task uuids to json, if that fails just dump it in + if taskUUIDs, err := json.Marshal(group.GetUUIDs()); err == nil { + span.SetTag("group.tasks", string(taskUUIDs)) + } else { + span.SetTag("group.tasks", group.GetUUIDs()) + } + + // inject the tracing span into the tasks signature headers + for _, signature := range group.Tasks { + signature.Headers = HeadersWithSpan(signature.Headers, span) + } +} + +// AnnotateSpanWithChordInfo ... +func AnnotateSpanWithChordInfo(span opentracing.Span, chord *tasks.Chord, sendConcurrency int) { + // tag the span with chord specific info + span.SetTag("chord.callback.uuid", chord.Callback.UUID) + + // inject the tracing span into the callback signature + chord.Callback.Headers = HeadersWithSpan(chord.Callback.Headers, span) + + // tag the span for the group part of the chord + AnnotateSpanWithGroupInfo(span, chord.Group, sendConcurrency) +} diff --git a/v2/utils/deepcopy.go b/v2/utils/deepcopy.go new file mode 100644 index 000000000..2c95c14a0 --- /dev/null +++ b/v2/utils/deepcopy.go @@ -0,0 +1,83 @@ +package utils + +import ( + "errors" + "reflect" +) + +var ( + ErrNoMatchType = errors.New("no match type") + ErrNoPointer = errors.New("must be interface") + ErrInvalidArgument = errors.New("invalid arguments") +) + +func deepCopy(dst, src reflect.Value) { + switch src.Kind() { + case reflect.Interface: + value := src.Elem() + if !value.IsValid() { + return + } + newValue := reflect.New(value.Type()).Elem() + deepCopy(newValue, value) + dst.Set(newValue) + case reflect.Ptr: + value := src.Elem() + if !value.IsValid() { + return + } + dst.Set(reflect.New(value.Type())) + deepCopy(dst.Elem(), value) + case reflect.Map: + 
dst.Set(reflect.MakeMap(src.Type())) + keys := src.MapKeys() + for _, key := range keys { + value := src.MapIndex(key) + newValue := reflect.New(value.Type()).Elem() + deepCopy(newValue, value) + dst.SetMapIndex(key, newValue) + } + case reflect.Slice: + dst.Set(reflect.MakeSlice(src.Type(), src.Len(), src.Cap())) + for i := 0; i < src.Len(); i++ { + deepCopy(dst.Index(i), src.Index(i)) + } + case reflect.Struct: + typeSrc := src.Type() + for i := 0; i < src.NumField(); i++ { + value := src.Field(i) + tag := typeSrc.Field(i).Tag + if value.CanSet() && tag.Get("deepcopy") != "-" { + deepCopy(dst.Field(i), value) + } + } + default: + dst.Set(src) + } +} + +func DeepCopy(dst, src interface{}) error { + typeDst := reflect.TypeOf(dst) + typeSrc := reflect.TypeOf(src) + if typeDst != typeSrc { + return ErrNoMatchType + } + if typeSrc.Kind() != reflect.Ptr { + return ErrNoPointer + } + + valueDst := reflect.ValueOf(dst).Elem() + valueSrc := reflect.ValueOf(src).Elem() + if !valueDst.IsValid() || !valueSrc.IsValid() { + return ErrInvalidArgument + } + + deepCopy(valueDst, valueSrc) + return nil +} + +func DeepClone(v interface{}) interface{} { + dst := reflect.New(reflect.TypeOf(v)).Elem() + deepCopy(dst, reflect.ValueOf(v)) + return dst.Interface() +} diff --git a/v2/utils/deepcopy_test.go b/v2/utils/deepcopy_test.go new file mode 100644 index 000000000..8a70524c6 --- /dev/null +++ b/v2/utils/deepcopy_test.go @@ -0,0 +1,32 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDeepCopy(t *testing.T) { + t.Parallel() + + type s struct { + A float64 + B int + C []int + D *int + E map[string]int + } + var d = 3 + var dst = new(s) + var src = s{1.0, 1, []int{1, 2, 3}, &d, map[string]int{"a": 1}} + + err := DeepCopy(dst, &src) + src.A = 2 + + assert.NoError(t, err) + assert.Equal(t, 1.0, dst.A) + assert.Equal(t, 1, dst.B) + assert.Equal(t, []int{1, 2, 3}, dst.C) + assert.Equal(t, &d, dst.D) + assert.Equal(t, map[string]int{"a": 1}, 
dst.E) +} diff --git a/v2/utils/utils.go b/v2/utils/utils.go new file mode 100644 index 000000000..2c2274095 --- /dev/null +++ b/v2/utils/utils.go @@ -0,0 +1,14 @@ +package utils + +import ( + "os" + "path/filepath" +) + +const ( + LockKeyPrefix = "machinery_lock_" +) + +func GetLockName(name, spec string) string { + return LockKeyPrefix + filepath.Base(os.Args[0]) + name + spec +} diff --git a/v2/utils/utils_test.go b/v2/utils/utils_test.go new file mode 100644 index 000000000..d4cbb05c6 --- /dev/null +++ b/v2/utils/utils_test.go @@ -0,0 +1,14 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetLockName(t *testing.T) { + t.Parallel() + + lockName := GetLockName("test", "*/3 * * *") + assert.Equal(t, "machinery_lock_utils.testtest*/3 * * *", lockName) +} diff --git a/v2/utils/uuid.go b/v2/utils/uuid.go new file mode 100644 index 000000000..738c9a788 --- /dev/null +++ b/v2/utils/uuid.go @@ -0,0 +1,11 @@ +package utils + +import ( + "github.com/google/uuid" + "strings" +) + +func GetPureUUID() string { + uid, _ := uuid.NewUUID() + return strings.Replace(uid.String(), "-", "", -1) +} diff --git a/v2/utils/uuid_test.go b/v2/utils/uuid_test.go new file mode 100644 index 000000000..2d9968991 --- /dev/null +++ b/v2/utils/uuid_test.go @@ -0,0 +1,13 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetPureUUID(t *testing.T) { + t.Parallel() + + assert.Len(t, GetPureUUID(), 32) +} diff --git a/v2/worker.go b/v2/worker.go index ef6b10bd8..ccfc96602 100644 --- a/v2/worker.go +++ b/v2/worker.go @@ -12,12 +12,12 @@ import ( "github.com/opentracing/opentracing-go" - "github.com/RichardKnop/machinery/v1/backends/amqp" - "github.com/RichardKnop/machinery/v1/brokers/errs" - "github.com/RichardKnop/machinery/v1/log" - "github.com/RichardKnop/machinery/v1/retry" - "github.com/RichardKnop/machinery/v1/tasks" - "github.com/RichardKnop/machinery/v1/tracing" + 
"github.com/RichardKnop/machinery/v2/backends/amqp" + "github.com/RichardKnop/machinery/v2/brokers/errs" + "github.com/RichardKnop/machinery/v2/log" + "github.com/RichardKnop/machinery/v2/retry" + "github.com/RichardKnop/machinery/v2/tasks" + "github.com/RichardKnop/machinery/v2/tracing" ) // Worker represents a single worker process