diff --git a/da_manager_example/README.md b/da_manager_example/README.md new file mode 100644 index 000000000000..e702205b7260 --- /dev/null +++ b/da_manager_example/README.md @@ -0,0 +1,23 @@ +# DA Manager + +A `DA Manager` is introduced, a component that periodically queries for new batches and uploads their `pubdata` to some +data availability solution, as a separate binary. The way this works is the following: + +- A new RPC endpoint is added to the `zk server`, called `zks_getL1BatchPubdata`, that takes a batch number or hash as a + parameter and returns the `pubdata` for that batch appropriately encoded as an array of bytes. +- To construct the `pubdata`, the operator will query Postgres for the four components of pubdata from the `l1_batches` + table (meaning that there’s no need to modify or add any schemas), then convert it into its byte representation using + the logic in the `pub fn construct_pubdata(&self) -> Vec<u8>` method of the `L1BatchWithMetadata` struct. +- A user wanting to incorporate a new DA solution simply writes their own DA manager in the programming language they + prefer, as an application that uses the endpoint introduced above to periodically query for new `pubdata` and post it + to the DA server. For this scenario, we should provide some examples showing what you should implement for your own DA + solution. +- This `DA Manager` runs independently from the operator, i.e. as an entirely separate `OS` process or even on a + different machine (in particular, it’s independent from both the `eth_tx_aggregator` and the `eth_tx_manager`) + +![DA Manager](assets/da_manager.png) + +## How to run + +1. Run the ZK Stack +2. 
Run `python3 da_manager_example/main.py` diff --git a/da_manager_example/assets/da_manager.png b/da_manager_example/assets/da_manager.png new file mode 100644 index 000000000000..a8eab8ec496d Binary files /dev/null and b/da_manager_example/assets/da_manager.png differ diff --git a/da_manager_example/data/pubdata_storage.json b/da_manager_example/data/pubdata_storage.json new file mode 100644 index 000000000000..9e26dfeeb6e6 --- /dev/null +++ b/da_manager_example/data/pubdata_storage.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/da_manager_example/main.py b/da_manager_example/main.py new file mode 100644 index 000000000000..a82ef2e97049 --- /dev/null +++ b/da_manager_example/main.py @@ -0,0 +1,39 @@ +import requests +import json +import time + +L2_URL = 'http://localhost:3050' +DB_PATH = 'da_manager_example/data/pubdata_storage.json' + +def get_batch_pubdata(url, batch_number): + headers = {"Content-Type": "application/json"} + data = {"jsonrpc": "2.0", "id": 1, "method": "zks_getL1BatchPubdata", "params": [batch_number]} + response = requests.post(url, headers=headers, data=json.dumps(data)) + return response.json()["result"] + +def store_batch_pubdata(pubdata_storage, stored_pubdata, pubdata, batch_number): + stored_pubdata[batch_number] = pubdata + pubdata_storage.seek(0) + json.dump(stored_pubdata, pubdata_storage) + pubdata_storage.truncate() + +def main(): + with open(DB_PATH, "r+") as pubdata_storage: + stored_pubdata = json.load(pubdata_storage) + starting_batch_id = len(stored_pubdata.keys()) + 1 + print(f"Starting from batch #{starting_batch_id}") + while True: + try: + l1_batch_pubdata = get_batch_pubdata(L2_URL, starting_batch_id) + store_batch_pubdata(pubdata_storage, stored_pubdata, l1_batch_pubdata, starting_batch_id) + print(f"Got batch #{starting_batch_id} pubdata") + except: + print(f"Failed to get batch #{starting_batch_id} pubdata") + print("Retrying in 60 seconds") + time.sleep(60) + continue + starting_batch_id += 1 + 
time.sleep(5) + +if __name__ == '__main__': + main() \ No newline at end of file