> stereotype
diff --git a/docs/misc/deloitte/supply-chain.png b/docs/misc/deloitte/supply-chain.png
deleted file mode 100644
index 3dd76383c3..0000000000
Binary files a/docs/misc/deloitte/supply-chain.png and /dev/null differ
diff --git a/docs/misc/deloitte/supply-chain.puml b/docs/misc/deloitte/supply-chain.puml
deleted file mode 100644
index 1231e74569..0000000000
--- a/docs/misc/deloitte/supply-chain.puml
+++ /dev/null
@@ -1,51 +0,0 @@
-@startuml
-title Supply Chain
-
-actor "Government" as Government
-actor "Manufacturer" as Manufacturer
-database "Blockchain" as Blockchain
-actor "Authorized Distributor" as Distributor
-actor "Retailer" as Retailer
-actor "Consumer" as Consumer
-actor "Illegal Retailer" as IllegalSeller
-
-Manufacturer -> Government : Applies for Excise license
-Government -> Blockchain : Issues a license to the Manufacturer\n as a verifiable credential
-Government -> Manufacturer : Excise license
-
-Manufacturer -> Government : Raises an order for a batch of Products
-Government -> Blockchain : Approves the Manufacturer's order\n including a list of Unique\n Identifiers (UID) for each product
-Government -> Manufacturer : Sends the UIDs
-Manufacturer -> Manufacturer : Delivers the UIDs to the secure Scan Trust Server
-Manufacturer -> Manufacturer : Prints custom labels during production\n associating the unique product identifier (PID)
-Manufacturer -> Blockchain : Posts associated product and manufacturing data\n Products are put into a “packaged” state
-
-Manufacturer -> Blockchain : The custody state is set to the\n “authorized distributor” and state is set to “shipped”
-Manufacturer -> Distributor : Ships the product
-
-Distributor -> Retailer : Ships products
-Distributor -> Blockchain : The custody state is set to the\n “retailer name” and state is set to “in store”
-
-Consumer -> Retailer : Picks a product and scans its QR Code
-Consumer -> Blockchain : Gets product details
-Blockchain -> Consumer : The product is authentic
-Consumer -> Retailer : Buys the product
-Retailer -> Blockchain : The custody state is set to the\n “Customer” and state is set to “sold”
-
-== When the Manufacturer cheats ==
-note right of Manufacturer : Assume the manufacturer attaches the same\n QR Code to several products\n which eventually get to the black market
-Consumer -> IllegalSeller : Picks a product and scans its QR Code
-Consumer -> Blockchain : Gets product details
-Blockchain -> Consumer : The product is not authentic\n the authentic one is with another Retailer
-Consumer -> Retailer : Refuses to buy the product
-
-== When the products get stolen ==
-note over Distributor : The Manufacturer shipped the products\n but they get stolen from the Distributor
-Distributor -> Blockchain : Marks the products as stolen
-Distributor -> Retailer : Notifies the event
-Distributor -> Manufacturer : Notifies the event
-Consumer -> IllegalSeller : Picks a product and scans its QR Code
-Consumer -> Blockchain : Gets product details
-Blockchain -> Consumer : The product was stolen
-Consumer -> IllegalSeller : Refuses to buy the product
-@enduml
diff --git a/docs/misc/wmc/prism_architecture.png b/docs/misc/wmc/prism_architecture.png
deleted file mode 100644
index e67df1743f..0000000000
Binary files a/docs/misc/wmc/prism_architecture.png and /dev/null differ
diff --git a/docs/misc/wmc/prism_architecture.puml b/docs/misc/wmc/prism_architecture.puml
deleted file mode 100644
index e5b95d1a39..0000000000
--- a/docs/misc/wmc/prism_architecture.puml
+++ /dev/null
@@ -1,69 +0,0 @@
-@startuml
-!include ../common/C4_header.puml
-
-Title Atala PRISM
-
-cloud "Cardano Mainnet" as Cardano_Mainnet {
- System_Boundary(Cardano_Boundary, "Cardano") {
- Container(Cloud_Cardano_Node, "Cardano Node", "Cardano", "Blockchain")
- }
-}
-
-System_Boundary(PRISM, "PRISM") {
- Container_Boundary(Node_Boundary, "Node Service") {
- ContainerDb(PRISM_Node_Db, "PRISM Node Db", "PRISM", "Slayer Indexing DB")
- Container(PRISM_Node, "PRISM Node", "PRISM", "Slayer Protocol API")
-
- Container(Cardano_Wallet_Backend, "Cardano Wallet Backend", "Cardano", "Write Path")
- ContainerDb(Cardano_Db_Sync, "Cardano DB Sync", "Cardano", "Read Path")
-
- Container(Cardano_Node, "Cardano Node", "Cardano", "Blockchain")
-
- Cardano_Wallet_Backend -down-> Cardano_Node
- Cardano_Db_Sync -down-> Cardano_Node
- }
-
- PRISM_Node -right-> PRISM_Node_Db
- PRISM_Node -down-> Cardano_Wallet_Backend
- PRISM_Node -down-> Cardano_Db_Sync
- Cardano_Node -right-> Cardano_Mainnet
-
- Container_Boundary(Vault_Boundary, "Vault Service") {
- Container(Vault, "Vault", "PRISM", "Vault API")
- Container(Vault_Db, "Vault Db", "PRISM", "Vault Storage")
-
- Vault -down-> Vault_Db
- }
-
- Container_Boundary(Connector_Boundary, "Connector Service") {
- Container(Connector, "Connector", "PRISM", "P2P Channel API")
- ContainerDb(Connector_Db, "Connector Db", "PRISM", "P2P Mailbox")
-
- Connector -down-> Connector_Db
- }
-
-
-
- Connector -right-> PRISM_Node
-
- Container_Boundary(MC, "Management Console [MC]") {
- Container(MC_Frontend, "MC Frontend", "PRISM", "MC App, Browser Wallet")
- Container(MC_Backend, "MC Backend", "PRISM", "MC API")
- ContainerDb(MC_Db, "MC Db", "PRISM", "Credentials Store")
-
- MC_Backend -up-> MC_Db
- MC_Backend -down-> Connector
- MC_Frontend -left-> MC_Backend
- }
-
- Container(Mobile_Wallet, "Mobile Wallet", "PRISM", "Android, iOS")
- Mobile_Wallet ---> Connector
- Mobile_Wallet ---> PRISM_Node
- Mobile_Wallet -up-> Vault
-
- Container(Browser_Wallet, "Browser Wallet", "PRISM", "Chrome, Firefox")
- MC_Frontend -right-> Browser_Wallet
- Browser_Wallet ---> PRISM_Node
- Browser_Wallet -> Connector
-}
-@enduml
\ No newline at end of file
diff --git a/docs/moe/full-offline-mode.md b/docs/moe/full-offline-mode.md
deleted file mode 100644
index 1f64473168..0000000000
--- a/docs/moe/full-offline-mode.md
+++ /dev/null
@@ -1,324 +0,0 @@
-# PRISM while Cardano is offline
-**WARNING: What is described in this document will not be implemented, and it is kept for historical records in case we want to revisit full offline mode capabilities again in the future**
-
-This document analyzes different approaches to keeping PRISM working when Cardano is isolated from the rest of the world.
-
-## Definitions
-- **PRISM Node**: An application that follows the Slayer protocol. It sends transactions with metadata to the Cardano blockchain, reads transactions confirmed by Cardano nodes, and interprets the relevant information from metadata in compliance with protocol rules.
-- **Atala operation**: Metadata attached to a Cardano transaction; it is used to encode protocol events like Create DID, Issue Credential, etc. See the Slayer protocol documentation for more details.
-- **Valid Atala operation**: An Atala operation that does not have any conflicts with the current state of PRISM Node.
-- **Atala transaction**: A Cardano transaction with a list of Atala operations in its metadata.
-- **Cardano confirmation**: A process in which the Cardano network verifies a Cardano transaction using its proof-of-stake consensus mechanism.
-- **Confirmed Cardano transaction**: A transaction that has received a sufficient number of Cardano confirmations (defined externally, usually 6).
-- **Confirmed Atala transaction**: An Atala transaction that is a confirmed Cardano transaction.
-- **Confirmed Atala operation**: An Atala operation that is a part of any confirmed Atala transaction.
-- **Unconfirmed Atala operation**: An Atala operation that is not a part of any confirmed Atala transaction.
-
-We will also use Atala operation and Atala object interchangeably in this document even though, technically speaking, an Atala object can contain several operations. The notion of Atala objects is more useful when discussing PRISM Node's internal design, while the notion of Atala operations is more useful from the end-user's perspective.
-
-## Background
-In the MoE deployment, the government can take the internet down for several weeks. When this happens, services run by the government can still communicate with each other, but no service can communicate with the outside world.
-
-Since all the PRISM services will be run by the government, PRISM services can still communicate with each other (be it connector/console/node/Cardano/etc.), which allows people to keep using the system for most operations, including interactions with the node. Of course, no new transactions will be pushed to Cardano, nor will we get new operations from it, because our Cardano instance is now disconnected from other Cardano nodes.
-
-The PRISM Node depends on the Cardano network to push changes to it (creating a DID, publishing credentials, etc), as well as to pull such changes from it once new Cardano transactions get confirmed.
-
-The goal is for PRISM to keep working when Cardano is disconnected; from now on, let's call this the `offline mode`. It mostly affects the PRISM Node because that is the only component interacting with Cardano, so everything that follows relates to the PRISM Node.
-
-## Offline mode
-
-### Current state
-The Offline mode works in this way:
-- Publishing anything that interacts with Cardano leaves the transaction in the Cardano mempool indefinitely, which is likely to fail eventually (we have neither tested nor prepared for this).
-- Credentials/DIDs that are created/updated in confirmed Atala transactions can be resolved.
-- A credential can be verified as long as the relevant data is in confirmed Atala objects.
-- Atala operations are only validated once they have been confirmed by Cardano.
-- Cardano is used as a logical clock for sorting Atala operations (i.e. there is a happened-before relation between any two Atala operations).
-
-### Goal
-We expect PRISM to be fully functional in offline mode, even if we pull new confirmed Atala transactions later when PRISM gets online.
-
-#### Publishing data
-- Since we cannot use Cardano as a logical clock while in offline mode, an order for unconfirmed Atala operations will have to be defined.
-- A new unconfirmed Atala operation needs to be validated both with respect to the confirmed Atala operations and with respect to the unconfirmed Atala operations that were published before it.
-- Unconfirmed Atala operations should be persistent and be able to survive system restart.
-- An unconfirmed Atala operation that was successfully validated (and hence potentially used by end-users) while PRISM Node was in offline mode is **not** guaranteed to be valid after PRISM Node goes back online. However, everything feasible from the PRISM Node's perspective should be done to confirm all unconfirmed Atala operations.
-
-#### Resolving Credentials/DIDs
-- Resolving data should have access to both operations that have been confirmed by Cardano and operations published during offline mode.
-- Resolving data should indicate whether the relevant operations have been confirmed by Cardano or not.
-
-#### Verifying a credential
-Let's define the new verification process in terms of the existing verification (e-verification/e-verified) process. A credential should be reported as:
-- **Verified** if
- * it can be e-verified by using exclusively confirmed Atala operations
- * it can be e-verified by using the union of confirmed and unconfirmed Atala operations
-- **Revoked** if
- * it is considered revoked according to the e-verification process on exclusively confirmed Atala operations
- * it is considered revoked according to the e-verification process on the union of confirmed and unconfirmed Atala operations
-- **Temporarily revoked** if:
- * it can be e-verified by using exclusively confirmed Atala operations
- * it is considered revoked according to the e-verification process on the union of confirmed and unconfirmed Atala operations
-- **Temporarily verified** if:
- * it does not exist according to the e-verification process on exclusively confirmed Atala operations
- * it can be e-verified by using the union of confirmed and unconfirmed Atala operations
-- **Nonexistent** otherwise
-
-The potential unreliability of temporary states must be clearly relayed to the end-users.
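-
-A minimal sketch of how these statuses could be computed, assuming each pair of bullets above is read as a conjunction and using illustrative names (this is not the real API):
-
-```scala
-// Result of running the existing e-verification process against a given set of operations.
-sealed trait EVerification
-case object EVerified extends EVerification    // the credential e-verifies successfully
-case object ERevoked extends EVerification     // the credential exists but is revoked
-case object ENonexistent extends EVerification // the credential is unknown
-
-sealed trait CombinedStatus
-case object Verified extends CombinedStatus
-case object Revoked extends CombinedStatus
-case object TemporarilyRevoked extends CombinedStatus
-case object TemporarilyVerified extends CombinedStatus
-case object Nonexistent extends CombinedStatus
-
-// confirmedOnly: e-verification over confirmed operations only.
-// total: e-verification over the union of confirmed and unconfirmed operations.
-def combinedStatus(confirmedOnly: EVerification, total: EVerification): CombinedStatus =
-  (confirmedOnly, total) match {
-    case (EVerified, EVerified)    => Verified
-    case (ERevoked, ERevoked)      => Revoked             // revocation is already confirmed on-chain
-    case (EVerified, ERevoked)     => TemporarilyRevoked  // revoked only by an unconfirmed operation
-    case (ENonexistent, EVerified) => TemporarilyVerified // issued only by an unconfirmed operation
-    case _                         => Nonexistent
-  }
-```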
-
-*Proposal*: We could define a threshold to consider rejecting any unconfirmed Atala operations that are too far away from the last point of synchronization with Cardano. For example, verifying a credential that was just issued via an unconfirmed Atala operation could succeed (with a warning), while verifying a credential that was issued a month ago could fail.
-
-
-## Proposal
-Update `node` to support the offline mode by keeping two states (a state includes a list of created DIDs, a list of issued credentials, etc.):
-- **Confirmed state** that holds PRISM Node's state as defined by an ordered list of confirmed Atala objects.
-- **Unconfirmed state** that holds PRISM Node's state as defined by an ordered list of unconfirmed Atala objects.
-
-Let's also define **total state** as a union of both confirmed and unconfirmed state.
-
-Each unconfirmed Atala object has an `atala_object_id` that distinguishes it and a status associated with it:
- * `UNKNOWN` for objects that do not exist.
- * `RECEIVED` for objects that have just been received and are not yet reflected in either of the states.
- * `REJECTED` for objects that are incompatible with the total state.
- * `TEMPORARILY_APPLIED` for objects that are now part of the unconfirmed state.
- * `APPLIED` for objects that have been confirmed and are now part of the confirmed state.
-
-The diagram below shows the status flow (validation and total state will be defined later in the document):
-![Operation Status Flow](../new-diagrams/node-operation-status-flow.png)
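-
-A rough sketch of the status transitions implied by the list above (an assumption; the diagram is the authoritative flow):
-
-```scala
-// Possible statuses of an unconfirmed Atala object, mirroring the list above.
-sealed trait AtalaObjectStatus
-case object Unknown extends AtalaObjectStatus
-case object Received extends AtalaObjectStatus
-case object Rejected extends AtalaObjectStatus
-case object TemporarilyApplied extends AtalaObjectStatus
-case object Applied extends AtalaObjectStatus
-
-def allowedTransitions(status: AtalaObjectStatus): Set[AtalaObjectStatus] = status match {
-  case Unknown            => Set(Received)                     // the object gets submitted
-  case Received           => Set(Rejected, TemporarilyApplied) // validated against the total state
-  case TemporarilyApplied => Set(Applied, Rejected)            // confirmed by Cardano, or conflicts with newly confirmed data
-  case Rejected           => Set.empty
-  case Applied            => Set.empty
-}
-```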
-
-### PRISM Node API
-Right now, any API that publishes data to Cardano follows this path:
-1. The request is received and then stored in the `atala_objects` table.
-2. The Atala object gets published to Cardano; the submission details are stored in the `atala_object_tx_submissions` table.
-3. A transaction id is returned to the client invoking the request.
-
-Remarks:
-1. The state is not updated until the Cardano transaction gets confirmed.
-2. Once the Cardano transaction is confirmed, the state is updated by applying the Atala Objects found in the transaction.
-3. The returned transaction id **is not final**: if the transaction gets retried, the transaction id changes, leaving the client with a non-existent transaction id.
-4. Every confirmed Atala Object gets processed sequentially, preventing race conditions.
-
-Taking the unconfirmed state into consideration, the proposed path becomes:
-1. The request is received and then stored in the `atala_objects` table.
-2. The Atala Object **does not get** published to Cardano (changed).
-3. The client gets back the `atala_object_id`, which is used to monitor the object state.
-4. The Atala Objects Background Processor (defined later) will process the Atala object asynchronously, updating PRISM Node's state accordingly.
-5. The Atala Objects Transaction Publisher (defined later) eventually publishes the Atala object to the Cardano ledger (regardless of whether we have internet connectivity at the moment of receiving the request).
-
-Remarks:
-1. The flow's complexity is delegated to the new Atala Objects Background Processor, which is defined later.
-
-#### gRPC API for Publishing Atala Operations
-Let us examine how the gRPC API is going to change according to this proposal. We will take the `CreateDID` RPC as an example. This is what its definition looks like right now:
-
-```proto
-service NodeService {
- rpc CreateDID(CreateDIDRequest) returns (CreateDIDResponse) {}
-}
-
-message CreateDIDRequest {
- SignedAtalaOperation signed_operation = 1; // The signed operation.
-}
-
-message CreateDIDResponse {
- string did = 1; // DID suffix
- TransactionInfo transaction_info = 2; // The on-chain transaction info where the DID is going to be created.
-}
-```
-
-The RPC itself and the request definitions will stay the same, but since we need to make transaction-related information non-mandatory in the API, the response will be changed to look like this:
-
-```proto
-message CreateDIDResponse {
- string did = 1; // DID suffix
- string operation_id = 2; // Operation id that identifies the published operation and can be used to track the operation status
-}
-```
-
-All other RPCs that publish new Atala operations (`UpdateDID`, `IssueCredentialBatch`, `RevokeCredentials`) will be changed analogously.
-
-#### gRPC API for Tracking Operation Status
-This is what is being used to track transaction statuses right now:
-
-```proto
-service NodeService {
- rpc GetTransactionStatus(GetTransactionStatusRequest) returns (GetTransactionStatusResponse) {}
-}
-```
-
-As mentioned before, we no longer want to expose transactions in the API where they are inapplicable, so this RPC will be replaced with the following one:
-
-```proto
-service NodeService {
- rpc GetOperationState(GetOperationStateRequest) returns (GetOperationStateResponse) {}
-}
-
-message GetOperationStateRequest {
- string operation_id = 1; // Operation id that was returned on operation submission
-}
-
-message GetOperationStateResponse {
- OperationState state = 1;
-}
-
-message OperationState {
- oneof result {
- UnknownOperationState unknown_operation_state = 1; // The operation is not a known PRISM Node operation.
- ReceivedOperationState received_operation_state = 2; // The operation has been accepted by PRISM Node, but has not been processed yet.
- RejectedOperationState rejected_operation_state = 3; // The operation has been processed by PRISM Node, but deemed to be invalid.
- TemporarilyAppliedOperationState temporarily_applied_operation_state = 4; // The operation has been processed by PRISM Node, but has not been confirmed by the blockchain yet.
- AppliedOperationState applied_operation_state = 5; // The operation has been confirmed by PRISM Node and the underlying blockchain; the operation is now considered immutable.
- }
-}
-
-message UnknownOperationState {}
-message ReceivedOperationState {
- google.protobuf.Timestamp received_at = 1;
-}
-message RejectedOperationState {
- google.protobuf.Timestamp rejected_at = 1;
- string reason = 2;
-}
-message TemporarilyAppliedOperationState {
- google.protobuf.Timestamp temporarily_applied_at = 1;
-}
-message AppliedOperationState {
- google.protobuf.Timestamp applied_at = 1;
- TransactionInfo transaction_info = 2;
-}
-```
-
-#### gRPC API for Fetching State Information
-Currently `LedgerData` is used to represent the state responses:
-```proto
-message LedgerData {
- string transaction_id = 1; // ID of the transaction.
- Ledger ledger = 2; // Ledger the transaction was published to.
- TimestampInfo timestamp_info = 3; // The timestamp of the protocol event.
-}
-```
-
-We will replace it with `OperationData`:
-```proto
-message OperationData {
- string operation_id = 1; // ID of the operation.
- OperationState state = 2; // Operation state.
-}
-```
-
-### Atala Objects Background Processor
-The Background Processor is a background job that processes incoming Atala objects and applies them to one of the relevant states (unconfirmed or confirmed), making sure that the states remain valid after the application. While this new service removes complexity from the other layers, it won't be trivial to implement.
-
-We will have two sets of tables:
-1. **Confirmed tables** `did_data_confirmed`, `public_key_confirmed`, `credential_batches_confirmed` and `revoked_credentials_confirmed` for representing confirmed state.
-2. **Unconfirmed tables** `did_data_unconfirmed`, `public_key_unconfirmed`, `credential_batches_unconfirmed` and `revoked_credentials_unconfirmed` for representing unconfirmed state.
-
-Practically, identical tables from both states could be merged into a common table (e.g. `did_data` with a boolean `verified` field). The developer would then need to be careful about what to pick as the primary key because, for example, two DIDs with the same suffix can be present in `did_data`: one representing the confirmed DID and one representing a modified unconfirmed DID. In this proposal, however, we want to highlight that nothing will be removed from the confirmed state tables while the unconfirmed state tables are transient, so we will treat these table sets as different entities.
-
-Let's define **total tables** as the pair-wise union of the confirmed and unconfirmed tables (e.g. `did_data_confirmed` and `did_data_unconfirmed`) where conflicts are resolved in favor of the unconfirmed table (e.g. an unconfirmed DID overwrites the old confirmed entry for the same DID).
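-
-An illustrative sketch of the "total tables" lookup for DIDs, with an assumed row shape and names; conflicts are resolved in favor of the unconfirmed row:
-
-```scala
-// A row as it could appear in did_data_confirmed / did_data_unconfirmed (illustrative).
-final case class DidRow(didSuffix: String, lastOperationHash: String, confirmed: Boolean)
-
-// Builds the "total" view: rows are keyed by DID suffix, and on a key conflict
-// the unconfirmed entry overwrites the confirmed one.
-def totalDidView(
-    confirmedRows: Map[String, DidRow],  // from did_data_confirmed
-    unconfirmedRows: Map[String, DidRow] // from did_data_unconfirmed
-): Map[String, DidRow] =
-  confirmedRows ++ unconfirmedRows
-```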
-
-The Background Processor will have a stream process that takes data from two sources:
-1. Confirmed Atala operations pulled from Cardano. For each confirmed Cardano transaction, we first check its compatibility with the data in the confirmed tables. If it is compatible, we apply the transaction by inserting into/updating the confirmed tables. Finally, we wipe the unconfirmed tables clean and try to reconstruct the unconfirmed state from scratch (the unconfirmed Atala objects are still in the database). This will, however, be very slow, and we propose to follow it only as the very first iteration. Later, we can improve the process dramatically by checking all rows in the unconfirmed tables for conflicts with the new confirmed operation (conflict detection and resolution are defined later in the document) instead of reconstructing the unconfirmed state.
-2. Unconfirmed Atala operations pulled from the new rows in the `atala_objects` table. Those will be checked for compatibility with the total tables and applied to the unconfirmed tables if compatible.
-
-All operations will be processed sequentially, as it is important not to have any race conditions in the background processor. The priority of Atala objects for processing is defined in the following order (a sketch of this ordering follows the list):
-1. Firstly, confirmed Atala operations pulled from Cardano sorted by the pair (block number, index of transaction inside the containing block) in ascending order.
-2. Secondly, unconfirmed Atala operations sorted by insertion time into the database.
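-
-A small sketch of this processing order, with illustrative types (the real implementation would read both sources from the database):
-
-```scala
-sealed trait PendingOperation
-final case class ConfirmedOp(blockNumber: Long, txIndex: Int) extends PendingOperation
-final case class UnconfirmedOp(insertedAtMillis: Long) extends PendingOperation
-
-// Confirmed operations come first, ordered by (block number, transaction index);
-// unconfirmed operations follow, ordered by database insertion time.
-val processingOrder: Ordering[PendingOperation] =
-  Ordering.by[PendingOperation, (Int, Long, Long)] {
-    case ConfirmedOp(block, txIndex) => (0, block, txIndex.toLong)
-    case UnconfirmedOp(insertedAt)   => (1, insertedAt, 0L)
-  }
-```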
-
-It is worth remarking that this process **does not** handle rollbacks from Cardano (yet).
-
-Total state can be used to respond to the user requests.
-
-#### Conflict resolution (Stage 2)
-As mentioned in the previous section, there is a more efficient approach to recalculating the unconfirmed state. Let's say we have a confirmed state `S1` and an unconfirmed state `S1'` on top of it. We observe a new operation `T` affecting `S1`, which results in a new confirmed state `S2`. Our thesis is that it is possible to express `S2'`, the new unconfirmed state, in terms of `S1'` with some transactions (or their reversals) applied. See Marcin's formalization document to gain more formal insight.
-
-An operation from the unconfirmed state is not necessarily compatible with the confirmed state (after the latter has been updated with new confirmed blocks). The following compatibility table shows whether two operations (one confirmed and one unconfirmed) on the same subject/with the same content are compatible:
-
-| Confirmed \ Unconfirmed | Create DID | Update DID | Issue Cred Batch | Revoke Creds |
-| ----------------------- | -------------- | ---------- | ---------------- | -------------- |
-| Create DID | Replaced | Compatible | Compatible | Compatible |
-| Update DID | Replaced (N/A) | Incompatible* | Depends** | Depends** |
-| Issue Cred Batch | Replaced (N/A) | Compatible | Replaced | Compatible |
-| Revoke Creds | Replaced (N/A) | Compatible | Replaced (N/A) | Incompatible* |
-
-- **N/A** means this combination cannot be on the same subject (and hence is compatible)
-- **Replaced** means that the unconfirmed Atala object should be rejected and the operation removed from the unconfirmed state. The author of the object should be notified that their operation failed, but that there is nothing to worry about: an identical operation has already been published by someone else.
-- **Replaced (N/A)** means that this combination should not normally happen, since the unconfirmed Atala object would have been replaced by some other confirmed Atala object before the current one.
-- **Compatible** means that the unconfirmed Atala object should be kept as temporarily applied, and the operation should be applied to the unconfirmed state. We will try to publish it once we finish catching up with Cardano.
-- **Incompatible** means that the unconfirmed Atala object can no longer be applied on top of the confirmed state. This is a serious issue, and we must notify the author of the Atala object and propose a solution. For example, if we have pulled a new `UpdateDID` operation from Cardano, then the unconfirmed `UpdateDID` must use that new operation's hash as the last referenced operation. Hence, a notification should pop up that says "hey, we have fixed this operation for you, review and sign if you are still happy with the transaction". This can potentially lead to a complicated chain of incompatible transactions and needs much more elaboration (should we leave this out for now?); moreover, if an element of the chain ignores the notification and does not re-sign the operation, the remainder of the chain will remain invalid forever.
-- **Depends** means that a deeper inspection of the operation is required to determine one of the other statuses
-
-\* Since `UpdateDID` and `RevokeCredentials` require specifying the last seen operation, they can no longer be applied in their submitted form. The author will need to sign a new operation that specifies the most recent operation on this DID/batch from the Cardano ledger as the last operation.
-
-\*\* If the key used for issuance/revocation in the unconfirmed operation is also the one being revoked in the confirmed `UpdateDID`, then the operations are incompatible; otherwise, they are compatible.
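-
-A hypothetical sketch of this compatibility check between a newly confirmed operation and an unconfirmed operation on the same subject, encoding the table above (the "Depends" cases are reduced to a single flag):
-
-```scala
-sealed trait OperationKind
-case object CreateDid extends OperationKind
-case object UpdateDid extends OperationKind
-case object IssueCredentialBatch extends OperationKind
-case object RevokeCredentials extends OperationKind
-
-sealed trait Compatibility
-case object Compatible extends Compatibility   // keep as temporarily applied, publish later
-case object Replaced extends Compatibility     // drop: an identical/overriding operation is already confirmed
-case object Incompatible extends Compatibility // must be re-signed against the new confirmed state
-
-// usesRevokedKey: whether the unconfirmed operation uses a key revoked by the confirmed UpdateDID.
-def compatibility(confirmed: OperationKind, unconfirmed: OperationKind, usesRevokedKey: Boolean): Compatibility =
-  (confirmed, unconfirmed) match {
-    case (CreateDid, CreateDid)                       => Replaced
-    case (CreateDid, _)                               => Compatible
-    case (_, CreateDid)                               => Replaced // "Replaced (N/A)": should not happen on the same subject
-    case (UpdateDid, UpdateDid)                       => Incompatible // must reference the new last operation hash
-    case (UpdateDid, _)                               => if (usesRevokedKey) Incompatible else Compatible // "Depends"
-    case (IssueCredentialBatch, IssueCredentialBatch) => Replaced
-    case (IssueCredentialBatch, _)                    => Compatible
-    case (RevokeCredentials, RevokeCredentials)       => Incompatible // must reference the new last operation hash
-    case (RevokeCredentials, IssueCredentialBatch)    => Replaced // "Replaced (N/A)"
-    case (RevokeCredentials, _)                       => Compatible
-  }
-```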
-
-TODO:
-1. We need an algorithm for proposing fixes for incompatible conflicts.
-2. How to notify an issuer of the conflict and relay the proposed fix to them? We can probably use the connector's capability to relay e2e messages and devise a special message type that would carry the proposed fix to the user's wallet.
-3. If the issuer ignores our notification, we need to be able to notify a higher-level entity in the MoE hierarchy. Some kind of hierarchical notification system must be designed to come with this proposal.
-
-### Atala Objects Transaction Publisher
-This is a background job that continuously monitors temporarily applied Atala objects and publishes them as Cardano transactions.
-
-While in online mode, Transaction Publisher will publish transactions one by one. In other words, Transaction Publisher will wait until its last submitted transaction gets confirmed before submitting a new one. We will call this last submitted unconfirmed transaction the **blocking transaction**. While this might seem slow, it is actually necessary in order to keep the order in which the objects were submitted by the users. Consider a transaction `Y` that depends on a transaction `X`. `X` gets posted first and `Y` second. Later, for some reason, `X` fails while `Y` gets confirmed. Resubmitting `X` could succeed, but `Y` could now be invalid because it depends on the state from `X`.
-
-Upon entering offline mode, Transaction Publisher will cancel the blocking transaction (via [Cardano forget transaction API](https://input-output-hk.github.io/cardano-wallet/api/edge/#operation/deleteTransaction)) and will be idle until we exit offline mode and catch up with Cardano.
-
-The general description above leaves three questions unanswered:
-1. How to detect whether we entered offline mode?
-2. How to detect when we exited the offline mode?
-3. How to detect when we caught up with Cardano?
-
-For the first question, we propose to use a well-known external third-party service and ping it every minute. Once we get 3 confirmations that the external service is unreachable, we assume that we have entered offline mode. We propose to use Google DNS (8.8.8.8) as the external service of our choice.
-
-The answer to the second question is symmetrical to the first one: we ping the external service at increasing time intervals (5s/15s/1m/5m/30m) until we can successfully reach it. Once we do, PRISM Node is considered to be out of offline mode.
-
-Finally, to answer the third question, we need to monitor the latest block reported by Cardano Wallet and the latest operation processed by the Background Processor (reflected in the `atala_objects` table). Once they get reasonably close together, we can declare that PRISM Node has caught up with Cardano again.
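-
-A minimal sketch of the offline/online detection described above; the target (8.8.8.8), the failure threshold, and the probe intervals come from the text, everything else is an assumption:
-
-```scala
-import java.net.InetAddress
-import scala.concurrent.duration._
-
-val checkTarget = "8.8.8.8"   // well-known external service (Google DNS)
-val failuresBeforeOffline = 3 // consecutive failed pings before we assume offline mode
-val probeIntervals = List(5.seconds, 15.seconds, 1.minute, 5.minutes, 30.minutes)
-
-// One reachability check; any error counts as "unreachable".
-def isReachable(timeoutMillis: Int = 2000): Boolean =
-  try InetAddress.getByName(checkTarget).isReachable(timeoutMillis)
-  catch { case _: Exception => false }
-
-// While online we ping every minute; this decides whether we have entered offline mode.
-def enteredOfflineMode(consecutiveFailures: Int): Boolean =
-  consecutiveFailures >= failuresBeforeOffline
-
-// While offline we probe with increasing intervals until the target is reachable again.
-def nextProbeDelay(attempt: Int): FiniteDuration =
-  probeIntervals.lift(attempt).getOrElse(probeIntervals.last)
-```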
-
-#### Batching (Stage 2)
-Transaction Publisher can batch several consecutive operations together in order to save some ADA. How long Transaction Publisher should wait for a potential batch to form, and what cost implications this will entail, is left for further analysis.
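-
-Illustrative only: a trivial grouping of consecutive pending operations into bounded batches; the actual batch size and waiting policy are the open questions mentioned above:
-
-```scala
-// Groups operations in submission order into batches of at most maxBatchSize.
-def batchOperations[A](pendingInOrder: List[A], maxBatchSize: Int): List[List[A]] =
-  pendingInOrder.grouped(maxBatchSize).toList
-```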
-
-
-### Infrastructure
-The deployment is expected to run a single database for the Node, with one or many Nodes connecting to it (as long as multiple nodes updating the database does not cause conflicts).
-
-
-## Alternatives
-
-### New service between clients and node
-Internally we have discussed adding a new micro-service on top of the Node: a centralized service that posts updates to Cardano. Such a service would handle all the operations by querying its local database.
-
-The reasons to prefer updating Node instead of adding a new service are:
-- The new micro-service will likely implement the same interface as the Node; it has to understand the Slayer 2 protocol and needs to be aware of Cardano. Adding a new service means that we'd have two services handling mostly the same things, whereas the offline mode could simply be a setting enabled only when needed.
-- Adding a new service can hide the Node dependency requirement because PRISM would work normally even without it; the team maintaining the infrastructure could easily forget to get the Node back working because it might seem unnecessary.
-- It would make PRISM deployments diverge, because some of them will require the offline mode while others may not. With a new service, every application involved has to be aware of this, and deploying different mobile apps just to enable it can be tricky.
-
-### New service between node and Cardano
-We have also discussed adding a micro-service in between `node` and Cardano. The idea is to make use of the current `node`'s abstractions and move everything related to the Cardano ledger to this service. Such a service would have a notion of a transaction being pre-published. `node` would see both published and pre-published transactions. In case of losing connectivity, this component would store pending transactions and attempt to publish them when connectivity is recovered.
-
-The reasons to prefer updating Node instead of adding a new service are:
-- The `node` would still have to know about the difference between confirmed and unconfirmed transactions. So a certain amount of code duplication will be necessary.
-- The current abstraction will not be very useful as it presumes the immutability of the ledger. This is not the case with unconfirmed state.
-- All data will be duplicated both on `node` and on such service.
-
-### Client-side queues
-We also discussed keeping the pending operations in the clients, retrying until Cardano is available.
-
-The main reason not to follow this approach is that it won't meet the MoE goal of keeping PRISM working regardless of Cardano's connectivity.
-
-### Allow Cardano to connect with the outside world
-Carlos already proposed this but the government won't allow it.
-
-## Conclusion
-We propose to implement offline mode in three stages:
-- **Stage 1**.
- - Introduce the breaking changes into gRPC API.
- - Implement Atala Objects Background Processor with naive unconfirmed state reconstruction.
- - Implement Atala Objects Transaction Publisher without batching. (TODO: there are still many details missing on how other parts of the publisher are supposed to work such as the offline mode detection)
-- **Stage 2**.
- - Design/implement conflict detection process for the Background Processor.
- - Implement batching for Atala Objects Transaction Publisher.
-- **Stage 3** (will be designed in a separate document).
- - Design/implement conflict resolution process for the Background Processor.
- - Design/implement a user notification system for conflict resolution proposals.
- - Design/implement a hierarchical notification system for notifying higher-level MoE entities in the event of the original issuer ignoring the notification.
diff --git a/docs/moe/offline-mode.md b/docs/moe/offline-mode.md
deleted file mode 100644
index da38ff3027..0000000000
--- a/docs/moe/offline-mode.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# PRISM Offline Mode
-This document describes an approach to keeping some PRISM functionality working while our Cardano node is isolated from the rest of the world.
-
-## Context
-In the context of the Ministry of Education (MoE) deployment, the Ethiopian government can take the internet down for several weeks. When this happens, services run by the government can still communicate with each other, but no service inside Ethiopia can communicate with the outside world.
-
-Since all PRISM services will be run by the government (see the on-premise deployment design document for more details), PRISM services can still communicate with each other (be it Connector/Console/PRISM Node/Cardano Node, etc.). This allows clients to keep using the system for most operations, including interactions with the PRISM node. As our Cardano instance will be disconnected from other Cardano nodes, no new transactions will be published on Cardano, nor will we get new operations from it.
-
-PRISM Node depends on the Cardano network to push changes to it (creating a DID, publishing credentials, etc), as well as to pull such changes from it once new Cardano transactions get confirmed.
-
-## Goal
-The goal of this document is to analyze the current state of affairs with respect to the MoE requirements and propose a way to streamline users' experience with PRISM in such **offline mode**.
-
-The gathered requirements on offline mode are as follows:
-- Users should be able to create and use unpublished DIDs.
-- Users should be able to create contacts.
-- Users should be able to establish connections with each other via connection tokens, including recently generated ones.
-- Issuers should be able to create credentials and share them with contacts.
-- Verifiers should be able to verify credentials that have already been published on Cardano, but the resulting verification status should be tied to a timestamp of the latest confirmed Cardano block.
-- Verifiers should be able to validate signatures of credentials that have not been published on Cardano yet. The resulting verification status should clearly reflect that the credential cannot be verified yet, but was indeed issued by the correct issuer (assuming that the issuer's key is published and confirmed by PRISM Node).
-
-Note that users will not be able to publish new DIDs, update existing DIDs, revoke confirmed credentials or publish new credentials.
-
-## Current State
-Let's analyze the requirements and identify what is supported by PRISM as is:
-- *Creating and using unpublished DIDs*. Already supported: unpublished DIDs can be generated client-side without any outside connectivity. The generated unpublished DID can be used for authorization, but not for issuing credentials (as the key used must be an issuing key and not the master key).
-- *Creating contacts*. Already supported: creating a contact only involves the management console and connector backends.
-- *Establish connections*. Already supported: establishing connections only involves the connector backend.
-- *Creating and sharing credentials*. An issuer can create a credential and generate the Merkle tree root with proofs without invoking any PRISM services. Sharing the credential, however, involves publishing the credential issuance batch on PRISM Node first. In response, we get the id of the transaction that contains our issuance batch operation. The issuer can then monitor the status of the operation to decide when is the right time to share the credential with the holder. This approach has some shortcomings that we need to address:
- - If the initial transaction fails, PRISM Node will try to resubmit it, resulting in a new transaction with a new id. This is especially important in offline mode, as the transaction will be failing constantly. We need to figure out a way to identify operations differently.
- - Since we will not have a transaction id anymore, we will need an alternative way to check operation status.
-- *Verifiers should be able to verify published credentials*. Mostly supported: PRISM Node holds the confirmed state in its database and will be able to respond while Cardano is offline. However, the response is not timestamped with the latest Cardano block processed by PRISM Node.
-- *Verifiers should be able to validate signatures of unpublished credentials*. Already supported: if the holder shares an unpublished credential with the verifier, the verifier can easily check the signature on their side and decide how to treat the credential accordingly.
-
-## Proposal
-The analysis of the current state of affairs has shown that most of the required functionality is already supported by PRISM. Let's tackle the remaining couple of problems:
-- *A new way to identify operations*. We already have a database table `atala_objects` that holds all submitted Atala objects along with their `atala_object_id`, but each object can actually contain multiple operations, so the Atala object id is not usable for identifying Atala operations. Let's introduce a new table `atala_operations` with an `atala_operation_id` primary key. Since the Slayer protocol does not allow two identical Atala operations to be posted, we can use the operation's SHA-256 hash as the identifier (a sketch follows this list). We will also include a foreign key for keeping track of the Atala object the operation belongs to.
-- *A new way to check operation status*. We propose to implement a new RPC `GetOperationState` in PRISM Node that, given an Atala operation id, would respond with one of the following statuses:
- - Unknown: the operation is not a recognized Atala operation.
- - Pending submission: the operation was received by the node, but has not been sent to the blockchain yet.
- - Awaiting confirmation: the operation has been sent to the blockchain, but has not been confirmed yet.
- - Confirmed and applied: the operation has been confirmed in the blockchain and was successfully applied to the PRISM Node's state. The response will also include transaction id.
- - Confirmed and rejected: the operation has been confirmed in the blockchain and was deemed invalid according to the PRISM Node's state. The response will also include a reason for rejection.
-- *Timestamped responses*. We propose to add a new timestamp field to the `GetDidDocument`, `GetOperationState`, `GetBatchState` and `GetCredentialRevocationTime` responses. The timestamp will represent the last time PRISM Node processed a new Cardano block (regardless of whether it contained Atala objects). We can update it at the same time we update the last processed block and store it in the existing `key_values` table.
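-
-A sketch of deriving the proposed operation identifier, assuming it is the SHA-256 hash of the encoded signed operation (names are illustrative):
-
-```scala
-import java.security.MessageDigest
-
-// atala_operation_id: hex-encoded SHA-256 of the encoded SignedAtalaOperation bytes.
-def atalaOperationId(signedOperationBytes: Array[Byte]): String = {
-  val digest = MessageDigest.getInstance("SHA-256").digest(signedOperationBytes)
-  digest.map(b => f"${b & 0xff}%02x").mkString
-}
-```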
-
-### Queue for requests
-Aside from these functional requirements, we also need to be able to hold all unconfirmed operations in PRISM Node so we can publish them at a future date once Cardano comes back online. As mentioned above, we persist all incoming Atala objects in the `atala_objects` table, which is exactly what we want: a list of objects (containing Atala operations) that have not been published yet.
-
-Whether these Atala objects are going to be published correctly once Cardano comes back online remains an open question, however. We already have a mechanism for retrying old transactions, so occasionally we will try to resubmit transactions to the wallet. One thing we can also try is submitting transactions without TTL, as was suggested by the Adrestia team [in Slack](https://input-output-rnd.slack.com/archives/C819S481Y/p1621355796019700).
-
-### Batching
-Although not directly related to the proposal, we think it is also worth mentioning batching capabilities for PRISM Node. Currently, we create a Cardano transaction for each Atala operation. We can improve the process by grouping multiple operations into a single Atala object. This will allow us to save on Cardano fees and increase the system's throughput.
-
-The following must be considered for the implementation:
-- Since we do not have a defined order of Atala operations (unlike, for example, Ethereum, where the nonce represents the order of transactions from a given address), we can batch transactions in the order they were received. This should be revisited if/when we introduce an ordering mechanism.
-- Until we have an ordering mechanism, DID key revocation operations cannot be safely sent along with operations that use that key within the same block.
-- PRISM Node also allows submitting full Atala objects (as opposed to individual operations). We should flatten such objects into their individual operations, but make sure that all the operations from one object end up in the same transaction.
-
-Note that the solution to the problems discussed above will involve deep introspection of incoming operations (something the Node does not do until an operation gets confirmed). Hence, this effort can be paired with pre-apply checks.
diff --git a/docs/monitoring/logging/README.md b/docs/monitoring/logging/README.md
index 4a0d42e160..cd4ca3d40e 100644
--- a/docs/monitoring/logging/README.md
+++ b/docs/monitoring/logging/README.md
@@ -4,12 +4,11 @@ This document was created to give you some insight into backend logging.
## Tools
-The preferable tool used in this project is [Tofu logging](https://docs.tofu.tf/docs/tofu.logging.home)
+The preferred tool used in this project is [Tofu logging](https://github.com/tofu-tf/tofu),
which has nice syntax and produces effectual logs (`F[Unit]`).
In general, Tofu in our project produces structured and contextual logs.
-That means every logged value will be reflected as a structure in the log output (we use
-[ELKLayout](https://docs.tofu.tf/docs/tofu.logging.layouts#layouts) in every service)
+That means every logged value will be reflected as a structure in the log output in every service
together with `trace-id`.
`trace-id` itself is a string that will be generated on a request or will be parsed from the response header (grpc metadata)
diff --git a/docs/monitoring/metrics/README.md b/docs/monitoring/metrics/README.md
index 6976754e9f..528a3648e2 100644
--- a/docs/monitoring/metrics/README.md
+++ b/docs/monitoring/metrics/README.md
@@ -6,9 +6,8 @@ These services use Kamon + Kanela agent for publishing metrics.
The default endpoint for metrics is `/metrics` on `9095` port.
## Basic available metrics
-Following metrics by name available for collection (both in [Prometheus](http://3.141.27.100:9090/) and [Grafana](http://3.141.27.100:3000/) `admin/iohk4Ever`).
-They contain basic tags such as job/instance and custom tags which are special for every custom metric.
+Metrics contain basic tags, such as job/instance, and custom tags that are specific to each custom metric.
Also, you can find basic JVM metrics in the Prometheus/Grafana. Basic JVM metrics can be turned off in the Kamon config.
@@ -140,9 +139,4 @@ Also, you can find basic JVM metrics in the Prometheus/Grafana. Basic JVM metric
### Note: After restarting the service, all of these metrics will be reset.
-# How to add metrics to the scala backend service
-1. Make your module depends on common (common already have Kamon dependency).
-2. Add `Kamon.init()` to your module init.
-3. Add metrics into your code using utils from [prism-backend/common/src/main/scala/io/iohk/atala/prism/metrics/](https://github.com/input-output-hk/atala-tobearchived/tree/develop/prism-backend/common/src/main/scala/io/iohk/atala/prism/metrics).
-
For the full manual, you can check [Kamon official doc](https://kamon.io/docs/latest/guides/).
diff --git a/docs/new-diagrams/01-issuer-registration.png b/docs/new-diagrams/01-issuer-registration.png
deleted file mode 100644
index c5523a9b4c..0000000000
Binary files a/docs/new-diagrams/01-issuer-registration.png and /dev/null differ
diff --git a/docs/new-diagrams/01-issuer-registration.puml b/docs/new-diagrams/01-issuer-registration.puml
deleted file mode 100644
index d48adfb84f..0000000000
--- a/docs/new-diagrams/01-issuer-registration.puml
+++ /dev/null
@@ -1,20 +0,0 @@
-@startuml
-title Issuer Registration
-
-actor Issuer
-participant "Management Console" as ManagementConsole
-participant "Prism Browser Wallet" as BrowserWallet
-participant "IOHK Server API" as ServerAPI
-
-group Register
- Issuer -> Issuer : Install Prism Wallet\n from Chrome/Firefox webstore
- Issuer -> BrowserWallet : Register
- BrowserWallet -> BrowserWallet : Generate DID
- BrowserWallet -> ServerAPI : Register DID
- ServerAPI -> ServerAPI : Publish DID on Cardano
- ServerAPI -> ServerAPI : Register in the server database
- ServerAPI -> BrowserWallet : DID registered
- BrowserWallet -> BrowserWallet : Encrypt wallet data
- BrowserWallet -> Issuer : Registration successful
-end
-@enduml
diff --git a/docs/new-diagrams/02-login.png b/docs/new-diagrams/02-login.png
deleted file mode 100644
index 58fcb00c0a..0000000000
Binary files a/docs/new-diagrams/02-login.png and /dev/null differ
diff --git a/docs/new-diagrams/02-login.puml b/docs/new-diagrams/02-login.puml
deleted file mode 100644
index 880a33c811..0000000000
--- a/docs/new-diagrams/02-login.puml
+++ /dev/null
@@ -1,17 +0,0 @@
-@startuml
-title Issuer Login
-
-actor Issuer
-participant "Management Console" as ManagementConsole
-participant "Prism Browser Wallet" as BrowserWallet
-
-group Login
- Issuer -> ManagementConsole : Login
- ManagementConsole -> BrowserWallet : Login
- BrowserWallet -> Issuer : Asks to confirm logging\n in to the Management Console
- Issuer -> BrowserWallet : Approve login
- BrowserWallet -> ManagementConsole : Login successful
- ManagementConsole -> Issuer : Login successful
- note right of Issuer : Now every operation done in the\nManagement Console uses the DID as\n the authentication mechanism
-end
-@enduml
diff --git a/docs/new-diagrams/03-onboarding-persons.png b/docs/new-diagrams/03-onboarding-persons.png
deleted file mode 100644
index 2dbb165054..0000000000
Binary files a/docs/new-diagrams/03-onboarding-persons.png and /dev/null differ
diff --git a/docs/new-diagrams/03-onboarding-persons.puml b/docs/new-diagrams/03-onboarding-persons.puml
deleted file mode 100644
index ed848aa02f..0000000000
--- a/docs/new-diagrams/03-onboarding-persons.puml
+++ /dev/null
@@ -1,31 +0,0 @@
-@startuml
-title Connect with a person
-
-actor Issuer
-participant "Management Console" as ManagementConsole
-participant "IOHK Server API" as ServerAPI
-participant "Prism Mobile Wallet" as MobileWallet
-actor Holder
-
-group Connect to the holder's mobile wallet
- Issuer -> ManagementConsole : Create a person
- ManagementConsole -> ServerAPI : Create a person
- ServerAPI -> ManagementConsole : Person created
- ManagementConsole -> Issuer : Person created
-
- Issuer -> ManagementConsole : Generate connection token
- ManagementConsole -> ServerAPI : Generate connection token
- ServerAPI -> ManagementConsole : Connection token
- ManagementConsole -> Issuer : Connection token as QR Code
-
- Issuer -> Holder : Share QR Code
- Holder -> MobileWallet : Scan QR Code
- MobileWallet -> ServerAPI : Find QR Code details
- ServerAPI -> MobileWallet : Issuer details
- MobileWallet -> Holder : Asks to confirm connection
- Holder -> MobileWallet : Confirm connection
- MobileWallet -> ServerAPI : Accept connection
- ServerAPI -> MobileWallet : Connected
- MobileWallet -> Holder : Connected
-end
-@enduml
\ No newline at end of file
diff --git a/docs/new-diagrams/04-create-credential-group.png b/docs/new-diagrams/04-create-credential-group.png
deleted file mode 100644
index cd3ca1fd3e..0000000000
Binary files a/docs/new-diagrams/04-create-credential-group.png and /dev/null differ
diff --git a/docs/new-diagrams/04-create-credential-group.puml b/docs/new-diagrams/04-create-credential-group.puml
deleted file mode 100644
index d481b12c39..0000000000
--- a/docs/new-diagrams/04-create-credential-group.puml
+++ /dev/null
@@ -1,22 +0,0 @@
-@startuml
-title Create a credential group
-
-actor Issuer
-participant "Management Console" as ManagementConsole
-participant "IOHK Server API" as ServerAPI
-
-group Create a credential group
- Issuer -> ManagementConsole : Create a credential group
- ManagementConsole -> Issuer : Asks to choose the credential type
- Issuer -> ManagementConsole : Choose credential type
- ManagementConsole -> Issuer : Asks to choose the credential\ndetails for the whole group
- Issuer -> ManagementConsole : Credential details
- ManagementConsole -> Issuer : Asks to choose the persons\ninvolved in the credential
- Issuer -> ManagementConsole : Choose the person to issue the credential to
- ManagementConsole -> Issuer : Asks for the person details required by the credential
- Issuer -> ManagementConsole : Person details
- ManagementConsole -> ServerAPI : Create credential group
- ServerAPI -> ManagementConsole : Credential group created
- ManagementConsole -> Issuer : Credential group created
-end
-@enduml
diff --git a/docs/new-diagrams/05-issue-credential.png b/docs/new-diagrams/05-issue-credential.png
deleted file mode 100644
index dd1b5580a4..0000000000
Binary files a/docs/new-diagrams/05-issue-credential.png and /dev/null differ
diff --git a/docs/new-diagrams/05-issue-credential.puml b/docs/new-diagrams/05-issue-credential.puml
deleted file mode 100644
index f20ef7e01a..0000000000
--- a/docs/new-diagrams/05-issue-credential.puml
+++ /dev/null
@@ -1,22 +0,0 @@
-@startuml
-title Issue credential
-
-actor Issuer
-participant "Management Console" as ManagementConsole
-participant "Prism Browser Wallet" as BrowserWallet
-participant "IOHK Server API" as ServerAPI
-
-group Issue the credential to the group
- Issuer -> ManagementConsole : Issue credential group
- ManagementConsole -> BrowserWallet : Issue credential group
- BrowserWallet -> Issuer : Asks to confirm the operation
- Issuer -> BrowserWallet : Confirm
- BrowserWallet -> BrowserWallet : Sign credentials
- BrowserWallet -> ServerAPI : Store and issue credentials
- ServerAPI -> ServerAPI : Add issuance proof to Cardano
- ServerAPI -> ServerAPI : Store the signed credentials
- ServerAPI -> BrowserWallet : Credentials issued
- BrowserWallet -> ManagementConsole : Credentials issued
- ManagementConsole -> Issuer : Credentials issued
-end
-@enduml
diff --git a/docs/new-diagrams/06-share-credential-with-holder.png b/docs/new-diagrams/06-share-credential-with-holder.png
deleted file mode 100644
index 649c9cf12d..0000000000
Binary files a/docs/new-diagrams/06-share-credential-with-holder.png and /dev/null differ
diff --git a/docs/new-diagrams/06-share-credential-with-holder.puml b/docs/new-diagrams/06-share-credential-with-holder.puml
deleted file mode 100644
index 74711621de..0000000000
--- a/docs/new-diagrams/06-share-credential-with-holder.puml
+++ /dev/null
@@ -1,22 +0,0 @@
-@startuml
-title Share credential
-
-actor Issuer
-participant "Management Console" as ManagementConsole
-participant "Prism Browser Wallet" as BrowserWallet
-participant "IOHK Server API" as ServerAPI
-participant "Prism Mobile Wallet" as MobileWallet
-
-group Share the credential to the group
- Issuer -> ManagementConsole : Share credential group
- ManagementConsole -> BrowserWallet : Encrypt credentials group
- BrowserWallet -> Issuer : Asks to confirm the operation
- Issuer -> BrowserWallet : Confirm
- BrowserWallet -> BrowserWallet : Encrypt credentials
- BrowserWallet -> ManagementConsole : Encrypted credentials
- ManagementConsole -> ServerAPI : Send encrypted credentials
- ServerAPI -> MobileWallet : Send credential
- ServerAPI -> ManagementConsole : Credentials sent
- ManagementConsole -> Issuer : Credentials sent
-end
-@enduml
\ No newline at end of file
diff --git a/docs/new-diagrams/07-share-credential-to-verify.png b/docs/new-diagrams/07-share-credential-to-verify.png
deleted file mode 100644
index b5a192e388..0000000000
Binary files a/docs/new-diagrams/07-share-credential-to-verify.png and /dev/null differ
diff --git a/docs/new-diagrams/07-share-credential-to-verify.puml b/docs/new-diagrams/07-share-credential-to-verify.puml
deleted file mode 100644
index 3d26f69539..0000000000
--- a/docs/new-diagrams/07-share-credential-to-verify.puml
+++ /dev/null
@@ -1,20 +0,0 @@
-
-@startuml
-title Share credential to be verified
-
-participant "IOHK Server API" as ServerAPI
-participant "Prism Mobile Wallet" as MobileWallet
-actor Holder
-
-group Share the credential to the verifier entity
- Holder -> MobileWallet : View credential to share
- MobileWallet -> Holder : Credential
- Holder -> MobileWallet : Share
- MobileWallet -> Holder : Ask who to share with
- Holder -> MobileWallet : Choose recipient
- MobileWallet -> MobileWallet : Encrypt credential
- MobileWallet -> ServerAPI : Share encrypted credential
- ServerAPI -> MobileWallet : Credential shared
- MobileWallet -> Holder : Credential shared
-end
-@enduml
diff --git a/docs/new-diagrams/08-verify-credential-steps.png b/docs/new-diagrams/08-verify-credential-steps.png
deleted file mode 100644
index 92f0f96264..0000000000
Binary files a/docs/new-diagrams/08-verify-credential-steps.png and /dev/null differ
diff --git a/docs/new-diagrams/08-verify-credential-steps.puml b/docs/new-diagrams/08-verify-credential-steps.puml
deleted file mode 100644
index 4e385b2188..0000000000
--- a/docs/new-diagrams/08-verify-credential-steps.puml
+++ /dev/null
@@ -1,32 +0,0 @@
-
-@startuml
-title Verify a credential
-
-actor Verifier
-participant "Management Console" as ManagementConsole
-participant "Prism Browser Wallet" as BrowserWallet
-participant "IOHK Server API" as ServerAPI
-
-group Get a credential shared from a mobile wallet
- Verifier -> ManagementConsole : Choose a person
- ManagementConsole -> Verifier : Ask to choose the credential
- Verifier -> ManagementConsole : Choose credential
- ManagementConsole -> ServerAPI : Get credential
- ServerAPI -> ManagementConsole : Credential
-end
-
-group Verify a credential
- ManagementConsole -> BrowserWallet : Verify credential
- BrowserWallet -> BrowserWallet : Decrypt credential
- BrowserWallet -> ServerAPI : Get credential state from Cardano
- ServerAPI -> BrowserWallet : Credential state
- BrowserWallet -> ServerAPI : Get issuer DID from Cardano
- ServerAPI -> BrowserWallet : DID Document
- BrowserWallet -> BrowserWallet : Verify credential state isn't revoked/expired
- BrowserWallet -> BrowserWallet : Verify issuance date
- BrowserWallet -> BrowserWallet : Verify issuer key was valid when issuing the credential
- BrowserWallet -> BrowserWallet : Verify issuer key signed the credential
- BrowserWallet -> ManagementConsole : Verification result
- ManagementConsole -> Verifier : Verification result
-end
-@enduml
diff --git a/docs/new-diagrams/09-revoke-credential.png b/docs/new-diagrams/09-revoke-credential.png
deleted file mode 100644
index 7ac400ce40..0000000000
Binary files a/docs/new-diagrams/09-revoke-credential.png and /dev/null differ
diff --git a/docs/new-diagrams/09-revoke-credential.puml b/docs/new-diagrams/09-revoke-credential.puml
deleted file mode 100644
index 4290b9dcaa..0000000000
--- a/docs/new-diagrams/09-revoke-credential.puml
+++ /dev/null
@@ -1,23 +0,0 @@
-@startuml
-title Revoke credential
-
-actor Issuer
-participant "Management Console" as ManagementConsole
-participant "Prism Browser Wallet" as BrowserWallet
-participant "IOHK Server API" as ServerAPI
-
-group Revoke the credential
- Issuer -> ManagementConsole : Find credentials
- ManagementConsole -> Issuer : Credentials
- Issuer -> ManagementConsole : Choose a credential to revoke
- ManagementConsole -> BrowserWallet : Revoke credential
- BrowserWallet -> Issuer : Asks to confirm the operation
- Issuer -> BrowserWallet : Confirm
- BrowserWallet -> BrowserWallet : Sign revoke operation
- BrowserWallet -> ServerAPI : Submit revocation
- ServerAPI -> ServerAPI : Add revocation proof to Cardano
- ServerAPI -> BrowserWallet : Credential revoked
- BrowserWallet -> ManagementConsole : Credential revoked
- ManagementConsole -> Issuer : Credentials revoked
-end
-@enduml
diff --git a/docs/new-diagrams/README.md b/docs/new-diagrams/README.md
deleted file mode 100644
index d1dae83d92..0000000000
--- a/docs/new-diagrams/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# New diagrams
-
-This folder includes the architecture/sequence diagrams, updated in August 2020.
-
-## New group flow
-
-Describes the proposal to let organizations import their data into our system:
-![diagram](new-group-flow.png)
-
-## New flow description for the commercial team
-
-### Current architecture
-![diagram](architecture.png)
-
-### Issuer registration
-![diagram](01-issuer-registration.png)
-
-### Login
-![diagram](02-login.png)
-
-### Onboarding contacts
-![diagram](03-onboarding-persons.png)
-
-### Create credential group
-![diagram](04-create-credential-group.png)
-
-### Issue credential
-![diagram](05-issue-credential.png)
-
-### Share credential
-![diagram](06-share-credential-with-holder.png)
-
-### Share credential to be verified
-![diagram](07-share-credential-to-verify.png)
-
-### Verify credential steps
-![diagram](08-verify-credential-steps.png)
-
-### Revoke credential
-![diagram](09-revoke-credential.png)
diff --git a/docs/new-diagrams/architecture.png b/docs/new-diagrams/architecture.png
deleted file mode 100644
index 566b9ad225..0000000000
Binary files a/docs/new-diagrams/architecture.png and /dev/null differ
diff --git a/docs/new-diagrams/architecture.puml b/docs/new-diagrams/architecture.puml
deleted file mode 100644
index 9c22c40a98..0000000000
--- a/docs/new-diagrams/architecture.puml
+++ /dev/null
@@ -1,70 +0,0 @@
-@startuml
-
-title Prism Architecture
-
-left to right direction
-
-skinparam {
- ArrowColor Red
- linetype ortho
-}
-
-skinparam cloud {
- BorderColor Black
-}
-
-cloud "IOHK Cloud" as IOHKCloud {
- rectangle Connector {
- node "Connector Server" as ConnectorServer
- database "Connector Database" as ConnectorDatabase
- ConnectorServer --> ConnectorDatabase
- }
-
- rectangle "Management Console Backend" as ManagementConsole {
- node "Management Console Server" as ManagementConsoleServer
- database "Management Console Database" as ManagementConsoleDatabase
- ManagementConsoleServer --> ManagementConsoleDatabase
- }
-
- rectangle Node {
- node Cardano
- node "Prism Node" as PrismNode
- database "Prism Node Database" as PrismNodeDatabase
- PrismNode --> PrismNodeDatabase
- PrismNode ---> Cardano
- }
-
- rectangle Frontend {
- node "Reverse Proxy" as ReverseProxy
- node "Management Console WebApp" as WebApp
- }
-
- ReverseProxy ---> ConnectorServer
- ReverseProxy ---> ManagementConsoleServer
- ReverseProxy ---> PrismNode
- ManagementConsoleServer -left-> ConnectorServer
- ManagementConsoleServer --left--> PrismNode
-}
-
-rectangle "Issuer Computer" as IssuerComputer {
- [Prism Wallet] as BrowserWallet
- [Management Console WebApp] as IssuerWebApp
- IssuerWebApp -> BrowserWallet
- IssuerWebApp -left-> ReverseProxy
-}
-
-rectangle "Verifier Computer" as VerifierComputer {
- [Prism Wallet] as VerifierWallet
- [Management Console WebApp] as VerifierWebApp
- VerifierWebApp -> VerifierWallet
- VerifierWebApp --> ReverseProxy
-}
-
-HolderPhone -[hidden]- IOHKCloud
-
-rectangle "Holder Phone" as HolderPhone {
- [Mobile Wallet] as MobileWallet
- MobileWallet -> ReverseProxy
-}
-
-@enduml
\ No newline at end of file
diff --git a/docs/new-diagrams/new-group-flow.png b/docs/new-diagrams/new-group-flow.png
deleted file mode 100644
index 18f52f7b9d..0000000000
Binary files a/docs/new-diagrams/new-group-flow.png and /dev/null differ
diff --git a/docs/new-diagrams/new-group-flow.puml b/docs/new-diagrams/new-group-flow.puml
deleted file mode 100644
index 24c9c7ad35..0000000000
--- a/docs/new-diagrams/new-group-flow.puml
+++ /dev/null
@@ -1,42 +0,0 @@
-@startuml
-title Potential friendly flow for importing data from issuers, and issuing credentials
-
-participant Wallet as "Wallet"
-participant Issuer as "Issuer"
-participant Console as "Management Console"
-participant Backend as "Backend"
-
-group Import persons
- note right of Issuer : The backend would allow the field uniqueId\n which every issuer must provide\n no two persons can share this id for the same issuer
- Issuer -> Console : Upload a CSV file with the person list\n which needs a name and the uniqueId
-
- Console -> Backend : Push the persons on the CSV file
- note over Console : The backend would update the person's name\n when detecting a duplicated person\n given the uniqueId; ideally, the issuer\n could choose what to do.\n These persons aren't assigned to any group
-
- Backend -> Console : Ack
- Console -> Issuer : Ack
- note right of Issuer : Now it's time to connect to every student
-end
-
-group Import credentials data
- note right of Issuer : Includes the credential data for each subject\n the subject is referenced by its uniqueId \nfor example, each subject would\n have its award/GPA/etc\n Also, the issuer needs to fill in the credential\n details applying to all persons\n like the institution name, issuing date/etc
- Issuer -> Console : Upload the credentials CSV file
- note right of Issuer : The issuer needs to choose which group\n those credentials will be assigned to, or create a new group
- Console -> Backend : Push the credentials on the CSV file
- note over Backend : The backend needs to combine the data\n on existing groups when necessary
- Backend -> Console : Ack
- Console -> Issuer : Ack
-end
-
-group Issue verifiable credentials
- Issuer -> Console : Issue the selected credentials in bulk
- Console -> Wallet : Issue credentials
- Wallet -> Issuer : Asks to confirm the credentials to issue
- Issuer -> Wallet : Confirms
- Wallet -> Backend : Issue credentials
- Backend -> Wallet : Ack
- Wallet -> Console : Ack
- Console -> Issuer : Ack
-end
-
-@enduml
diff --git a/docs/node/SubscriptionMechanism.md b/docs/node/SubscriptionMechanism.md
deleted file mode 100644
index 9faa5f5d97..0000000000
--- a/docs/node/SubscriptionMechanism.md
+++ /dev/null
@@ -1,234 +0,0 @@
-## Subscriptions and their use cases
-Before jumping into the details of the subscription mechanism, we will describe some sensible
-use cases which might be useful for various parties of the system.
-
-Use cases:
-1. Wallet wants to be notified about all operations relevant to DIDs owned by the wallet user, which have been confirmed (approved or rejected) in the Cardano network.
- This is useful for keeping the Wallet's state up to date.
-There are a lot of use cases which fall into this category, for example:
- * Issuer wants to know about all credential issuance/revocations signed with their issuing key,
- which could have been shared with some trusted party (e.g. departments at the university).
- * Issuer wants to know about a key creation/revocation which happened from another Management Console.
- * A system with two or more Atala nodes, whose API can be used at the same time.
- * Wallet wants to keep track of the user's new DIDs.
-
- _CreateDID_, _UpdateDID_, _IssueCredentialBatch_, _RevokeCredentials_ are operations which a wallet wants to be aware of.
-
-2. Verifier wants to be notified if a _particular_ credential is revoked and the revocation was approved in the Cardano network.
- The Verifier is interested in whether a credential provided by a holder is still valid.
-
-3. Wallet wants to be notified about _particular_ `AtalaOperation`'s status
- (_PENDING_, _CANCELLED_, _REJECTED_, _IN_BLOCK_, _CONFIRMATION_LEVEL n_, _APPROVED_),
- in order to show the user the operation status in real time and make the wallet interface more responsive.
-
-4. Wallet wants to be notified when a node goes offline and comes back online.
-
-The last two cases and similar ones are out of the scope of this document for now;
-we will mostly focus on notifications about Atala operations.
-
-## High level overview of the protocol
-The suggested protocol is inspired by [BIP-157](https://en.bitcoin.it/wiki/BIP_0157) and [BIP-158](https://en.bitcoin.it/wiki/BIP_0158).
-
-The core structure of this approach is the Golomb Coded Set filter (GCS), which is akin to a Bloom filter
-with a smaller serialization size but slower query time.
-This structure is basically a compressed sorted list of hashes computed from the original entries.
-In such a list we can check whether an entry hash exists; however, as we rely on hashes rather than on the original entries,
-the existence test is probabilistic (even though the probability of a correct result is close to 1),
-so a test can give a false positive result.
-Hence, every positive check has to be double-checked against the original list.
-
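-To make the false-positive point concrete, here is a minimal, hypothetical sketch of a GCS-like
-structure: it keeps only hashes of the entries reduced to a range, without the actual Golomb-Rice
-coding or the keyed SipHash used by BIP-158, which is enough to show why a positive membership test
-must be re-checked against the original entries.
-
-```scala
-import scala.util.hashing.MurmurHash3
-
-// A toy stand-in for a GCS: entries are reduced to hashes in [0, m) and only the sorted
-// hashes are kept. Queries may return false positives, but never false negatives.
-final class ToyHashFilter(entries: Seq[Array[Byte]], m: Long) {
-  private def reduce(entry: Array[Byte]): Long =
-    (MurmurHash3.bytesHash(entry).toLong & 0xffffffffL) % m
-
-  private val hashes: Vector[Long] = entries.map(reduce).sorted.toVector
-
-  // Probabilistic: an unrelated entry can map to the same reduced hash.
-  def mightContain(entry: Array[Byte]): Boolean = hashes.contains(reduce(entry))
-}
-```
-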
-The protocol in a nutshell: when a node receives an Atala block,
-it computes a GCS for the relevant data of the block's operations and sends the resulting GCS to each client;
-a client tests whether there is at least one event it listens to, and if so it requests the block.
-We offload the testing logic to clients in order to reduce the load on the node.
-
-Let's move on to a more detailed description.
-Assume that a node has the list of subscribers stored in memory,
-and each of them has an associated gRPC stream.
-Then the following happens on a new event:
-1. when a block arrives, the node computes a GCS for it, saves the block and the GCS in persistent storage,
-   and sends the GCS to all subscribers' streams
-2. when a client receives a GCS, it checks whether the GCS contains entries the client is interested in.
-   If there are any, the client requests, in a separate connection, the original block the GCS was derived from;
-   otherwise it saves the corresponding Atala object id as the last known one to local persistent storage.
-3. if a client requests the block for a GCS, the node responds to the client with it.
-   This message is sent on the separate connection, not on the associated stream.
-4. upon receiving a block, the client handles it and saves the corresponding Atala object id as the last known one (see the sketch after this list).
-
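-A minimal sketch of steps 2-4 from the client's side (all names here are hypothetical, not the
-actual SDK API): test the received GCS against the local filters, fetch the block only on a match,
-re-check the match against the real entries, and always advance the last known Atala object id.
-
-```scala
-def onGcsReceived(
-    mightContain: Long => Boolean,      // probabilistic membership test over the received GCS
-    filters: Set[Long],                 // hashes of the filters this client listens to
-    fetchBlock: () => List[Long],       // separate connection: fetch the original block's entry hashes
-    saveLastKnownObjectId: () => Unit,
-    handleMatches: List[Long] => Unit
-): Unit = {
-  if (filters.exists(mightContain)) {
-    // a GCS hit can be a false positive, so re-check against the original entries
-    handleMatches(fetchBlock().filter(filters.contains))
-  }
-  saveLastKnownObjectId()
-}
-```
-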
-Note that instead of building a GCS for an Atala block, the node could build it for
-other kinds of things, for example for a sequence of blocks, for an Atala operation, or even
-for a node lifetime event (e.g. the node disconnected from the Cardano network).
-This flexibility might be used in future versions of the protocol.
-
-Another subject for discussion is whether we need to store all kinds of events persistently.
-For example, a node disconnect event seems unimportant after the node connects back,
-but let's leave this question for future discussion.
-
-Also, in step 4 it depends on the actual implementation who responds to the client with events.
-In the simplest case, it might be the node itself; however,
-in order to reduce the load on the node, a reverse proxy or some kind of cache might sit in front of the node.
-
-Let's get back to our assumption about the subscribers list on the node,
-and describe how a client will actually connect to a node:
-1. a client initiates a stream connection to a node, sending its last known Atala object id, if any
-2. the node responds on the stream with GCSs from its persistent storage
-3. the client filters the received GCSs, and requests the corresponding events from the node in a separate connection(s)
-4. the node responds with the requested events
-5. the client receives them, updates its last known Atala object id, then checks that they actually match its filters, and handles the matched ones
-6. after that, the client moves to the previously described flow
-
-## Filters and related types
-In this section we will outline _filter_ types,
-which a client leverages to specify which operations it's interested in.
-
-We start with some auxiliary and abstract definitions.
-```scala
-sealed trait ConfirmedStatus extends EnumEntry with Snakecase
-object ConfirmedStatus extends Enum[ConfirmedStatus] {
- val values = findValues
-
- final case object AppliedConfirmedStatus extends ConfirmedStatus
- final case object RejectedConfirmedStatus extends ConfirmedStatus
-}
-
-abstract class SubscriptionFilter(val status: Option[ConfirmedStatus]) {
- // Hash for GCS filter
- def sipHash: Long
-}
-
-// GCS doesn't support insertion, it can be built only for a fixed number of elements
-class GCS(val operations: List[Operation], val p: Int) {
-  val m: Long = 1L << p
-  // ...
-}
-
-// ...
-def operationToFilters(op: Operation) = op match {
-  case UpdateDID(did, _) => DidOperationFilter(DidUpdateTag, did, Some(op.status))
-  // the similar code here
-}
-new GCS(operationToFilters(op), P) // P here is a parameter of GCS, will be specified during the implementation
-```
-* As an optimization we could respond with the `GCS`s of a batch of several Atala objects in `GetGCSStreamResponse`.
-  Also, we could add an extra `GCS` built for all operations from all the objects in the batch,
-  then a client could quickly skip bigger batches of irrelevant operations.
-
-Sketch of a possible low-level client SDK interface:
-```scala
-abstract class NodeSubscriber(nodeService: NodeService) {
- var filters: Set[SubscriptionFilter]
-
- def subscribe(filters: Set[SubscriptionFilter]): fs2.Stream[OperationOutcome]
- def removeFilter(filter: SubscriptionFilter): Unit
- def addFilter(filter: SubscriptionFilter): Unit
- def unsubscribe(): Unit
-}
-```
-
-`subscribe` is the most difficult method to implement;
-possibly the trickiest part is to achieve a decent level of parallelism on a reconnection
-while preserving the linear order of operations.
-The obvious option could be to send `GetAtalaObjectRequest`s sequentially, waiting for each corresponding response.
-
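-As a minimal sketch of that sequential option (assuming cats-effect/fs2, which the interfaces here
-already hint at; the fetch function and object ids are hypothetical), `evalMap` evaluates one effect
-at a time, so blocks come back in the same order they were requested:
-
-```scala
-import cats.effect.IO
-import fs2.Stream
-
-// One GetAtalaObjectRequest at a time; ordering is preserved at the cost of parallelism.
-def fetchSequentially[A](objectIds: List[String], getAtalaObject: String => IO[A]): Stream[IO, A] =
-  Stream.emits(objectIds).evalMap(getAtalaObject)
-```
-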
-The `NodeSubscriber` interface is low-level and is unlikely to be used directly by an SDK user,
-because the main use case is to be notified about DID document updates and credential issuance/revocation operations.
-A higher-level interface might look like:
-```scala
-abstract class DidEventsSubscriber(nodeService: NodeService, val didSuffix: String) {
- var filters: Set[SubscriptionFilter]
-
- // All APPROVED operations related to the passed DID
- def didOperations(): fs2.Stream[Operation]
-
- // All APPROVED DID updates (without issuance and revocations)
- def didDocumentUpdates(): fs2.Stream[Operation]
-}
-```
-
-The implementation of `DidEventsSubscriber` will use an implementation of the low-level `NodeSubscriber`.
-
-## Advantages, disadvantages and alternatives
-Several advantages of the suggested approach are:
-* No persistent data on a node, no IO overhead
-* As GCSs are sent to clients, clients' DIDs are kept private and can't be exposed easily
-* In-memory data size is `O(N)`, where `N` is the number of clients, as only connections with subscribers are held
-* Smaller load on a node, as all CPU-consuming tasks are performed by clients
-* Load on a node can be easily reduced by setting up reverse proxies in front of the node
-
-Disadvantages:
-* Communication overhead from GCS notifications unrelated to a client
-* Requires implementation and maintenance of the code on both sides: node and client SDK
-
-An obvious alternative approach could be sending a client's GCS to the node in order to reduce communication overhead.
-In this case, the node would have to merge all received GCSs into some advanced structure
-to be able to look up the relevant subscribers quickly for every confirmed Atala block.
-But this approach would require some sophisticated structures to be implemented,
-which would cause a huge amount of data to be kept in memory and a much bigger load on the node due to searches in the structure.
-Another disadvantage is that such a structure can't be easily updated with a new filter: it would require `O(K * F)`
-operations on every update for every subscriber,
-where `K` is the number of filters a client is interested in and `F` is the time needed to insert one hash into the structure
-(something logarithmic).
-
-The next idea which might come to mind: do not use GCS, explicitly send filters to the node,
-and keep all the information in SQL to reduce the size of in-memory data and to perform searches
-for relevant subscribers more quickly. But this exposes the client to man-in-the-middle attacks (message encryption would be needed),
-still might cause a high load on the node, and moreover causes IO load, which could be even worse than CPU load.
-
-All in all, after long consideration the suggested approach was deemed a good trade-off between
-performance and implementation complexity.
diff --git a/docs/node/operaions-lifecycle-in-node-service.md b/docs/node/operaions-lifecycle-in-node-service.md
index 2685d97452..e5f3d5ce0d 100644
--- a/docs/node/operaions-lifecycle-in-node-service.md
+++ b/docs/node/operaions-lifecycle-in-node-service.md
@@ -3,7 +3,7 @@
## How PRISM Node processes operations
User issues new operations using one of these gRPC calls:
-1. Every operation has its corresponding gRPC call: `CreateDID`, `UpdateDID`, `IssueCredentialBatch`, `RevokeCredentials`.
+1. Every operation has its corresponding gRPC call: `CreateDID`, `UpdateDID`, `ProtocolVersionUpdateOperationType`.
2. In addition to this, we have a gRPC call for sending several operations at once: `PublishAsABlock`.
After receiving operations in any of these calls, Node service forwards them to the `objectManagementService.sendAtalaOperations` method.
@@ -13,8 +13,8 @@ After receiving operations in any of these calls, Node service forwards them to
`sendAtalaOperations(op: node_models.SignedAtalaOperation*): Future[List[AtalaOperationId]]`
This method does the following:
-- Accepts a list of [SignedAtalaOperation](https://github.com/input-output-hk/atala-prism-sdk/blob/master/protosLib/src/main/proto/node_models.proto#L147)
-- Creates a new [AtalaObject](https://github.com/input-output-hk/atala-prism-sdk/blob/master/protosLib/src/main/proto/node_internal.proto#L18)
+- Accepts a list of `SignedAtalaOperation`
+- Creates a new `AtalaObject`
- Serializes the `AtalaObject` into an array of bytes using protobuf
- Stores `(objectId, objectBytes)` into `atala_objects` database table
- Stores every operation into `atala_operations` table with a new status `RECEIVED`
diff --git a/docs/node/operations-ordering-submission.md b/docs/node/operations-ordering-submission.md
deleted file mode 100644
index abc0b1de6a..0000000000
--- a/docs/node/operations-ordering-submission.md
+++ /dev/null
@@ -1,133 +0,0 @@
-# The life cycle of an AtalaOperation
-
-The goal of this document is to explain the current flow that an AtalaOperation goes through, the issues we are discussing in the node implementation, and the
-proposed solutions so far.
-
-
-The flow of an operation looks as follows:
-1. A user creates and signs the operations using our SDK
-1. The user sends the operation to the node (either through an operation-specific RPC like createDID, updateDID, or through publishAsABlock)
-1. The node receives the operation and stores it in a db table as received. It returns an operation id to the user so they can track the operation
-   status
-1. The node periodically polls the table with received operations and gathers them in blocks of `ATALA_BLOCK_SIZE`
-1. The node submits one transaction per block generated in the previous step
-
-## Current issues
-
-### Operations ordering
-
-There are some operations in our protocol that require an explicit ordering. For instance, `UpdateDID` operations point to a previous update or
-a `CreateDID` using a `previousHash` field. `RevokeCredentials` operations also point to an `IssueBatch` operation. In other cases, there is an
-implicit order between operations. For example, if a user submits an `IssueBatch` operation that is signed with `key1`, and then an `UpdateDID`
-operation that revokes `key1`, then the protocol will only consider the sequence of operations as valid if they are processed in that specific order.
-That is, the user could gather operations in a single transaction, in an atala block `B` of the form `[issueBatch, update]`. However, when the
-transaction `tx1` carrying `B` goes to the Cardano mempool, it is theoretically possible for an attacker to see the atala block `B`, extract the
-`update` operation and submit it in a separate transaction `tx2`. If `tx2` is confirmed before `tx1`, then the `issueBatch` operation will be rejected
-by the PRISM nodes (because it would be signed with a revoked key).
-When could this occur?
-- Regular key rotation as a security good practice
-- An issuer could issue some batches and suddenly detect an issued batch that they do not recognize. They would like to submit an `updateDID` event
-  revoking the issuing key while there may still be other valid `issueBatch` events waiting for confirmation
-
-The above scenario could be more problematic in cases where the atala blocks are full of dependent operations. This could happen either because of
-sub-optimal use of batching, or due to high-throughput demand.
-
-Note that it is not a problem if the user sends a block with operations `[update, issueBatch]` where `issueBatch` is signed by a key added in the
-`update` operation. This is because if the attacker sends the `issueBatch` operation to the chain, it will just be considered invalid by PRISM nodes,
-and later will be considered valid in the block.
-
-We could mitigate these scenarios by:
-- Waiting for all events associated with a key that will be revoked to be confirmed by PRISM nodes (this takes many minutes) before submitting the key
-  revocation. This could become an issue if multiple nodes send operations associated with the same key, but we consider this unlikely.
-- Requesting a smarter use of keys from the clients. If a user knows they will revoke a key, ask them not to send events dependent on that key. E.g. instead
-  of sending an operation and then revoking the key that signs it, suggest first rotating the key and then performing operations with the new key.
-
-Some other alternatives are:
-- add a multisig-like operation (not that easy)
-- add a previousHash to ALL our operations except for CreateDID. This would make all ordering explicit, and would make the attack described above
-  impossible because the order is imposed by the previousHash fields and those are tamper-proof due to the operation signature. The main issue is that
-  this could make multi-node management more complex in the future. Imagine multiple nodes issuing credentials using different keys that belong to the
-  same DID.
-
-For developer experience, we are inclined to believe that adding waiting time for the case of key rotation will be enough. We believe that in the regular
-usage pattern it won't be a frequent situation. We could also give a warning to the user when they send an update that will cause a delay, and suggest the
-user to delete operations that depend on a revoked key and re-sign the operations with another one.
-
-### Wallet overload and general throughput
-
-Today we are calling the Cardano wallet without properly managing errors. We created a story to improve this.
-One particular problem is that the Cardano wallet may not have enough available UTxOs to send the needed number of transactions. Note that this is
-different from not having enough funds. A Cardano transaction consumes a number of UTxOs (inputs), and creates new ones (called outputs).
-- The wallet will start with one UTxO,
-- Imagine that the first transaction will create an output with X ADA (today 1 ADA), and the remaining ADA (minus fees) will be located in another
- output of the transaction. This will leave us with 2 UTxOs that will be available for the next transaction. However, the two new UTxOs won't be
- usable (as inputs of new transactions) until the transaction that creates them is confirmed.
-
-A simple rule of thumb is
-
- The more UTxOs with enough ADA we have, the more parallel transactions we can send.
-
-Now, if the node queue has enough operations to fill many transactions, we have two challenges to solve:
-1. Minimize the waiting time to get all operations confirmed
-1. Guarantee the order of operations, which may force us to add waiting time (as mentioned in an alternative in the previous section)
-
-There are a few important observations to tackle these challenges:
-1. The time required to submit all transactions depends on the wallet's UTxO distribution
-1. The wallet does not provide a rich API to manage transactions yet. But we can call
- https://input-output-hk.github.io/cardano-wallet/api/edge/#operation/getWalletUtxoSnapshot and
- https://input-output-hk.github.io/cardano-wallet/api/edge/#operation/getWallet to get available balances and a notion of the UTxOs available.
-
-
-In order to increase the parallel submission throughput, we will have to implement a heuristic to increase the UTxO set size.
-For example, every X minutes, we could call `getWalletUtxoSnapshot` and obtain the maximum amount in a single UTxO, say M. Then if M is above a certain
-threshold, we could create a transaction that sends ~2 ADA (this is DEPOSIT + MAX FEE) to a number N of outputs (we need to measure the limit based
-on transaction size). This may force the wallet to break the output with M ADA into small UTxOs that can later be consumed in parallel.
-Unfortunately, we cannot guarantee that this will work, as the wallet uses an internal coin selection algorithm. We should refine this strategy, but it
-was a suggestion from the wallet team.
-
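-A rough sketch of that heuristic (all names and the threshold handling are hypothetical): if the
-largest UTxO in the snapshot is big enough, build a splitting transaction that pays ~2 ADA to as
-many outputs as fit, nudging the wallet into creating small UTxOs that can be consumed in parallel.
-
-```scala
-object UtxoSplitting {
-  final case class Utxo(lovelace: Long)
-
-  val adaToLovelace = 1000000L
-  val perOutput: Long = 2 * adaToLovelace // ~2 ADA per output (DEPOSIT + MAX FEE)
-
-  // Returns the output amounts of the splitting transaction, or None if splitting isn't worth it.
-  def splitOutputs(snapshot: List[Utxo], maxOutputs: Int, threshold: Long): Option[List[Long]] = {
-    val largest = snapshot.map(_.lovelace).maxOption.getOrElse(0L)
-    if (largest < threshold) None
-    else {
-      val n = math.min(maxOutputs.toLong, largest / perOutput)
-      if (n <= 1) None else Some(List.fill(n.toInt)(perOutput))
-    }
-  }
-}
-```
-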
-On the second topic, in cases where the queue is big, we may have atala blocks that contain operations that depend on one another. For example,
-an `issueBatch` operation in one block may depend on a DID update located in another block.
-We can classify operations as follows (a sketch of this classification appears after the list):
-1. build a dependency graph between operations. That is, for a queue that contains the ordered operations `o1, o2,..., on` we say that `oi` depends
-   on `oj` iff
-   - j < i and any of the following holds
-     + `oi.previousHash == hash(oj)`, or <-- these can be sent in the same block safely
-     + `oj` adds the key that signs `oi`, or <-- these can be sent in the same block safely because it is a key addition
-     + `oj` is signed by a key that is revoked by `oi` <-- this is the only case where `oi` needs to wait for `oj` to be confirmed
-
-   NOTE: In the third case, if the user does not want to wait, they could have first sent the update revoking the key and adding a new key, and then
-   added `oj` signed with the newly added key. We can even notify the user about this.
-
-   The graph should be composed of a set of directed graphs.
-   All nodes in a sub-graph should be associated with the same DID. We can recognize some user errors while constructing the graph, e.g.
-   if we have a directed loop, this indicates an error on the user's side
-   if two updateDIDs point to the same previousHash, we have a conflict to report to the user
-   if an operation has a previousHash field that matches no known operation
-   if an operation is signed with an unknown key
-   Different sub-graphs will be completely independent and could be posted in any order. These could share an atala block and allow for the use of
-   parallel transactions. We also note that the only vertices that will generate connections to many other vertices are:
-   + `UpdateDID`s as they can add/remove keys used in other operations
-   + `IssueBatch`s which can be the `previousHash` of multiple `RevokeCredentials`
-   Ideally, a sub-graph will fit in a single transaction and there will not exist dependencies of type 3 (a key is revoked by an update, but we need
-   to guarantee order between that update and an operation that uses the said key). If we estimate an average operation size of 500 bytes, then we
-   should be able to allocate ~30 operations in a single atala block.
-   There are then two cases that become troublesome.
-   1. when a sub-graph without type 3 dependencies does not fit in a single transaction.
-      Let B1,..., Bn be the dependent Atala blocks that contain a sub-graph that does not fit in a single block.
-      Let Txi be the transaction that carries block Bi.
-      We want Txj to be confirmed before Tx{j+1} for each j \in {1,..., n-1}.
-      Given the limited control on transaction construction, we could send Txi and wait until it is fully confirmed before sending Tx{i+1}. If we do not
-      wait, we may have issues related to rollbacks.
-      While we wait, we can still send transactions involved with other atala blocks.
-   1. When the sub-graph does fit in a single transaction, but we have dependencies of type 3. In this case, we can split the sub-graph into two
-      blocks, treat them as dependent blocks that do not fit in a single transaction, and proceed as described before.
-
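-A small sketch of this classification (the operation type and the key-related predicates are
-hypothetical placeholders; only the three dependency kinds come from the list above):
-
-```scala
-sealed trait DependencyKind
-case object PreviousHash  extends DependencyKind // safe to share an atala block
-case object KeyAddition   extends DependencyKind // safe to share an atala block
-case object KeyRevocation extends DependencyKind // oi must wait for oj to be confirmed
-
-final case class Op(previousHash: Option[String]) // stand-in for a signed Atala operation
-final case class Dependency(from: Int, to: Int, kind: DependencyKind)
-
-def dependencies(
-    queue: IndexedSeq[Op],
-    hash: Op => String,
-    addsSigningKeyOf: (Op, Op) => Boolean,   // does the first op add the key that signs the second?
-    revokesSigningKeyOf: (Op, Op) => Boolean // does the first op revoke the key that signed the second?
-): Seq[Dependency] =
-  for {
-    i <- queue.indices
-    j <- 0 until i
-    kind <- {
-      val (oi, oj) = (queue(i), queue(j))
-      if (oi.previousHash.contains(hash(oj))) List(PreviousHash)
-      else if (addsSigningKeyOf(oj, oi)) List(KeyAddition)
-      else if (revokesSigningKeyOf(oi, oj)) List(KeyRevocation)
-      else Nil
-    }
-  } yield Dependency(from = j, to = i, kind = kind)
-```
-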
-## Summary
-
-The problem that arises from submitting a key revocation while we have pending operations to be published that depend on the revoked key looks
-manageable by suggesting that the user use keys in a smarter way. If we later develop a wallet backend logic, we would be able to manage this on
-behalf of the user, as the wallet could encapsulate the logic of querying the node for dependencies and using keys in a reasonable way.
-Other submission delays caused by dependencies would only occur if there are so many dependent operations that they would not fit in a single block.
-According to our tests, this corresponds to more than ~30 dependent operations. In practice, this looks reasonable too. Remember that the only
-operations that could create dependencies are DID updates and credential revocations.
-
diff --git a/docs/node/rate-limiting.md b/docs/node/rate-limiting.md
deleted file mode 100644
index 6e3985d2ff..0000000000
--- a/docs/node/rate-limiting.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# Rate limiting requests
-
-The critical APIs should have proper rate limiters to prevent a single client from consuming Node resources. `envoy` provides ways to limit requests based on sequences of descriptors.
-To set up the rate-limiting, we have to do the following:
-1. Take the ratelimit service implementation:
-```
-git clone https://github.com/envoyproxy/ratelimit
-```
-2. Configure rate-limiters, here's a simple example:
-```
----
-domain: "atala-prism"
-descriptors:
- - key: remote_address
- rate_limit:
- unit: second
- requests_per_unit: 100
- descriptors:
- - key: path
- rate_limit:
- unit: second
- requests_per_unit: 20
- - key: path
- value: "/io.iohk.atala.prism.protos.NodeService/ScheduleOperations"
- rate_limit:
- unit: second
- requests_per_unit: 5
-```
-This config has three rate limits:
-- no more than 100 requests per second from a single client
-- no more than 20 requests per second from a single client for a specific gRPC call
-- no more than 5 requests per second from a single client invoking the `/io.iohk.atala.prism.protos.NodeService/ScheduleOperations` API call.
-3. After that we can update the `docker-compose.yaml` file, specifying the path to the configuration, and start it using `docker-compose up`
-4. In the envoy config, we need to add a new cluster with rate-limiting service:
-```
- - name: rate_limit_service
- connect_timeout: 1s
- type: strict_dns
- lb_policy: round_robin
- http2_protocol_options: {} # enable H2 protocol
- load_assignment:
- cluster_name: rate_limit_service_load
- endpoints:
- - lb_endpoints:
- - endpoint:
- address:
- socket_address:
- address: localhost
- port_value: 8081
-```
-5. After that, we need to specify this cluster as an endpoint of the rate-limiter. To do so, we have to add this http_filter:
-```
- - name: envoy.filters.http.ratelimit
- typed_config:
- "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
- domain: atala-prism
- rate_limit_service:
- grpc_service:
- envoy_grpc:
- cluster_name: rate_limit_service
- transport_api_version: V3
-```
-6. Now we can create rate limiters in the `route` section by specifying sets of descriptors:
-```
-rate_limits:
-- actions:
- - remote_address: {}
- - request_headers:
- header_name: ":path"
- descriptor_key: path
-- actions:
- - remote_address: {}
-```
-
-Here we create two sets of descriptors that will be propagated to the service:
-- (remote_address, path)
-- (remote_address)
-
-
diff --git a/docs/on-premise-deployments/README.md b/docs/on-premise-deployments/README.md
deleted file mode 100644
index a4343b3521..0000000000
--- a/docs/on-premise-deployments/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# Atala PRISM on-premise deployments
-While PRISM has terraform scripts to create new environments smoothly, those have locked us into AWS, preventing us from easily deploying PRISM to other environments, like custom virtual machines or custom servers; for example, the MoE deployment won't use AWS but their own servers.
-
-The goal of this document is to analyze what's required to deploy PRISM to on-premise instances without depending on specific cloud providers (like AWS). The integration will be successful when we can deploy PRISM to AWS EC2 instances, Digital Ocean droplets, any other cloud supporting VMs, and even our local environment, all of this with as few manual steps as possible.
-
-Let's take these steps as the ideal scenario for on-premise deployments; while they could be automated in popular cloud providers, they are likely the minimum ones required:
-1. Create the VMs, grabbing the IP addresses to the cluster config (let's assume we have ssh-access to those VMs after creation).
-2. Get the necessary domains and set up the DNS, updating the relevant cluster config.
-3. Get the relevant TLS certificates and set those to the cluster config.
-4. Run an ansible playbook that will set everything up.
-
-Of course, this is an ideal scenario, and it is very likely that other manual steps will be required, like setting up special config in the actual physical machines (networking stuff), setting up a private container registry used for deployments, authentication related stuff, etc.
-
-**TO BE CONFIRMED**: One tricky detail for MoE is that they can cut the internet traffic to anything outside of Ethiopia, which would prevent us from using any public service; for example, we need to use a self-hosted container registry, otherwise deployments won't work when there is no internet connection.
-
-
-## Container orchestration
-When talking about on-premise instances, it is natural to think about container orchestration solutions like Kubernetes (K8s).
-
-Container orchestration tools aim to automate common workflows while dealing with deployments, like load balancing, self-healing services, service discovery, blue-green/canary deployments, and more.
-
-For example, we could define that 3 instances of the PRISM connector are required; by using a container-orchestration tool, we'll be sure that there are 3 healthy instances running, and if any instance fails, a new instance will be created automatically.
-
-While K8s is the most popular container orchestration tool, it is well known that it is a heavy tool, requiring considerable maintenance, like a specific team focused just on K8s.
-
-The proposal is to **NOT** use K8s but [Nomad](https://www.nomadproject.io); this [comparison](https://www.nomadproject.io/docs/nomad-vs-kubernetes/alternative) is very appealing for a small team like ours. Besides that, these are the main reasons to consider Nomad:
-1. Simplicity in usage and maintainability.
-2. The IOHK DevOps team already uses Nomad, which can be helpful to get internal support.
-3. Containers are just one way to run apps; Nomad supports running jar files too. Due to the MoE internet cut-off, we need to run our own container registry; if that option fails or turns out to be too complex, we can always fall back to running jar files instead.
-
-
-### Nomad integration
-In order to integrate Nomad, there are some details to be aware of:
-1. [Deployment system requirements](https://learn.hashicorp.com/tutorials/nomad/production-reference-architecture-vm-with-consul?in=nomad/production#system-reqs)
-2. A cluster is composed of Nomad servers and Nomad clients.
-3. 3 or 5 Nomad servers are recommended for the cluster; these are used to manage the cluster state. Raft is used as the consensus algorithm.
-4. There is no restriction on the number of clients; these are used to deploy the applications.
-5. Every instance participating in the cluster needs to run the nomad app (either in server or client mode).
-6. Every instance participating as a Nomad client needs to have the necessary [drivers](https://www.nomadproject.io/docs/drivers) installed; in our case, it would be mainly [docker](https://www.nomadproject.io/docs/drivers/docker).
-
-Given the instance pre-requisites, it is natural to think of configuration provisioning tools like [ansible](https://www.ansible.com/), so that there is an automated workflow to get the nomad clients/servers ready.
-
-By leveraging ansible, we could get a simple flow to bootstrap a nomad cluster, like:
-1. Get the IP addresses for the instances that will host the nomad cluster (assume that you have ssh access to those).
-2. Create the necessary ansible hosts file by setting up the IP addresses related to nomad clients and the nomad servers.
-3. Run the ansible playbooks to configure the nomad cluster.
-
-It is worth adding that this approach works for on-premise instances as well as any popular cloud.
-
-Lastly, these are some useful details to know:
-1. Nomad exposes a nice [web UI](https://learn.hashicorp.com/collections/nomad/web-ui) which is unlikely to be exposed directly to the internet; a reverse proxy like nginx could be required.
-2. Operating the Nomad cluster requires access to its RPC API; an SSH tunnel could be required.
-
-
-## PRISM <> Nomad
-
-The internal IOHK DevOps team listed the following tools as their current stack; we'll try to stay close to it ([source from Slack](https://input-output-rnd.slack.com/archives/CH4MQJAN8/p1620355932325200?thread_ts=1620327748.316900&cid=CH4MQJAN8)):
-- Ingress using traefik
-- Storage using glusterfs
-- Monitoring/logging using Victoria metrics
-- Job scheduling using nomad
-- Secret storage using vault
-- Service discovery using consul
-- Nomad to run the jobs themselves
-- Cue to define the jobs without having to understand nix
-- Terraform for infrastructure
-- Oauth2 for SSO auth to service frontends (such as monitoring)
-
-Deploying PRISM to on-premise instances is far from trivial; just look at these deployment dependency graphs, which specify each server's dependencies (Mirror/intdemo left out on purpose because MoE doesn't require those):
-- Nodes with no dependencies are the first to be deployed.
-- Nodes with dependencies can be deployed only after their dependencies.
-- A dependency like `a -> b` means that `a` must be deployed before `b`.
-- A topological order can allow us to deploy services in parallel and in the right order (a sketch follows this list).
-
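-A small sketch of what "deploy in parallel, in the right order" means (the graph encoding is
-hypothetical; `dependsOn` maps each service to the services that must be ready before it):
-
-```scala
-// Groups services into "waves": everything in a wave can be deployed in parallel,
-// and a wave starts only when all previous waves are done.
-def deploymentWaves(dependsOn: Map[String, Set[String]]): List[Set[String]] = {
-  val all = dependsOn.keySet ++ dependsOn.values.flatten
-
-  @annotation.tailrec
-  def loop(remaining: Set[String], done: Set[String], acc: List[Set[String]]): List[Set[String]] =
-    if (remaining.isEmpty) acc.reverse
-    else {
-      val ready = remaining.filter(s => dependsOn.getOrElse(s, Set.empty).subsetOf(done))
-      if (ready.isEmpty) sys.error("dependency cycle detected") // indicates a broken graph
-      else loop(remaining -- ready, done ++ ready, ready :: acc)
-    }
-
-  loop(all, Set.empty, Nil)
-}
-
-// e.g. deploymentWaves(Map("node" -> Set("postgres_node"), "connector" -> Set("node")))
-// == List(Set("postgres_node"), Set("node"), Set("connector"))
-```
-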
-### PRISM deployment
-![prism-deployment-graph](diagrams/prism-deployment.png)
-
-### Monitoring deployment
-![monitoring-deployment-graph](diagrams/monitoring-deployment.png)
-
-### Nomad deployment
-We'll need to deploy Consul, Nomad servers, and Nomad clients; while the number of instances per cluster will be different from what's in the diagram, this should give you a better idea of what's required.
-
-![nomad-deployment-graph](diagrams/nomad-deployment.png)
-
-### Extra stuff
-There are some extra details to be aware of:
-- As pointed out previously, nomad UI likely needs a reverse proxy to be reachable from the internet (possibly envoy)
-- There is considerable network configuration required to get the cluster working properly.
-- Configuring the disk storage for a production deployment is far from trivial.
-- We'll likely need Consul/Vault integrated (not specified in the diagrams).
-- Envoy is likely going to be the way to communicate with PRISM from the public internet, where Consul will provide the discovery mechanism to reach out to the internal services (not specified in the diagrams).
-- A custom container registry is necessary before being able to do MoE deployments.
-
-### The plan
-The plan to get the Nomad integration working consists of several iterations:
-
-1. Get a Nomad cluster deployed manually to understand how to do it.
-2. Create ansible scripts to automate as much as possible from the Nomad cluster deployment.
-3. Create the Nomad jobs to get a minimal Node version deployed; this version won't launch Cardano, either by using the instance we already have or by using the in-memory ledger; avoid envoy or reverse proxies if possible.
-4. Get the minimal PRISM version deployed by Nomad by running the jobs manually; this involves Connector, Management Console, Vault, and Node (from the previous step).
-5. Get envoy deployed so that grpcweb can be used with the new environment.
-6. Start sending logs to the monitoring services we have set up.
-7. Evaluate the security model and make any necessary/critical improvements: TLS certificates, ACLs for accessing Nomad's UI, using private networks for the cluster, opening only the minimal necessary ports in the instances, etc.
-8. Evaluate and compare the new approach to the existing terraform-based deployment, possibly making a plan to improve the existing approach.
-9. Assuming that Nomad plays a nice role in our internal deployments, set it up by leveraging CircleCI; it is likely that it will be a mix of Ansible and Terraform.
-10. Try to do a deployment similar to what should be done for MoE, which should allow us to find missing details.
-11. Consider integrating auto-scaling policies.
-12. Evaluate how we can improve everything with Consul.
-13. Evaluate how to improve secret-management with Vault.
-
-## Resources
-- [HashiCorp Nomad on AWS](https://aws.amazon.com/quickstart/architecture/nomad/)
-- [Why you should take a look at Nomad before jumping on Kubernetes](https://atodorov.me/2021/02/27/why-you-should-take-a-look-at-nomad-before-jumping-on-kubernetes/)
-- [Nomad docs](https://www.nomadproject.io/docs)
-- [Consul <> Envoy Integration](https://www.consul.io/docs/connect/proxies/envoy)
diff --git a/docs/on-premise-deployments/diagrams/monitoring-deployment.png b/docs/on-premise-deployments/diagrams/monitoring-deployment.png
deleted file mode 100644
index f1b8489c19..0000000000
Binary files a/docs/on-premise-deployments/diagrams/monitoring-deployment.png and /dev/null differ
diff --git a/docs/on-premise-deployments/diagrams/monitoring-deployment.puml b/docs/on-premise-deployments/diagrams/monitoring-deployment.puml
deleted file mode 100644
index 28b4bd3378..0000000000
--- a/docs/on-premise-deployments/diagrams/monitoring-deployment.puml
+++ /dev/null
@@ -1,32 +0,0 @@
-@startuml
-digraph prism_monitoring_on_premise_deployment {
- # this is deployment dependency graph:
- # - nodes with no dependencies are the first to be deployed.
- # - nodes with dependencies can be deployed only after their dependencies.
- # - "a -> b" means that "b" can be deployed only when "a" is ready.
- # - a topological order can allow us to deploy services in parallel and in the right order.
-
- # prism is the whole deployment without considering how to talk to it from the public internet
- prism
-
- # storage components
- # each of these likely needs RAID
- storage_prometheus
- storage_elasticsearch
-
- # all the logs get collected to elasticsearch (which will likely require a cluster)
- elasticsearch
- kibana
-
- # all system/server metrics get collected by prometheus
- prometheus
- grafana
-
- storage_prometheus -> prometheus
- prometheus -> grafana
- prism -> prometheus
-
- storage_elasticsearch -> elasticsearch
- elasticsearch -> kibana
- prism -> kibana
-}
-@enduml
diff --git a/docs/on-premise-deployments/diagrams/nomad-deployment.png b/docs/on-premise-deployments/diagrams/nomad-deployment.png
deleted file mode 100644
index 772c0d1d3b..0000000000
Binary files a/docs/on-premise-deployments/diagrams/nomad-deployment.png and /dev/null differ
diff --git a/docs/on-premise-deployments/diagrams/nomad-deployment.puml b/docs/on-premise-deployments/diagrams/nomad-deployment.puml
deleted file mode 100644
index ea21665521..0000000000
--- a/docs/on-premise-deployments/diagrams/nomad-deployment.puml
+++ /dev/null
@@ -1,53 +0,0 @@
-@startuml
-digraph prism_nomad_on_premise_deployment {
- # this is deployment dependency graph:
- # - nodes with no dependencies are the first to be deployed.
- # - nodes with dependencies can be deployed only after their dependencies.
- # - "a -> b" means that "b" can be deployed only when "a" is ready.
- # - a topological order can allow us to deploy services in parallel and in the right order.
-
- # prism is the whole deployment without considering how to talk to it from the public internet
- prism
-
-
- # consul is needed for service discovery
- consul_1
- consul_2
- consul_3
- consul_cluster
-
- consul_1 -> consul_cluster
- consul_2 -> consul_cluster
- consul_3 -> consul_cluster
-
- # nomad
- nomad_server_1
- nomad_server_2
- nomad_server_3
- nomad_servers
-
- nomad_client_1
- nomad_client_2
- nomad_client_N
- nomad_clients
-
- nomad_cluster
- nomad_server_1 -> nomad_servers
- nomad_server_2 -> nomad_servers
- nomad_server_3 -> nomad_servers
- nomad_client_1 -> nomad_clients
- nomad_client_2 -> nomad_clients
- nomad_client_N -> nomad_clients
-
- nomad_servers -> nomad_client_1
- nomad_servers -> nomad_client_2
- nomad_servers -> nomad_client_N
- nomad_servers -> nomad_cluster
- nomad_clients -> nomad_cluster
-
- consul_cluster -> nomad_server_1
- consul_cluster -> nomad_server_2
- consul_cluster -> nomad_server_3
- nomad_cluster -> prism
-}
-@enduml
diff --git a/docs/on-premise-deployments/diagrams/prism-deployment.png b/docs/on-premise-deployments/diagrams/prism-deployment.png
deleted file mode 100644
index 568411dda6..0000000000
Binary files a/docs/on-premise-deployments/diagrams/prism-deployment.png and /dev/null differ
diff --git a/docs/on-premise-deployments/diagrams/prism-deployment.puml b/docs/on-premise-deployments/diagrams/prism-deployment.puml
deleted file mode 100644
index a8ca02fedd..0000000000
--- a/docs/on-premise-deployments/diagrams/prism-deployment.puml
+++ /dev/null
@@ -1,82 +0,0 @@
-@startuml
-digraph prism_on_premise_deployment {
- # this is deployment dependency graph:
- # - nodes with no dependencies are the first to be deployed.
- # - nodes with dependencies can be deployed only after their dependencies.
- # - "a -> b" means that "b" can be deployed only when "a" is ready.
- # - a topological order can allow us to deploy services in parallel and in the right order.
-
- # prism is the whole deployment without considering how to talk to it from the public internet
- prism
-
- # cardano components
- cardano_node
- cardano_wallet
- cardano_dbsync
-
- # storage components
- # each of these likely needs RAID
- storage_postgres_node
- storage_postgres_connector
- storage_postgres_vault
- storage_postgres_console
- storage_postgres_dbsync
- storage_cardano_node
-
- # postgres databases
- # each of these likely needs multiple instances with replication, and,
- # possibly, a connection pool like PgBouncer
- postgres_node
- postgres_connector
- postgres_vault
- postgres_console
- postgres_dbsync
-
- # each postgres instance needs storage
- storage_postgres_node -> postgres_node
- storage_postgres_connector -> postgres_connector
- storage_postgres_vault -> postgres_vault
- storage_postgres_console -> postgres_console
- storage_postgres_dbsync -> postgres_dbsync
-
- # cardano node requires storage because the blockchain data is huge
- storage_cardano_node -> cardano_node
- postgres_dbsync -> cardano_dbsync
-
- # the prism backend services
- # each of these likely needs many instances and a load balancer in front
- "node"
- connector
- console
- vault
-
- console_web_nginx
- docs_website_nginx
-
- postgres_node -> "node"
- postgres_connector -> connector
- postgres_vault -> vault
- postgres_console -> console
-
- cardano_node -> cardano_wallet
- cardano_node -> cardano_dbsync
-
- cardano_wallet -> "node"
- cardano_dbsync -> "node"
-
- "node" -> connector
- "node" -> console
- "node" -> vault
-
- "node" -> prism_backend
- connector -> prism_backend
- vault -> prism_backend
- console -> prism_backend
-
- prism_backend -> envoy
- docs_website_nginx -> envoy
- console_web_nginx -> envoy
-
- envoy -> prism
-}
-@enduml
diff --git a/docs/payments/README.md b/docs/payments/README.md
deleted file mode 100644
index 337d577e63..0000000000
--- a/docs/payments/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-## Payment flow
-
-Receiving payments is an important part of the credential project, as it is the way to provide us with revenue. We want to cooperate with a Payment Processor - a third party that handles transactions by credit cards, debit cards, and possibly other kinds of payments such as bank transfers or cryptocurrencies. This document aims to list the requirements that should be met in order for us to integrate with one.
-
-This document focuses on the case where it is the student making the payment, but it is without loss of generality as all flows can be applied to the issuer or the verifier as well.
-
-### Synchronous vs asynchronous payments
-
-There are two possible ways of realizing payments: one as a part of the UI flow, not letting the user progress further until they finish the payment; the other listing pending payments and starting a new flow when the payment is confirmed.
-
-In the case of asynchronous payments it is fine with us if the payment confirmation takes some time, e.g. it arrives the next day. For synchronous payments it is crucial to have a payment processor that is able to confirm payments quickly, e.g. within 30 seconds.
-
-Synchronous payments are used in the current design, so Payment Processor support for them is required.
-
-### Payment request flow
-
-![Payment request flow diagram](diagrams/payment-request-flow.png)
-
-The flow starts with directing the student to the payment interface using a generated payment link. The student then interacts with the Payment Processor, using their interface to make the payment.
-
-After that we need to receive a status update; there are three possibilities:
-
-* by querying the interface, possibly repeatedly,
-* by exposing an endpoint (probably a REST one) where the Payment Processor can push notifications,
-* by querying batches of payment status updates; that is the least convenient option, as we probably wouldn't like to make such queries for each payment, so we would have a single stream of updates and dispatch it to the individual payments.
-
-### Manual payment flow
-
-![Manual payment flow diagram](diagrams/manual-payment-flow.png)
-
-This is an obviously inferior payment flow as it forces the student to manually type payment data, but it is an option. It starts with us showing the student a reference code in the UI. They have to initiate the payment themselves, providing the reference code so we can later correlate the payment with our records. The payment is finalized in one of two ways, similar to those in the payment request flow.
-
-## Requirements
-
-### Payment link generation
-
-IOHK can request the Payment Processor to generate a one-time payment link that can be opened by the student in order to pay us. The student should not have to fill in the payment receiver details; they should already be set up.
-
-Other concepts that are extensions of this functionality are:
-* IFrame components: ability to generate an IFrame to be used in the UI,
-* In the case of mobile phones, if the Payment Processor has their own app, the equivalent is to generate an iOS application launch URL or Android Intent data. The app should open with IOHK as the payment receiver and the amount pre-filled. Another option is the Payment Processor providing an SDK to be included in our application.
-
-__Importance: Very High__ for UI links, __Medium__ for mobile apps. It is required for intuitive payments.
-
-### Reference data in payment
-
-The student can include reference data in the payment, so we can use it to correlate the payment with what it pays for. It is required if it is the student who initiates the payment. When using payment links, the payment link id can be used for referencing, as long as it is unique.
-
-__Importance: Very High__, required for automatic correlation of payment request and payments.
-
-### Getting status of single payment
-
-IOHK can query the Payment Processor, providing a payment id and receiving the current payment status.
-
-__Importance: Very High__, best way to verify payment result.
-
-### Payment status change notification
-
-IOHK can configure an endpoint (e.g. a REST endpoint) to be notified by the Payment Processor about all status changes.
-
-__Importance: Medium__, might speed up payment detection, but that is not a big concern.
-
-### Listing status changes of payments
-
-IOHK can query the Payment Processor for status changes.
-
-__Importance: Medium__, the ability to bulk-load status updates might be useful for providing a full payment history.
-
-### Fast confirmation
-
-The payment processor is able to confirm payments quickly enough to include the confirmation as a part of the UI flow, i.e. below a minute.
-
-__Importance: High__, in order to provide a fluid User Interface on our side.
-
-### Storing credit card numbers
-
-Credit card details entered once should be stored on the Payment Processor's (PCI-compliant) system to make further payments easier.
-
-__Importance: High__, as it is in product requirements.
-
-### Verifiable payment proofs
-
-The payment processor provides the student with a downloadable payment proof.
-
-__Importance: Low__, might be useful in disputes.
-
-### Refunds
-The payment processor provides a method to refund a student who paid for something that we or the University turn out to be unable to provide - e.g. they have been given a connection token, but after verification it is found that they have failed their exam.
-
-__Importance: Medium__: because of the specifics of credentials, refunds should be very rare and could be handled manually.
diff --git a/docs/payments/diagrams/manual-payment-flow.png b/docs/payments/diagrams/manual-payment-flow.png
deleted file mode 100644
index 25629eb3c5..0000000000
Binary files a/docs/payments/diagrams/manual-payment-flow.png and /dev/null differ
diff --git a/docs/payments/diagrams/manual-payment-flow.puml b/docs/payments/diagrams/manual-payment-flow.puml
deleted file mode 100644
index 94ffb437fe..0000000000
--- a/docs/payments/diagrams/manual-payment-flow.puml
+++ /dev/null
@@ -1,27 +0,0 @@
-@startuml
-title Payments - manual flow
-
-participant IOHK
-participant Processor
-actor Student
-
-IOHK -> Student : Request a payment, showing a reference code
-
-Student -> Processor : Send a payment using Processor interface,\nproviding the reference code to be included
-Processor -> Student: Confirm payment
-
-...Two possible finalization scenarios...
-alt Push API
- Processor -> IOHK: New payment received
-
-else Batch pull API
- IOHK -> Processor : Get payments
- Processor -> IOHK : Batch of payment status updates,\nincluding the one from the student
-
-end
-
-...
-
-IOHK -> Student : Payment received
-
-@enduml
\ No newline at end of file
diff --git a/docs/payments/diagrams/payment-request-flow.png b/docs/payments/diagrams/payment-request-flow.png
deleted file mode 100644
index 5505d4e6ef..0000000000
Binary files a/docs/payments/diagrams/payment-request-flow.png and /dev/null differ
diff --git a/docs/payments/diagrams/payment-request-flow.puml b/docs/payments/diagrams/payment-request-flow.puml
deleted file mode 100644
index a9b436b1cc..0000000000
--- a/docs/payments/diagrams/payment-request-flow.puml
+++ /dev/null
@@ -1,32 +0,0 @@
-@startuml
-title Payments - flow with request
-
-participant IOHK
-participant Processor
-actor Student
-
-IOHK -> Processor : Request one time payment link
-Processor -> IOHK : Link created
-IOHK -> Student : Send the payment link
-
-Student -> Processor : Open the payment link
-Processor -> Student : Show payment interface
-Student -> Processor : Fill in all data
-Processor -> Student : Confirm payment
-
-...Three possible finalization scenarios...
-
-alt Pull API
- IOHK -> Processor : Check payment status
- Processor -> IOHK : Payment confirmed
-
-else Push API
- Processor -> IOHK: Payment status change
-
-else Batch pull API
- IOHK -> Processor : Get payment status changes
- Processor -> IOHK : Batch of payment status updates,\nincluding the one from the student
-end
-...
-IOHK -> Student : Payment received
-@enduml
diff --git a/docs/protocol/.gitignore b/docs/protocol/.gitignore
deleted file mode 100644
index 3d7571e944..0000000000
--- a/docs/protocol/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.pdf
-*.docx
diff --git a/docs/protocol/Dockerfile b/docs/protocol/Dockerfile
deleted file mode 100644
index d07b6893e9..0000000000
--- a/docs/protocol/Dockerfile
+++ /dev/null
@@ -1,18 +0,0 @@
-FROM ubuntu:focal
-
-ENV DEBIAN_FRONTEND noninteractive
-ENV TERM xterm-256color
-
-RUN adduser --disabled-password --gecos '' atala
-
-RUN apt update
-RUN apt install -y texlive-xetex
-RUN apt install -y pandoc pandoc-citeproc
-RUN apt install -y git
-
-WORKDIR /home/atala
-ADD . .
-RUN chown atala:atala * .*
-
-USER atala
-
diff --git a/docs/protocol/README.md b/docs/protocol/README.md
deleted file mode 100644
index 816430cb6b..0000000000
--- a/docs/protocol/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-This folder has documentation for our Layer-2 protocol, internally called "Slayer".
-
-The `make.sh` script generates a nice PDF from all the MD files. The `Dockerfile` specifies the dependencies needed to run `make.sh`.
\ No newline at end of file
diff --git a/docs/protocol/biblio.bib b/docs/protocol/biblio.bib
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/docs/protocol/canonicalization.md b/docs/protocol/canonicalization.md
deleted file mode 100644
index 5294b64c5d..0000000000
--- a/docs/protocol/canonicalization.md
+++ /dev/null
@@ -1,351 +0,0 @@
-
-
-\newpage
-
-# Canonicalization (and comments on signing)
-
-Our protocol uses cryptographic signatures and hash functions to guarantee its security (along with other properties).
-The use of these cryptographic primitives requires translating programming language data representations into
-sequences of bytes. This process is called _data serialization_. Once data is serialized, we can hash and/or
-sign it.
-
-In this document, we will describe what we are doing and explore some challenges related to signing data and
-exchanging it between many applications. At the end of the document, we will describe a rudimentary signing technique
-that was implemented as a proof of concept for credentials. We will also comment on a more robust implementation and
-future challenges that may come.
-
-## Our current state
-
-There are mainly three places where we are hashing/signing data.
-
-- During Atala Operations construction
-- When we compute a DID suffix, where we hash the initial DIDData
-- When we need to sign credentials
-
-Let's explore the approach we take in each of these three situations.
-
-### Signing atala operations
-
-The way in which we are generating the bytes to sign from Atala Operations is through protobuf messages. We have:
-
-```
-message AtalaOperation {
- oneof operation {
- CreateDIDOperation createDid = 1;
- UpdateDIDOperation updateDid = 2;
- IssueCredentialOperation issueCredential = 3;
- RevokeCredentialOperation revokeCredential = 4;
- };
-}
-
-message SignedAtalaOperation {
- string signedWith = 1; // id of key used to sign
- bytes signature = 2; // signature of byte encoding of the operation
- AtalaOperation operation = 3;
-}
-```
-
-In order to construct a `SignedAtalaOperation`:
-
-1. We construct the needed `AtalaOperation` message using the corresponding protobuf model
-2. We extract the bytes produced by protobuf based on the model
-3. We sign those bytes and we build a `SignedAtalaOperation` message that is then serialized and posted as part of
- transaction metadata.
-
-Nodes later validate the signature in the following way:
-
-1. They see the message in the transaction metadata
-2. They decode the `SignedAtalaOperation` and extract the key, the signature, and the operation that was theoretically signed
-3. They serialize the `AtalaOperation` again into a sequence of bytes and check the signature against those bytes
-
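-As an illustration, here is a minimal sketch (in Python, using the third-party `cryptography` library, and assuming
-ECDSA over secp256k1 with SHA-256, which may differ from the exact primitives used in production) of the
-sign-then-reserialize-and-verify flow described above, with the protobuf-produced bytes represented by placeholders:
-
-```
-from cryptography.exceptions import InvalidSignature
-from cryptography.hazmat.primitives import hashes
-from cryptography.hazmat.primitives.asymmetric import ec
-
-signing_key = ec.generate_private_key(ec.SECP256K1())
-
-# Placeholder for the bytes obtained by serializing the AtalaOperation protobuf message.
-operation_bytes = b"<serialized AtalaOperation>"
-signature = signing_key.sign(operation_bytes, ec.ECDSA(hashes.SHA256()))
-
-# Node side: re-serialize the received AtalaOperation and check the signature against those bytes.
-# Verification only succeeds if the re-serialized bytes are byte-identical to the signed ones.
-reserialized_bytes = b"<serialized AtalaOperation>"
-try:
-    signing_key.public_key().verify(signature, reserialized_bytes, ec.ECDSA(hashes.SHA256()))
-    print("signature valid")
-except InvalidSignature:
-    print("signature invalid: the re-serialized bytes differ from the signed ones")
-```
-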
-The process works independently of the programming language and platform used to generate the signature and the one used
-to verify it because protobuf is _currently_ providing the same bytes from our messages in all platforms. This means
-that protobuf is _currently_ providing a _canonical_ bytes representation of the serialized data.
-
-However, we must remark that this is not a feature that protobuf guarantees or provides in all situations. For example,
-if our models used [maps](https://developers.google.com/protocol-buffers/docs/proto3#maps), then the "canonical bytes"
-property we rely on would be lost, because different languages may encode maps in different ways. Furthermore, the protobuf
-[specification](https://developers.google.com/protocol-buffers/docs/encoding#implications) advises against assuming that the byte
-output of a serialized message is stable.
-
-If the application that creates the `AtalaOperation` generated different serialization bytes than the ones the
-node generates when serializing the operation, then the signature validation process would fail (because the bytes
-signed by the first application would not match the bytes used by the node during verification). This could happen,
-for example, if the verifying party is using old versions of the protobuf models.
-
-In order to solve these issues, we should consider attaching the signed bytes "as is" in the encoded protobuf messages.
-For example, for `SignedAtalaOperation` we should refactor the message to:
-
-```
-message SignedAtalaOperation {
- string signedWith = 1; // id of key used to sign
- bytes signature = 2; // signature of byte encoding of the operation
- bytes operation = 3;
-}
-```
-
-### Computing DID suffix
-
-For the computation of the DID suffix of a given initial state of a DID Document, we face a similar situation to the one
-before. Our protocol defines that the DID suffix associated with a DID Document is the hash of certain DID Data associated
-with the initial state of the document. An important property we have is that clients are able to compute a DID suffix
-without the need to publish it.
-
-***NOTE***: We are currently hashing the entire `AtalaOperation` (that contains a `CreateDIDOperation`) and not the
-`DIDData` part.
-
-The way in which we achieve consistent hashes on both the client and the node side is that, given the models associated
-with these protobuf messages:
-
-```
-enum KeyUsage {
- // UNKNOWN_KEY is an invalid value - Protobuf uses 0 if no value is provided and we want user to explicitly choose the usage
- UNKNOWN_KEY = 0;
- MASTER_KEY = 1;
- ISSUING_KEY = 2;
- COMMUNICATION_KEY = 3;
- AUTHENTICATION_KEY = 4;
-}
-
-message ECKeyData {
- string curve = 1;
- bytes x = 2;
- bytes y = 3;
-}
-
-message PublicKey {
- string id = 1;
- KeyUsage usage = 2;
- oneof keyData {
- ECKeyData ecKeyData = 8;
- };
-}
-
-message DIDData {
- string id = 1; // DID suffix, where DID is in form did:atala:[DID suffix]
- repeated PublicKey publicKeys = 2;
-}
-
-message CreateDIDOperation {
- DIDData didData = 1; // DIDData with did empty id field
-}
-```
-
-both (client and node) can construct the DID suffix by hashing the bytes of the corresponding `AtalaOperation`
-message. Note, again, that this still depends on the weak assumption that we can trust the stability of the bytes
-obtained.
-
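-A rough sketch of this computation (in Python; SHA-256 and the `did:prism` prefix are assumptions taken from the DID
-format used in the test vectors later in this document, not statements of the exact production parameters):
-
-```
-import hashlib
-
-# Placeholder for the bytes of the serialized AtalaOperation containing the CreateDIDOperation.
-create_did_operation_bytes = b"<serialized AtalaOperation>"
-
-did_suffix = hashlib.sha256(create_did_operation_bytes).hexdigest()
-did = f"did:prism:{did_suffix}"
-```
-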
-### Credential signing
-
-There is currently a PoC on this topic that needs to be reviewed, so we won't expand on any approach for now.
-We will document the final approach in this section. At the end of this document, there are some comments on a simple
-approach.
-
-## Comments on JSON
-
-There have been conversations about using JSON to model credentials. It is also the case that both
-[DID Core](https://www.w3.org/TR/did-core/) and [Verifiable Credentials Data Model](https://www.w3.org/TR/vc-data-model/#syntaxes)
-drafts provide JSON and JSON-LD based descriptions of their data models.
-
-According to [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf),
-JSON is a text syntax that facilitates structured data interchange between all programming languages. However, on the
-same document it is stated that
-
-> The JSON syntax is not a specification of a complete data interchange. Meaningful data interchange requires agreement
-> between a producer and consumer on the semantics attached to a particular use of the JSON syntax. What JSON does
-> provide is the syntactic framework to which such semantics can be attached.
-
-JSON's simplicity favoured its wide adoption. However, this adoption came with some interoperability problems.
-
-[ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf) says:
-
-> The JSON syntax does not impose any restrictions on the strings used as names, does not require that name strings be
-> unique, and does not assign any significance to the ordering of name/value pairs. These are all semantic
-> considerations that may be defined by JSON processors or in specifications defining specific uses of JSON for data
-> interchange.
-
-On [JSON RFC 8259](https://www.rfc-editor.org/rfc/rfc8259.txt) we see statements like:
-
-> The names within an object SHOULD be unique.
-
-This means that names could be repeated, according to the [RFC 8174](https://tools.ietf.org/html/rfc8174) definition of "SHOULD".
-
-The RFC mentions differences between implementations when handling repeated fields:
-
-> An object whose names are all unique is interoperable in the sense that all software implementations receiving that
-> object will agree on the name-value mappings. When the names within an object are not unique, the behavior of software
-> that receives such an object is unpredictable. Many implementations report the last name/value pair only. Other
-> implementations report an error or fail to parse the object, and some implementations report all of the name/value
-> pairs, including duplicates.
-
-On the topic of element ordering the same RFC says:
-
-> JSON parsing libraries have been observed to differ as to whether or not they make the ordering of object members
-> visible to calling software. Implementations whose behavior does not depend on member ordering will be interoperable
-> in the sense that they will not be affected by these differences.
-
-which is a relevant point for canonicalization if one needs to hash/sign the same JSON in multiple applications.
-
-The RFC and ECMA are consistent with respect to the definition of JSON texts. However, ECMA-404 allows several practices
-that the RFC specification recommends avoiding in the interests of maximal interoperability.
-
-The [DID Core](https://www.w3.org/TR/did-core/) draft used to define DID Documents **as** JSON. At the time of this writing,
-the specification has no text in the [Data model section](https://www.w3.org/TR/did-core/#data-model) and does not
-define a DID Document as JSON anymore. It does talk about JSON, JSON-LD and CBOR core representations.
-
-On section 8 ([Core representations](https://www.w3.org/TR/did-core/#core-representations)) it says:
-
-> All concrete representations of a DID document MUST be serialized using a deterministic mapping that is able to be
-> unambiguously parsed into the data model defined in this specification. All serialization methods MUST define rules
-> for the bidirectional translation of a DID document both into and out of the representation in question. As a
-> consequence, translation between any two representations MUST be done by parsing the source format into a DID document
-> model (described in Sections § 6. Data Model and § 3.3 DID Documents) and then serializing the DID document model into
-> the target representation. An implementation MUST NOT convert between representations without first parsing to a DID
-> document model.
-
-The lack of a canonical binary representation of JSON texts makes them not ideal for cryptographic treatment. There are
-different proposals to get canonical JSON serialization, none of which seems to be considered a formal standard.
-
-### JCS
-
-[JCS (IETF Draft 17)](https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-17) is a canonicalization
-proposal for JSON texts. The JCS specification defines how to create a canonical representation of JSON data by building
-on the strict serialization methods for JSON primitives defined by ECMAScript, constraining JSON data to the I-JSON
-[RFC7493](https://www.rfc-editor.org/rfc/rfc7493.html) subset, and by using deterministic property sorting.
-
-We found [implementations](https://github.com/cyberphone/json-canonicalization) in different languages, including Java.
-There is also an available [JWS-JCS PoC](https://github.com/cyberphone/jws-jcs).
-
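-As a rough illustration (in Python, standard library only), compact separators plus lexicographically sorted keys
-approximate the JCS output; the full scheme additionally mandates ECMAScript number serialization and the I-JSON
-constraints, so a real implementation should use one of the libraries linked above:
-
-```
-import json
-
-# Illustrative data only.
-data = {"credential": {"degree": "Law", "name": "Alice"}, "issuerDID": "did:prism:123"}
-
-# These bytes are what would then be hashed and/or signed.
-canonical_bytes = json.dumps(
-    data, separators=(",", ":"), sort_keys=True, ensure_ascii=False
-).encode("utf-8")
-```
-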
-## On JSON-LD, RDF and LD-PROOFS
-
-Another alternative proposed so far is the use of Linked Data structures and LD-PROOFS.
-
-[LD-PROOFS](https://w3c-ccg.github.io/ld-proofs/) is an experimental specification on how to sign Linked Data. At the
-time of this writing, the work published doesn't seem robust. The draft is from March 2020 and says:
-
-> This specification was published by the W3C Digital Verification Community Group. It is not a W3C Standard nor is it
-> on the W3C Standards Track. Please note that under the W3C Community Contributor License Agreement (CLA) there is a
-> limited opt-out and other conditions apply. Learn more about W3C Community and Business Groups.
-
-> This is an experimental specification and is undergoing regular revisions. It is not fit for production deployment.
-
-The document doesn't clearly define any proof type or explain how to use them. Furthermore, from the section titled
-[Creating New Proof Types](https://w3c-ccg.github.io/ld-proofs/#creating-new-proof-types), we infer that the
-specification plans to describe, using a Linked Data representation (illustrated with [JSON-LD](https://www.w3.org/TR/json-ld/)),
-properties like `canonicalizationAlgorithm`, which would refer to the canonicalization algorithm used to produce the
-proof. Hence, this does not seem to focus on defining a canonical representation; it instead attempts to give a way to
-convey all the data needed to verify a generated proof.
-
-[JSON-LD](https://www.w3.org/TR/json-ld/) is a specific JSON-based format to serialise Linked Data. At the time of this
-writing, its status is a Candidate Recommendation and it is expected to soon become an endorsed W3C Recommendation.
-In particular,
-
-> JSON-LD is a concrete RDF syntax as described in [RDF11-CONCEPTS](https://www.w3.org/TR/rdf11-concepts/).
-> Hence, a JSON-LD document is both an RDF document and a JSON document and correspondingly represents an instance of
-> an RDF data model. However, JSON-LD also extends the RDF data model...
->
-> Summarized, these differences mean that JSON-LD is capable of serializing any RDF graph or dataset and most, but not
-> all, JSON-LD documents can be directly interpreted as RDF as described in RDF 1.1 Concepts.
-
-[RDF](https://www.w3.org/TR/rdf11-concepts/) is another syntax used to describe resources and linked data.
-In particular, RDF is an official W3C recommendation.
-
-[RDF-JSON](https://www.w3.org/TR/rdf-json/) was an initiative to represent RDF data as JSON documents. However, we can see
-the following message on the draft page:
-
-> The RDF Working Group has decided not to push this document through the W3C Recommendation Track. You should therefore
-> not expect to see this document eventually become a W3C Recommendation.
-> This document was published as a Working Group Note to provide those who are using it and/or have an interest in it
-> with a stable reference.
-> The RDF Working Group decided to put JSON-LD on the Recommendation track. Therefore, unless you have a specific reason
-> to use the syntax defined in this document instead of JSON-LD, you are encouraged to use JSON-LD.
-
-We didn't invest further time researching Linked Data signatures after exploring LD-PROOFS. If needed, we could ask
-for an update in the DIF Slack.
-
-## Comments on credentials' signing
-
-Research aside, a simple process we could follow to sign credentials is:
-
-1. Model the credential data as JSON due to its flexibility
-2. Serialize the JSON to an array of bytes
-3. Sign the bytes
-4. Define our `GenericCredential` as the base64url encoding of the serialized bytes of the following JSON
-
-```
-{
- signature: base64url(signature),
- credential: base64url(bytes(credential_data))
-}
-```
-
-The recipient will have to store the bytes "as is" to preserve a canonical hashable representation.
-
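-A minimal sketch of this construction (in Python, using the third-party `cryptography` library; the field names,
-example claims and the choice of ECDSA over secp256k1 with SHA-256 are illustrative assumptions, not the final format):
-
-```
-import base64
-import json
-from cryptography.hazmat.primitives import hashes
-from cryptography.hazmat.primitives.asymmetric import ec
-
-def b64url(data: bytes) -> str:
-    return base64.urlsafe_b64encode(data).rstrip(b"=").decode("ascii")
-
-issuing_key = ec.generate_private_key(ec.SECP256K1())
-
-# Illustrative credential data; the signing key reference lives inside the credential.
-credential_bytes = json.dumps(
-    {"issuingKeyId": "issuing-0", "claims": {"degree": "Law"}},
-    separators=(",", ":"), sort_keys=True,
-).encode("utf-8")
-signature = issuing_key.sign(credential_bytes, ec.ECDSA(hashes.SHA256()))
-
-generic_credential = {
-    "signature": b64url(signature),
-    "credential": b64url(credential_bytes),
-}
-shared_form = b64url(json.dumps(generic_credential, separators=(",", ":")).encode("utf-8"))
-```
-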
-Alternatively, instead of using JSON, we could create the following protobuf message:
-
-```
-message GenericCredential {
- bytes signature = 1;
- bytes credential = 2;
-}
-```
-
-In both cases, the signing key reference can live inside the credential.
-The use of protobuf, as an alternative to JSON, allows us to obtain the bytes of the `GenericCredential` and hash them to
-post them in the blockchain. Later, if the credential is shared with a client using another implementation language, we
-wouldn't need to worry about canonicalization to compute the hash as long as the protobuf message is exchanged. This is
-the same trick we are using for operation signing and DID suffix computation.
-
-An even simpler approach was implemented as a
-[proof of concept](https://github.com/input-output-hk/atala/commit/03999acb5e2a1c6da461b0db89d94c6183d52c71).
-In that PoC, the generic credential was represented as a pair of dot (.) separated strings, where the first string
-represented the encoded bytes of the signed credential and the second string represented the encoded bytes of the
-signature.
-
-```
- base64url(bytes(credential)).base64url(signature)
-```
-
-This model resembles [JWS](https://tools.ietf.org/html/rfc7515). The main difference is that JWS contains a header
-with a key reference, signing algorithm information and other data. We are planning to implement a JWS-based version of
-verifiable credentials for the MVP scope.
-
-## A note on future goals (selective disclosure)
-
-We want to remark that, in order to implement selective disclosure, we will probably need something different from the
-approach described in the previous section.
-For example, if we use a Merkle Tree based approach, the bytes to sign are the ones corresponding to the Merkle root of
-the eventual generic credential tree and not the credential bytes themselves. This implies that the credential to share
-could be of the form:
-
-```
-{
- proof: base64url( bytes( {
- signature: base64url(signature),
- root: base64url(merkle_root)
- } ) ),
- credential: Actual_credential_data
-}
-```
-
-and we would need a canonical transformation from the credential data to the Merkle Tree used to compute the root. The
-hash of the `proof` property would be posted as a witness on-chain.
-Later, for selective disclosure, a user would send the following data:
-
-```
-{
- witness: base64url( hash(the `proof` property defined above) ),
- data_revealed: [
- {
- value: value_revealed,
- nonce: a nonce associated to the revealed value,
- path: [ ... ], // basically a list of left-right indicators
- hashes: [ ...] // list of base64url hashes that need to be appended following `path` instructions to recompute the
- // merkle root
- },
- ...
- ]
-}
-```
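-
-A minimal sketch of how a verifier could recompute the Merkle root from one revealed value (in Python; SHA-256, the
-leaf construction `hash(nonce || value)` and the left/right convention are illustrative assumptions, since the tree
-construction is not specified here):
-
-```
-import hashlib
-
-def sha256(data: bytes) -> bytes:
-    return hashlib.sha256(data).digest()
-
-def recompute_root(value: bytes, nonce: bytes, path: list, sibling_hashes: list) -> bytes:
-    # The leaf commits to the revealed value together with its nonce.
-    node = sha256(nonce + value)
-    for direction, sibling in zip(path, sibling_hashes):
-        # `path` indicates whether the sibling hash goes on the left or on the right.
-        node = sha256(sibling + node) if direction == "L" else sha256(node + sibling)
-    return node
-
-# The verifier then checks that the recomputed root matches the signed `root` inside `proof`,
-# and that hash(proof) equals the witness posted on-chain.
-```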
-
-This may also be changed if we move toward ZK proofs.
diff --git a/docs/protocol/intro.md b/docs/protocol/intro.md
deleted file mode 100644
index 0481e7da62..0000000000
--- a/docs/protocol/intro.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: PRISM Slayer Protocol Specification
----
-
-# Intro
-
-This is a description of PRISM's 2nd layer protocol, internally called "Slayer".
-Historically, it has evolved in three variations (v1, v2, v3), which are
-provided here as the following three sections. Each variation has its own
-document, so we simply concatenate the three documents in order to create the
-current one. The original markdown source can be found in the
-[repository](https://github.com/input-output-hk/atala/tree/develop/prism-backend/docs/protocol).
-
-In Slayer v1, the reference to Bitcoin is there because Bitcoin was our
-initial target blockchain. Since then, we have transitioned to Cardano.
-Currently, there is a tension between keeping Slayer's original
-blockchain-agnostic nature and tying it to Cardano by leveraging its more
-flexible metadata feature.
-
-The final sections of this document describe canonicalization, key derivation,
-unpublished DIDs, the _late publication_ attack, and some more ideas on evolving
-the protocol.
-
-This work is **CONFIDENTIAL** and © [IOHK](https://iohk.io). The technology
-we describe here powers [atalaprism.io](https://atalaprism.io) and the
-respective web and mobile applications.
diff --git a/docs/protocol/key-derivation-and-unpublished-dids.md b/docs/protocol/key-derivation-and-unpublished-dids.md
deleted file mode 100644
index 0b24b8a3ca..0000000000
--- a/docs/protocol/key-derivation-and-unpublished-dids.md
+++ /dev/null
@@ -1,99 +0,0 @@
-# Key derivation and unpublished DIDs
-
-This document summarizes 3 related conversations around the topic of key types, unpublished DIDs and key
-derivation/recovery.
-
-## Unpublished DIDs and recovery process
-
-In our first [key derivation document](./key-derivation.md), we proposed a procedure to deterministically create and
-recover DIDs. Later, we introduced [unpublished DIDs](./unpublished-dids.md) to our system, which are DIDs
-that can be used without the need to publish them on-chain.
-
-This new type of DID created a conflict with the derivation and recovery algorithm. Initially, the algorithm intended
-to:
-- derive a DID
-- attempt to resolve it
- - if found, derive all the keys present in the DID document, and repeat the process with the next DID
- - if not found, stop the recovery process
-
-This means that unpublished DIDs cannot be recovered with that process because there is no on-chain history of their
-existence. Furthermore, the development of multiple products is also creating a segregated space where these DIDs
-are used. We do not have a proposal to recover unpublished DIDs automatically.
-
-### Proposal - generate a recovery file standard structure
-
-To address the above problem, we propose defining a standard way to represent the list of derived DIDs (or their paths)
-generated by the user. The standardized structure will allow importing the file and using it during the
-recovery process. The file will be complementary to the seed phrase and could be encrypted with a key derived from
-the same seed.
-
-## Key types
-
-A potential problem with our current use of unpublished DIDs is that they have only one key, which is a master key. If
-we want to use unpublished DIDs for multiple purposes, it could be more secure to have additional keys for specific purposes.
-Having separate keys for communication, authentication, and/or other actions would allow keeping the master key in cold
-storage while other keys are used more frequently.
-
-The [DID spec](https://www.w3.org/TR/did-core/#verification-relationships) mentions some optional key types commonly
-accepted by implementers:
-
-- Authentication
- > The authentication verification relationship is used to specify how the DID subject is expected to be authenticated,
- > for purposes such as logging into a website or engaging in any sort of challenge-response protocol.
-- Assertion
- > The assertionMethod verification relationship is used to specify how the DID subject is expected to express claims,
- > such as for the purposes of issuing a Verifiable Credential.
-- Key Agreement
- > The keyAgreement verification relationship is used to specify how an entity can generate encryption material in
- > order to transmit confidential information intended for the DID subject, such as for the purposes of establishing
- > a secure communication channel with the recipient.
-- Capability Invocation
- > The capabilityInvocation verification relationship is used to specify a verification method that might be used by
- > the DID subject to invoke a cryptographic capability, such as the authorization to update the DID Document.
-- Capability Delegation
- > The capabilityDelegation verification relationship is used to specify a mechanism that might be used by the DID
- > subject to delegate a cryptographic capability to another party, such as delegating the authority to access a
- > specific HTTP API to a subordinate.
-
-We can also see an example DID method that resembles our unpublished DIDs, namely [DID key](https://w3c-ccg.github.io/did-method-key/).
-These DIDs are not updatable and also have a single key for all purposes.
-
-Currently, we define the following keys:
-- MASTER_KEY: Used to update the associated DID document.
-- ISSUING_KEY: Used for credential issuance and revocation.
-- COMMUNICATION_KEY: Used for encrypting messages sent to the DID subject.
-- AUTHENTICATION_KEY: Used to authenticate requests or to log into services.
-
-A proposal under discussion is whether we want to add more keys to the initial state of a DID document (i.e. to our
-unpublished DIDs). This would affect our deterministic DID derivation algorithm.
-
-### Proposal
-
-We want to change the `COMMUNICATION_KEY` type to `KEY_AGREEMENT` in order to align with current specs.
-We also want to incorporate the types `ASSERTION_KEY` and `REVOCATION_KEY` (see next section).
-
-With respect to the initial state of our DIDs, we would like them to contain one master key, one
-authentication key, one assertion key, and one key agreement key. However, we should consider whether
-it is better to have a different key for each role or to share a single key across multiple
-roles. For example, we could have a master key, and then a single separate key shared by
-the other roles.
-
-We will not add an issuing key nor a revocation key to the initial state of our DIDs, because the use of
-those keys requires operations to be posted on-chain. Therefore, the user will be able to post
-a DID update together with the first issuance/revocation operation.
-
-## Revocation keys
-
-We have also identified an improvement to our protocol: adding a new key type, namely revocation keys. The idea is to
-split the function of `issuing` keys into two parts. The existing issuing keys will still be used to sign
-`IssueBatchOperation`s. However, in order to revoke batches or credentials, the `RevokeCredentialsOperation`s will now
-be signed by these new `revocation` keys.
-
-The advantage is more granular key management. For example, we could have a DID document where one
-entity controls a key and has the responsibility of issuing credentials, while separate entities are granted the right
-to audit and revoke credentials without also being granted the power to issue batches.
-
-### Proposal - incorporate the above key type
-
-We see the value in splitting the role of issuing keys. Therefore, we propose to update the protocol specification
-and key derivation documentation, and to incorporate this new key type.
diff --git a/docs/protocol/key-derivation-test-vectors.json b/docs/protocol/key-derivation-test-vectors.json
deleted file mode 100644
index d051a7fcc6..0000000000
--- a/docs/protocol/key-derivation-test-vectors.json
+++ /dev/null
@@ -1,159 +0,0 @@
-[
- {
- "dids" : [
- {
- "DID" : "did:prism:6fe5591aabaf1e41744f074336001f37be74534c00a99c3874c3a4690981dced",
- "keys" : [
- {
- "bip32path" : "m/1'/0'/0'",
- "keyId" : "master-0",
- "number" : 0,
- "publicKey" : {
- "hex" : "03cba11a413c631c853685bfd852b3163ffb124c03712f4a81cd115f72d6ced9f9",
- "xHex" : "cba11a413c631c853685bfd852b3163ffb124c03712f4a81cd115f72d6ced9f9",
- "yHex" : "69278cf55b5d72ea6ad01f1a14787c2ee316cbe8f96897c6f8e9b23b13efe565"
- },
- "secretKey" : {
- "hex" : "dd7115d710d2eaa591f241145ebead016f7a2b90f2b86f5f743731fe37aa3ac5"
- },
- "type" : "master"
- },
- {
- "bip32path" : "m/1'/0'/1'",
- "keyId" : "master-1",
- "number" : 1,
- "publicKey" : {
- "hex" : "03cdd203ac26fbc3282abd9a422558a3185371a27406164e2433a155a7bf901fa8",
- "xHex" : "cdd203ac26fbc3282abd9a422558a3185371a27406164e2433a155a7bf901fa8",
- "yHex" : "e9b72fe04894b19a8931d483a6b0979e95e3bbb34f786b6ac8512199989d2703"
- },
- "secretKey" : {
- "hex" : "3c10eeba06cd6efefe017146fdf400d7e78b9fa461a6c7cd953e1fe235d35416"
- },
- "type" : "master"
- },
- {
- "bip32path" : "m/1'/1'/5'",
- "keyId" : "issuing-5",
- "number" : 5,
- "publicKey" : {
- "hex" : "0208e12a3029d8e6635ed40788250014831d47f58ed9e9ff5ddb217ab9fe931c09",
- "xHex" : "08e12a3029d8e6635ed40788250014831d47f58ed9e9ff5ddb217ab9fe931c09",
- "yHex" : "2eb3147e1c0d93908189f708657ac35b4e06a563b98bf09731d997be719a0d5e"
- },
- "secretKey" : {
- "hex" : "f3222b9f1eeea1c11ceaaf39ff428c140c0f03e323d82d13ba94ca067ef1b79b"
- },
- "type" : "issuing"
- },
- {
- "bip32path" : "m/1'/2'/20'",
- "keyId" : "communication-20",
- "number" : 20,
- "publicKey" : {
- "hex" : "02c10a63a773514bebfc8c9425737963ed135936172cec6aa13c9777201ffac50c",
- "xHex" : "c10a63a773514bebfc8c9425737963ed135936172cec6aa13c9777201ffac50c",
- "yHex" : "58eaf1c1ed59a0eb56525498aabf8256e93953baca0c320c24827ea203b33ec4"
- },
- "secretKey" : {
- "hex" : "f8047dd871d8e79f54d9f202910eba779e67761d60b1b09ad1b8e72f2acbab2d"
- },
- "type" : "communication"
- },
- {
- "bip32path" : "m/1'/3'/27'",
- "keyId" : "authentication-27",
- "number" : 27,
- "publicKey" : {
- "hex" : "02ae26207160333e91fad88529b784bdbd9cd1a95e5ab6bbdc93ef289fa617c72e",
- "xHex" : "ae26207160333e91fad88529b784bdbd9cd1a95e5ab6bbdc93ef289fa617c72e",
- "yHex" : "c48c13b927248aab32874c0506dd26b6a90ef8452ab86147a4317045aed8f958"
- },
- "secretKey" : {
- "hex" : "0251a9f0c65de414c9522834afe62806d8c1a538c5282abbebf51f9f92f1eab5"
- },
- "type" : "authentication"
- }
- ],
- "number" : 1
- },
- {
- "DID" : "did:prism:60e5c0b68701bac49873bc273017ad199a063e1b614444312dd2e97e1e9fb164",
- "keys" : [
- {
- "bip32path" : "m/17'/0'/0'",
- "keyId" : "master-0",
- "number" : 0,
- "publicKey" : {
- "hex" : "023d372976f436182400d21c07404b6d42fb87f84e59b5a7c715025cf85a5a3362",
- "xHex" : "3d372976f436182400d21c07404b6d42fb87f84e59b5a7c715025cf85a5a3362",
- "yHex" : "dd0be8438eacd02d82e2e0c3069b20f71bda8e9f57a49183f73f4f4c127435d4"
- },
- "secretKey" : {
- "hex" : "038ff9d6e6830ca7f5e875d4400c2a9f973551cafc7b077e5dfb23e49eb13e3f"
- },
- "type" : "master"
- },
- {
- "bip32path" : "m/17'/0'/17'",
- "keyId" : "master-17",
- "number" : 17,
- "publicKey" : {
- "hex" : "02855112018b81d80d0187480fee241d0abf38e2f80dcab0039344f1b4a97cb7ab",
- "xHex" : "855112018b81d80d0187480fee241d0abf38e2f80dcab0039344f1b4a97cb7ab",
- "yHex" : "5dcc4c3877189fceaf1809aed0fd4491b900ed48b637f8529bc91bd305787b1c"
- },
- "secretKey" : {
- "hex" : "415590b46f2e4b0453da62328b8701becdacffd582fcd1008973774589199457"
- },
- "type" : "master"
- },
- {
- "bip32path" : "m/17'/3'/0'",
- "keyId" : "authentication-0",
- "number" : 0,
- "publicKey" : {
- "hex" : "03f6ede796792f949807db272c40f451811faf0f7eecb7fb2c6c0fd15d1c62b778",
- "xHex" : "f6ede796792f949807db272c40f451811faf0f7eecb7fb2c6c0fd15d1c62b778",
- "yHex" : "217cb6a49fce5b20e646015422d7ad7c9c70ff7e6f2d02a1228eaff0ada9d66d"
- },
- "secretKey" : {
- "hex" : "7da8202ee5c58aebe3e3b1003c5217c8e9a841ff5da9e6358010635ed126383c"
- },
- "type" : "authentication"
- },
- {
- "bip32path" : "m/17'/3'/17'",
- "keyId" : "authentication-17",
- "number" : 17,
- "publicKey" : {
- "hex" : "021b22881120925cf10381f5a247634665a677f98505bf183726a9b90f245a8a95",
- "xHex" : "1b22881120925cf10381f5a247634665a677f98505bf183726a9b90f245a8a95",
- "yHex" : "5a1b2feb44de35e5071810f931b8b4cf93bb6103eb86665563c65e120f6d9bc8"
- },
- "secretKey" : {
- "hex" : "9f60801c2bf70b18071e8dd2d01d851a5e15602a407a79ff110e6dfb8371e6ce"
- },
- "type" : "authentication"
- }
- ],
- "number" : 17
- }
- ],
- "seedHex" : "3779b041fab425e9c0fd55846b2a03e9a388fb12784067bd8ebdb464c2574a05bcc7a8eb54d7b2a2c8420ff60f630722ea5132d28605dbc996c8ca7d7a8311c0",
- "seedPhrase" : [
- "abandon",
- "amount",
- "liar",
- "amount",
- "expire",
- "adjust",
- "cage",
- "candy",
- "arch",
- "gather",
- "drum",
- "buyer"
- ]
- }
-]
\ No newline at end of file
diff --git a/docs/protocol/key-derivation.md b/docs/protocol/key-derivation.md
deleted file mode 100644
index e6663bddd7..0000000000
--- a/docs/protocol/key-derivation.md
+++ /dev/null
@@ -1,445 +0,0 @@
-
-
-\newpage
-
-# Key derivation and account recovery process
-
-## Context
-
-This document describes the process used to derive keys from a given seed and recover account information.
-A user account is mainly composed of three parts:
-
-- Cryptographic keys: Keys used by a user to control his communications and identity
-- DIDs: Decentralised identifiers created and owned by the user
-- Credentials: Credentials issued, sent and/or received by the user
-
-The three parts of an account are stored in different places.
-- First, cryptographic keys are stored in user wallets. During registration, each user generates a master mnemonic seed
- based on [BIP39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki). The seed is known only by the user
- and it is the user's responsibility to store this seed securely. We currently have no mechanism to recover an account
- if a user loses his seed. From that seed, we generate keys based on
- [BIP 32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki).
-- Second, based on the cryptographic keys, users create their DIDs. DIDs are posted in Atala files, which are anchored in
- the underlying blockchain by the node. Associated DID Documents and the events that update them are also stored off-chain
- and anchored on the ledger by Atala operations.
-- Third, credentials are stored by different components, specifically:
- - The credentials manager for the case of issuers and verifiers, and
- - The mobile wallet in the case of other users.
-
- For all users, shared credentials are also stored (encrypted) in the connector.
-
-The account recovery process that we are describing applies to extreme cases, such as hardware damage on the clients'
-side; for example, when a user's computer/phone is lost, stolen or breaks down.
-
-In the following sections, we will first describe the process of key and DID generation. Later on, we will explain
-how to recover the different parts of an account.
-
-## Definitions
-
-Throughout this text, we will use definitions extracted from
-[BIP 32 conventions](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).
-
-We assume the public key cryptography used in Bitcoin, namely elliptic curve cryptography with the field and
-curve parameters defined by [secp256k1](http://www.secg.org/sec2-v2.pdf).
-
-Given an initial (public, private) key pair `m`, which we will call the master key seed, we will define a function that
-derives a number of child keys from `m`. In order to prevent all keys from depending solely on the key pair itself, the BIP
-first extends both private and public keys with an extra 256 bits of entropy. This extension is called the **chain code**.
-The chain code is identical for corresponding private and public keys, and consists of 32 bytes. We refer to these
-(key, chain code) pairs as _extended keys_.
-- We represent an extended private key as `(k, c)`, with `k` the normal private key, and `c` the chain code.
-- An extended public key is represented as `(K, c)`, with `K = point(k)` and `c` the chain code. Where `point(p)`
- returns the coordinate pair resulting from EC point multiplication (repeated application of the EC group operation)
- of the secp256k1 base point with the integer `p`.
-
-Each extended key has `2^{31}` normal child keys, and `2^{31}` **hardened child** keys. Each of these child keys has an
-index.
-- The normal child keys use indices `0` through `2^{31}-1`
-- The hardened child keys use indices `2^{31}` through `2^{32}-1`. To ease notation for hardened key indices, a number
- `i'` represents `i+2^{31}`
-
-Hardened keys present different security properties than non-hardened keys.
-
-## Generation process
-
-### Root key generation
-
-From the [BIP39 spec](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#from-mnemonic-to-seed), the user
-generates a mnemonic seed phrase that can be translated into a 64-byte seed.
-
-> To create a binary seed from the mnemonic, we use the PBKDF2 function with a mnemonic sentence (in UTF-8 NFKD) used as
-> the password and the string "mnemonic" + passphrase (again in UTF-8 NFKD) used as the salt. The iteration count is set
-> to 2048 and HMAC-SHA512 is used as the pseudo-random function. The length of the derived key is 512 bits (= 64 bytes).
-
-From that 64-byte seed, we will derive an extended private key that we will denote by m.
-Given the initial seed, libraries typically provide a `generate` method to obtain m. We will refer to m as the root of our
-keys.
-
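-The quoted derivation can be reproduced with a few lines of Python (standard library only). With an empty passphrase,
-the seed phrase from the test vectors at the end of this document should reproduce the `seedHex` value listed there:
-
-```
-import hashlib
-import unicodedata
-
-mnemonic = "abandon amount liar amount expire adjust cage candy arch gather drum buyer"
-passphrase = ""
-
-seed = hashlib.pbkdf2_hmac(
-    "sha512",
-    unicodedata.normalize("NFKD", mnemonic).encode("utf-8"),
-    unicodedata.normalize("NFKD", "mnemonic" + passphrase).encode("utf-8"),
-    2048,
-)
-print(seed.hex())  # 64-byte seed, expected to match the seedHex value in the test vectors
-```
-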
-### Children key derivations
-
-The BIP32 spec presents [three functions for extended key derivation](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#child-key-derivation-ckd-functions):
-- CKDpriv((k, c), index): Given an extended private key and an index (>= 0), returns a new "child" extended private key.
-- N((k, c)): Given an extended private key, it returns the corresponding extended public key (point(k), c).
- In order to derive the ith child extended public key from an extended private key (k, c), we perform
- N(CKDpriv((k, c), i)).
-- CKDpub((K, c), index): Given an extended public key and an index, returns a new "child" extended public key.
- We won't use this last function.
-
-Comments:
-- Note that it is not possible to derive a private child key from a public parent key.
-- We refer to these child keys as _nodes_ because the entire derivation schema can be seen as a tree with root m.
-- Each node can be used to derive further keys. This allows splitting the tree into different branches and assigning a
- different purpose to each one of them.
-- **Notation:** We will use path notation, meaning that instead of `CKDpriv(m, i)` we will write `m / i`. For example,
- the expression `CKDpriv(CKDpriv(CKDpriv(m, 0'), 2'), 5')` translates to `m / 0' / 2' / 5'`. Recall that `i'` means
- `i+2^{31}`.
-- Libraries typically use path notation.
-
-For security reasons, we will only use hardened child keys. For more details on this decision, read
-[this link](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#security).
-
-### The paths used in our protocol
-
-Given our root key m, generated from a mnemonic seed, we will structure our derivation tree according to the following
-path:
-
-```
-m / DID_NUMBER / KEY_TYPE / '
-```
-
-where
-
-- `DID_NUMBER` is a hardened node i' and represents the (i+1)th DID of a user.
-- `KEY_TYPE` is one of:
- - 0': Representing master keys. The keys derived from this node are the only keys that can be used as MASTER_KEYs in
- users' DIDs.
- - 1': Representing issuing keys. The keys derived from this node are the only keys used for credential and operation
- signing.
- - 2': Representing communication keys. The keys derived from this node are keys used to establish connections
- and exchange messages with other users.
- - 3': Representing authentication keys. The keys derived from this node are keys used to authenticate with websites
- and servers.
-- The final `'` means that all derived keys must be hardened keys
-
-NOTE: The `KEY_TYPE` list may be updated as we progress with the implementation.
-
-Examples
-
-- m / 0' / 0' / 0' is the first master key for the first DID of a user derived from m
-- m / 0' / 0' / 1' is the second master key derived for the first DID for a user derived from m.
-- m / 1' / 0' / 0' is the first master key for the second DID derived from m
-- m / 3' / 0' / 5' is the sixth master key derived for the fourth DID derived from m.
-- m / 1' / 1' / 0' is the first issuing key for the second DID derived from m
-- m / 3' / 1' / 1' is the second issuing key for the fourth DID derived from m
-
-Note that all nodes and final keys are hardened ones.
-
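-A small helper (in Python; the key-type numbering simply restates the list above) makes the path construction explicit
-and reproduces the examples:
-
-```
-KEY_TYPE = {"master": 0, "issuing": 1, "communication": 2, "authentication": 3}
-
-def derivation_path(did_number: int, key_type: str, key_index: int) -> str:
-    # Every level is hardened, hence the trailing apostrophes.
-    return f"m/{did_number}'/{KEY_TYPE[key_type]}'/{key_index}'"
-
-assert derivation_path(0, "master", 0) == "m/0'/0'/0'"   # first master key of the first DID
-assert derivation_path(3, "issuing", 1) == "m/3'/1'/1'"  # second issuing key of the fourth DID
-```
-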
-**Terminology and notation**:
-- Given a `DID_NUMBER` `n`, we refer to the master key `m / n' / 0' / 0'` as the **canonical master key of the DID**.
- Every DID that we will generate has a unique canonical master key.
-- We will use the term _fresh key_ to refer to a key that has not been marked as used already.
-- Given a path `p`, we say that a key `k` is _derived_ from `p` if there exists an `i` such that `k = p / i`.
-- We say that a key `k` is derived from a master seed `m` if there exists a path `p` with root `m` and index `i` such
- that `k = p / i`.
-
- **Conventions**
- Whenever we generate a fresh key:
- - We will use hardened keys.
- - We will use keys in order. That is, we always use the key derived from the smallest unused hardened index. For
- example, when generating a fresh issuing key for seed `m` and DID `d`, we will always pick the key generated
- by the smallest `i` where `m / d' / 1' / i'` hasn't been used.
-
-### DID generation
-
-We would like to obtain a DID based on the mnemonic seed too. For this, we will fix a format for our DID Documents.
-When a user wants to create his first DID Document, the following process is followed:
-
-- Take `cmk = m / 0' / 0' / 0'`. This is the canonical master key for the first DID derived from m.
-- Mark this key as used for future reference.
-- Create a DID Document that contains `cmk`, registered as a master key, as its only field. The `keyId` field of the key
- object will be `master-0`.
- We call this document the _canonical document associated to `cmk`_.
-- Send a CreateDID request to the node.
-
-The request will return the DID suffix of the canonical document. We refer to `did:atala:[returned suffix]` as the
-_canonical DID_ associated to `cmk`.
-
-To create the second DID, we now follow these steps:
-- Take `cmk = m / 1' / 0' / 0'`. This is the canonical master key for the second DID derived from m.
-- Mark this key as used for future reference.
-- Create a DID Document that contains `cmk`, registered as a master key, as its only field. Again, with `keyId` `master-0`.
- We call this document the _canonical document associated to `cmk`_.
-- Send a CreateDID request to the node.
-
-And, in the general case. If a user wants to create his (n+1)th DID:
-- Take `cmk = m / n' / 0' / 0'`. This is the canonical master key for the (n+1)th DID derived from m.
-- Mark this key as used for future reference.
-- Create a DID Document that contains `cmk`, registered as a master key, as its only field, with `keyId` `master-0`.
- We call this document the _canonical document associated to `cmk`_.
-- Send a CreateDID request to the node.
-
-DIDs are generated in order and no keys can be shared between DIDs. That is, in order to create a new DID, we always
-pick the minimal N such that `m / N' / 0' / 0'` hasn't been used. No DID can use a key from a branch of the tree that
-does not derive from its own `DID_NUMBER`.
-
-### DID Document updates
-
-In order to update a DID Document and add more keys, the new keys should be generated following a process similar to
-the one used for generation.
-As examples:
-1. Imagine a user owns 2 DID Documents D1 and D2 with corresponding canonical master keys k1 and k2 and DID numbers 0
- and 1. Assume that D1 also has a non-canonical master key NK1. Graphically
- ```
- D1 = { keys : [ { keyId: "master-0", type: "Master", id: "k1", key: ... }, { keyid: "master-1", type: "Master", id: "NK1", key: ...}]
- D2 = { keys : [ { keyId: "master-0", type: "Master", id: "k2", key: ... }]
- ```
- To add a new master key to D2, we need to derive the next fresh key from `m / 1' / 0'`, which is `m / 1' / 0' / 1'`,
- and add it to D2 through an UpdateDID operation; the `keyId` used will be `master-1`.
-2. To add the first issuance key to D1, we should derive the next issuing key from `m / 0' / 1'`, which is
- `m / 0' / 1' / 0'`, and add it to D1 through an UpdateDID operation; the `keyId` used will be `issuance-0`.
-
-By convention, the Nth key of a given `KEY_TYPE` added to a DID Document will have the `keyId`
-`[KEY_TYPE]-[N-1]`. Applications can present users with local aliases in the front-end. We won't be concerned about
-recovering such aliases in this document.
-
-## Account recovery
-
-Given the generation and derivation rules, the process to recover an account can be modeled as follows.
-
-### DID recovery
-
-Given that DIDs are generated in order based on canonical master keys, we can use the following process.
-Given a master seed m:
-
-1. Set `i = 0`
-2. Compute `cmk = m / i' / 0' / 0'`
-3. Compute the canonical DID associated to `cmk`
-4. Resolve the canonical DID
- 1. If the DID can't be resolved: STOP
- 2. If the DID is resolved, then:
- - Store tuples (i, KEY_TYPE, key) for each key present in the resolved DID Document for future steps (do not
- store cmk)
- - Store cmk as a recovered master key
- - Store the canonical DID as a recovered DID
- - Increase i by 1 and go to step 2
-
-This process will end up with all generated DIDs and their canonical master keys stored.
-It will also compute the set of keys present in the DID Documents that we will attempt to recover in the next step of
-account recovery.
-
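-The loop above can be sketched as follows (in Python; `derive_master_key`, `canonical_did` and `resolve` are
-hypothetical helpers standing in for seed-based key derivation, canonical DID construction and node resolution):
-
-```
-def recover_dids(derive_master_key, canonical_did, resolve):
-    # `resolve` returns the list of (KEY_TYPE, public key) pairs in the DID Document, or None if unresolvable.
-    recovered_dids, keys_to_recover, i = [], [], 0
-    while True:
-        cmk = derive_master_key(f"m/{i}'/0'/0'")      # canonical master key of the (i+1)th DID
-        did = canonical_did(cmk)
-        resolved_keys = resolve(did)
-        if resolved_keys is None:                     # DID can't be resolved: STOP
-            return recovered_dids, keys_to_recover
-        recovered_dids.append((did, cmk))             # store the canonical DID and its master key
-        for key_type, key in resolved_keys:
-            if key != cmk:                            # do not store cmk itself in the key-recovery set
-                keys_to_recover.append((i, key_type, key))
-        i += 1
-```
-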
-### Key recovery
-
-Note: This process assumes that all keys in the DID Documents owned by the user were derived from the initial seed m.
-While recovering all DIDs, we computed a set of (DID_NUMBER, key, KEY_TYPE) tuples that need to be recovered. Let's call
-this set `KTR`.
-Given that we derive keys in order, we can recover all listed keys in the following way.
-
-1. Group the tuples in `KTR` by `(DID_NUMBER, KEY_TYPE)`. This will give us sets of keys that belong to the
- same DID and have the same KEY_TYPE. We will denote each of these sets `KTR(DID_NUMBER, KEY_TYPE)`.
-2. For each set `KTR(i, kt)` from step 1, derive the first `size(KTR(i, kt))` hardened keys from `m / i' / kt'`. This is
- the set `{ m / i' / kt' / 0', ..., m / i' / kt' / (size(KTR(i, kt)) - 1)' }`.
-3. The keys from step 2 MUST yield private keys matching the public keys in the `KTR(i, kt)` sets.
-4. Mark the recovered keys as used for future reference.
-
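-The grouping and in-order derivation can be sketched as follows (in Python; `derive_key` is a hypothetical seed-based
-derivation helper and `ktr` is the set of (DID_NUMBER, KEY_TYPE, key) tuples collected during DID recovery, with
-KEY_TYPE assumed to be the numeric branch index defined in the derivation paths section):
-
-```
-from collections import defaultdict
-
-def recover_keys(ktr, derive_key):
-    groups = defaultdict(list)
-    for did_number, key_type, public_key in ktr:      # step 1: group by (DID_NUMBER, KEY_TYPE)
-        groups[(did_number, key_type)].append(public_key)
-
-    recovered = {}
-    for (did_number, key_type), public_keys in groups.items():
-        for index in range(len(public_keys)):         # step 2: derive the first size(KTR(i, kt)) hardened keys
-            path = f"m/{did_number}'/{key_type}'/{index}'"
-            recovered[path] = derive_key(path)
-    return recovered                                  # step 3: these must match the keys in the DID Documents
-```
-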
-We would like to remark that the process could be adapted to allow external keys in our DID Documents.
-The process would follow the same steps with the modification that, in step 2, we will generate keys until we find the
-first one that does not match a public key in the set `KTR(i, kt)`. Also, in step 4, only keys with a matching public key
-in a DID Document will be marked as used.
-
-### Credentials recovery
-
-Now that we have explored how to recover all DIDs and the keys present in them, the remaining part of the account that we
-need is the credentials relevant to the user.
-Issuers and verifiers currently have their credentials in the credentials manager. Holders can find them in the
-connector. The recovery process will consist of generating, from the master seed, the keys used to communicate with
-services (e.g. authorization keys) and calling the adequate APIs to get the credentials stored by these components.
-
-Note that the above process should be updated according to implementation updates.
-
-### Test vectors
-
-Test vectors are available as [JSON file](key-derivation-test-vectors.json).
-
-#### Vector 1
-
-Seed phrase: abandon amount liar amount expire adjust cage candy arch gather drum buyer
- * [DID: 1]
- * DID: did:prism:6fe5591aabaf1e41744f074336001f37be74534c00a99c3874c3a4690981dced
- * [Key master 0]
- * Key ID: master-0
- * BIP32 Path: m/1'/0'/0'
- * Secret key
- * (hex): dd7115d710d2eaa591f241145ebead016f7a2b90f2b86f5f743731fe37aa3ac5
- * Public key
- * (hex): 03cba11a413c631c853685bfd852b3163ffb124c03712f4a81cd115f72d6ced9f9
- * (x-hex): cba11a413c631c853685bfd852b3163ffb124c03712f4a81cd115f72d6ced9f9
- * (y-hex): 69278cf55b5d72ea6ad01f1a14787c2ee316cbe8f96897c6f8e9b23b13efe565
- * [Key master 1]
- * Key ID: master-1
- * BIP32 Path: m/1'/0'/1'
- * Secret key
- * (hex): 3c10eeba06cd6efefe017146fdf400d7e78b9fa461a6c7cd953e1fe235d35416
- * Public key
- * (hex): 03cdd203ac26fbc3282abd9a422558a3185371a27406164e2433a155a7bf901fa8
- * (x-hex): cdd203ac26fbc3282abd9a422558a3185371a27406164e2433a155a7bf901fa8
- * (y-hex): e9b72fe04894b19a8931d483a6b0979e95e3bbb34f786b6ac8512199989d2703
- * [Key issuing 5]
- * Key ID: issuing-5
- * BIP32 Path: m/1'/1'/5'
- * Secret key
- * (hex): f3222b9f1eeea1c11ceaaf39ff428c140c0f03e323d82d13ba94ca067ef1b79b
- * Public key
- * (hex): 0208e12a3029d8e6635ed40788250014831d47f58ed9e9ff5ddb217ab9fe931c09
- * (x-hex): 08e12a3029d8e6635ed40788250014831d47f58ed9e9ff5ddb217ab9fe931c09
- * (y-hex): 2eb3147e1c0d93908189f708657ac35b4e06a563b98bf09731d997be719a0d5e
- * [Key communication 20]
- * Key ID: communication-20
- * BIP32 Path: m/1'/2'/20'
- * Secret key
- * (hex): f8047dd871d8e79f54d9f202910eba779e67761d60b1b09ad1b8e72f2acbab2d
- * Public key
- * (hex): 02c10a63a773514bebfc8c9425737963ed135936172cec6aa13c9777201ffac50c
- * (x-hex): c10a63a773514bebfc8c9425737963ed135936172cec6aa13c9777201ffac50c
- * (y-hex): 58eaf1c1ed59a0eb56525498aabf8256e93953baca0c320c24827ea203b33ec4
- * [Key authentication 27]
- * Key ID: authentication-27
- * BIP32 Path: m/1'/3'/27'
- * Secret key
- * (hex): 0251a9f0c65de414c9522834afe62806d8c1a538c5282abbebf51f9f92f1eab5
- * Public key
- * (hex): 02ae26207160333e91fad88529b784bdbd9cd1a95e5ab6bbdc93ef289fa617c72e
- * (x-hex): ae26207160333e91fad88529b784bdbd9cd1a95e5ab6bbdc93ef289fa617c72e
- * (y-hex): c48c13b927248aab32874c0506dd26b6a90ef8452ab86147a4317045aed8f958
- * [DID: 17]
- * DID: did:prism:60e5c0b68701bac49873bc273017ad199a063e1b614444312dd2e97e1e9fb164
- * [Key master 0]
- * Key ID: master-0
- * BIP32 Path: m/17'/0'/0'
- * Secret key
- * (hex): 038ff9d6e6830ca7f5e875d4400c2a9f973551cafc7b077e5dfb23e49eb13e3f
- * Public key
- * (hex): 023d372976f436182400d21c07404b6d42fb87f84e59b5a7c715025cf85a5a3362
- * (x-hex): 3d372976f436182400d21c07404b6d42fb87f84e59b5a7c715025cf85a5a3362
- * (y-hex): dd0be8438eacd02d82e2e0c3069b20f71bda8e9f57a49183f73f4f4c127435d4
- * [Key master 17]
- * Key ID: master-17
- * BIP32 Path: m/17'/0'/17'
- * Secret key
- * (hex): 415590b46f2e4b0453da62328b8701becdacffd582fcd1008973774589199457
- * Public key
- * (hex): 02855112018b81d80d0187480fee241d0abf38e2f80dcab0039344f1b4a97cb7ab
- * (x-hex): 855112018b81d80d0187480fee241d0abf38e2f80dcab0039344f1b4a97cb7ab
- * (y-hex): 5dcc4c3877189fceaf1809aed0fd4491b900ed48b637f8529bc91bd305787b1c
- * [Key authentication 0]
- * Key ID: authentication-0
- * BIP32 Path: m/17'/3'/0'
- * Secret key
- * (hex): 7da8202ee5c58aebe3e3b1003c5217c8e9a841ff5da9e6358010635ed126383c
- * Public key
- * (hex): 03f6ede796792f949807db272c40f451811faf0f7eecb7fb2c6c0fd15d1c62b778
- * (x-hex): f6ede796792f949807db272c40f451811faf0f7eecb7fb2c6c0fd15d1c62b778
- * (y-hex): 217cb6a49fce5b20e646015422d7ad7c9c70ff7e6f2d02a1228eaff0ada9d66d
- * [Key authentication 17]
- * Key ID: authentication-17
- * BIP32 Path: m/17'/3'/17'
- * Secret key
- * (hex): 9f60801c2bf70b18071e8dd2d01d851a5e15602a407a79ff110e6dfb8371e6ce
- * Public key
- * (hex): 021b22881120925cf10381f5a247634665a677f98505bf183726a9b90f245a8a95
- * (x-hex): 1b22881120925cf10381f5a247634665a677f98505bf183726a9b90f245a8a95
- * (y-hex): 5a1b2feb44de35e5071810f931b8b4cf93bb6103eb86665563c65e120f6d9bc8
-
-## Future challenges
-
-A problem for the near future is that we would like to enable users to create DID Documents that contain more than just
-the canonical master key. This can be achieved today by creating a canonical DID and then performing an update
-operation. Given the metadata space we have, we could batch the two operations in a single Cardano transaction. If we
-opt for this approach, the recovery process would work without the need for changes.
-
-As an alternative, we could generate initial DID documents with more keys and data than the canonical master key and
-back them up in some external storage. The recovery process would then iterate upon the documents and re-generate the
-keys associated with them.
-
-Another alternative could be to extract from an initial DID Document state its _canonical_ part, that is, the initial
-master key associated with the DID. The DID suffix could then depend only on the canonical part. For example, an initial
-DID Document could be:
-
-```
-{
- "keys": [
- {
- "id": "master0",
- "type": "MASTER_KEY",
- "key": ...
- },
- {
- "id": "issuance0",
- "type": "ISSUING_KEY",
- "key": ...
- }
- ]
-}
-```
-
-where the canonical part is the DID Document
-
-```
-{
- "keys": [
- {
- "id": "master0",
- "type": "MASTER_KEY",
- "key": ...
- }
- ]
-}
-```
-
-The DID suffix could be computed from the canonical part (its hash). This could allow for published DIDs to have a
-generic initial state without breaking our recovery process. However, the non-canonical part is then no longer
-self-certifiable, meaning that for an unpublished DID, a user could have the DID Documents:
-
-```
-{
- "keys": [
- {
- "id": "master0",
- "type": "MASTER_KEY",
- "key": ...
- },
- {
- "id": "issuance0",
- "type": "ISSUING_KEY",
- "key": ...
- }
- ]
-}
-```
-
-and
-
-```
-{
- "keys": [
- {
- "id": "master0",
- "type": "MASTER_KEY",
- "key": ...
- },
- {
- "id": "master2",
- "type": "MASTER_KEY",
- "key": ...
- }
- ]
-}
-```
-
-and both would have the same short form (canonical) DID.
-The approach still would not solve the recovery of [unpublished DIDs](./unpublished-dids.md).
-
-We have also mentioned that the node could expose an endpoint to obtain DIDs based on a key. This could also allow a
-recovery process that remains compatible with a generic initial DID Document.
-
-The above non-exhaustive analysis suggests the need for external storage for DID recovery, or for splitting the creation
-of complex DID Documents into a Create and an Update part.
diff --git a/docs/protocol/late-publish.md b/docs/protocol/late-publish.md
deleted file mode 100644
index dc5f240cb0..0000000000
--- a/docs/protocol/late-publish.md
+++ /dev/null
@@ -1,292 +0,0 @@
-
-
-\newpage
-
-# Late publication
-
-So far, we have explained how the protocol can achieve scalability through a batching technique. However, it is important
-to also consider other implications of this approach. We will describe a potential "attack" that is possible due to
-having off-chain data that is referenced from on-chain transactions. In short, the absence of control mechanisms to
-manage off-chain data availability may allow an attacker to produce undesired situations, as we will explain below.
-
-## The basic scenario
-
-At this point, the implementation is not whitelisting who can publish Atala objects and blocks. Let's then consider the
-following scenario: Alice decides to create a new DID `D1` whose DID Document contains key `k1`. She creates an Atala Object,
-`A1`, and a single operation Atala block, `B1`. She then sends a transaction, `Tx1`, to the underlying ledger with a
-reference to `A1` in its metadata and publishes the generated files in the CAS. All nodes will see `D1` with `k1` as
-valid since `Tx1.time`.
-
-Now, imagine that Alice creates another Atala object, `A2`, and corresponding single operation Atala block, `B2`, that
-updates `D1` by revoking `k1`. She sends a valid transaction, `Tx2` (with a reference to `A2`), to the underlying
-ledger, but this time she does not post the files to the CAS. This should lead other nodes that are exploring ledger
-transactions to attempt to resolve the reference in `Tx2` and fail. In order to avoid nodes being blocked by this
-situation, we could mark `Tx2` with an `UNRESOLVED` tag and continue processing subsequent transactions. Nodes would
-keep a list of `UNRESOLVED` transactions and would periodically keep attempting to retrieve the missing files until they
-succeed. Once a missing file is found, the node state should be properly updated (where properly means reinterpreting the
-entire ledger history and applying updates to the state, incorporating the missing file's operations). Note that we say
-that this _should_ happen; however, the current implementation assumes that the files are always published.
-
-The outcome of the above process should lead all nodes to have the following state:
-
-- A transaction `Tx1` which publishes `D1` with a DID Document that contains `k1` as valid key
-- An `UNRESOLVED` transaction `Tx2` (which updates `D1` to revoke `k1`)
-
-```
-Ledger
- ----+-----+---------+-----+-----
- ··· | Tx1 | ··· | Tx2 | ···
- ----+-----+---------+-----+-----
-
-CAS
-[
- hash(A1) --> A1,
- hash(B1) --> B1
-]
-
-Node State
-unresolved = [hash(A2)]
-dids = [ D1 --> [k1 valid since Tx1.time ] ]
-```
-
-Alice could now issue a credential `C1`, signing the `IssueCredential` operation with `k1`, post it in files `A3`, `B3`
-with a transaction `Tx3`, and publish `A3`, `B3` in the CAS, leading to:
-
-```
-Ledger
- ----+-----+---------+-----+---------+-----+-----
- ··· | Tx1 | ··· | Tx2 | ··· | Tx3 | ···
- ----+-----+---------+-----+---------+-----+-----
-
-CAS
-[
- hash(A1) --> A1,
- hash(B1) --> B1,
- hash(A3) --> A3,
- hash(B3) --> B3
-]
-
-Node State
-unresolved = [hash(A2)]
-dids = [ D1 --> [k1 valid since Tx1.time ] ]
-creds = [ hash(C1) --> Published on Tx3.time ]
-```
-
-If Bob receives `C1` and validates it, the node will state that the credential is correct.
-
-Later, Alice can post `A2` and `B2` to the CAS. This _should_ lead to a history reinterpretation (note that this is not
-currently implemented), producing the following state in all nodes:
-
-```
-Ledger
- ----+-----+---------+-----+---------+-----+-----
- ··· | Tx1 | ··· | Tx2 | ··· | Tx3 | ···
- ----+-----+---------+-----+---------+-----+-----
-
-CAS
-[
- hash(A1) --> A1,
- hash(B1) --> B1,
- hash(A2) --> A2,
- hash(B2) --> B2,
- hash(A3) --> A3,
- hash(B3) --> B3
-]
-
-Node State
-unresolved = []
-dids = [ D1 --> [k1 valid since Tx1.time, revoked in Tx2.time ] ]
-creds = [ ] -- as the IssueCredential event was signed with an invalid key, C1 may not even be added to the credentials
- -- map
-```
-
-If Bob tries to validate `C1` again, he will not only find that `C1` is invalid now. According to ledger history, `C1` was
-_never_ valid because it was signed with a key that was revoked _before_ `C1` was published. Note that the only ways to
-notify Bob of the change in the state of `C1` are:
- 1. Bob periodically queries the state of `C1`.
- 2. The node, upon the history rewrite, replays all queries done by Bob and notifies him if any response would
- differ from the one he received before the history change.
-
-If there are no history rewrites, Bob could simply subscribe to a notification of a `C1` revocation event. As `k1`, in
-this example, is the key used to sign the operation, Bob could ask the node to inform him if the credential becomes
-invalid after a history rewrite.
-Furthermore, note that if `C1` did not support revocation, then this history rewrite could, "in the real world", imply its
-revocation even after a proper validation.
-
-
-## Comparative - situation 2: Attack after key revocation
-
-Let us now imagine the scenario where Alice is not hiding files. She starts by publishing `Tx1`, `A1` and `B1` as before.
-Imagine that now Alice suspects that her key was compromised and decides to revoke it by publishing `Tx2`, `A2` and `B2`.
-The state reflected in the system from the perspective of a node would be:
-
-```
-Ledger
- ----+-----+---------+-----+-----
- ··· | Tx1 | ··· | Tx2 | ···
- ----+-----+---------+-----+-----
-
-CAS
-[
- hash(A1) --> A1,
-  hash(B1) --> B1,
- hash(A2) --> A2,
- hash(B2) --> B2
-]
-
-Node State
-unresolved = [ ]
-dids = [ D1 --> [k1 valid since Tx1.time, revoked at Tx2.time ] ]
-creds = [ ]
-```
-
-If an attacker Carlos, who actually got control of `k1`, creates a credential `C1` and sends an `IssueCredential` event
-for `C1` in `Tx3`, `A3`, `B3`, we would get the state:
-
-```
-Ledger
- ----+-----+---------+-----+---------+-----+-----
- ··· | Tx1 | ··· | Tx2 | ··· | Tx3 | ···
- ----+-----+---------+-----+---------+-----+-----
-
-CAS
-[
- hash(A1) --> A1,
- hash(B1) --> B1,
- hash(A2) --> A2,
- hash(B2) --> B2,
- hash(A3) --> A3,
- hash(B3) --> B3
-]
-
-Node State
-unresolved = []
-dids = [ D1 --> [k1 valid since Tx1.time, revoked in Tx2.time ] ]
-creds = [ ] -- C1 is not even added to creds map because the key to sign the IssueCredential operation was revoked
-```
-
-Note that in this situation there is no point in "real world time" where `C1` could have been valid, as Carlos was not
-able to publish `C1` before `k1` was revoked. The point that we want to remark is: **_this ledger, CAS and node states
-are identical to the ones where the late publish occurred_**.
-
-## Comparative - situation 3: Attack followed by credential and key revocation
-
-Let us now analyse a third situation. In this case, there is again no late publication. As in the last two scenarios,
-we start with Alice publishing `D1` with valid `k1` through `Tx1`, `A1` and `B1`. Now imagine that Carlos gains access
-to `k1` and issues `C1` through `Tx2`, `A2`, `B2` before Alice manages to revoke `k1`. The system state would look like
-this:
-
-```
-Ledger
- ----+-----+---------+-----+-----
- ··· | Tx1 | ··· | Tx2 | ···
- ----+-----+---------+-----+-----
-
-CAS
-[
- hash(A1) --> A1,
-  hash(B1) --> B1,
- hash(A2) --> A2,
- hash(B2) --> B2
-]
-
-Node State
-unresolved = [ ]
-dids = [ D1 --> [k1 valid since Tx1.time ] ]
-creds = [ hash(C1) --> Published on Tx2.time ]
-```
-
-If Bob now receives `C1` and verifies it, he will receive a response stating that the credential is valid.
-Now, if Alice notices that a credential was issued without her knowledge, she could revoke `C1` and `k1` through a
-transaction `Tx3` with files `A3` and `B3` containing an `UpdateDID` and a `RevokeCredential` operation, leading to the
-state:
-
-```
-Ledger
- ----+-----+---------+-----+---------+-----+-----
- ··· | Tx1 | ··· | Tx2 | ··· | Tx3 | ···
- ----+-----+---------+-----+---------+-----+-----
-
-CAS
-[
- hash(A1) --> A1,
- hash(B1) --> B1,
- hash(A2) --> A2,
- hash(B2) --> B2,
- hash(A3) --> A3,
- hash(B3) --> B3
-]
-
-Node State
-unresolved = []
-dids = [ D1 --> [k1 valid since Tx1.time, revoked in Tx3.time ] ]
-creds = [ hash(C1) --> Published on Tx2.time, revoked on Tx3.time ]
-```
-
-Note that both the blockchain and the node state reflect the period of time in which verifying `C1` could have resulted
-in an "is valid" conclusion. We could even extend the supported operations with a `RevokeSince` operation to reflect that
-the intention is to revoke a credential since a block number/time prior to the revocation event, while still keeping the
-record of precisely what happened on chain.
-
-**Comment**
-
-We would like to recall that we wait for transactions to be in the stable part of the ledger before applying them to
-the node state. This means that there is a time window in which a user can detect that an event triggered on their
-behalf is going to be applied. We could allow issuers to specify waiting times in some way to mitigate key compromise
-situations. E.g. a credential schema could be posted on chain specifying how many **stable** blocks the protocol needs
-to wait before considering a credential valid (giving the issuer time to detect compromised keys). If a credential
-is issued but a revocation event is found (in a stable block) before the end of this waiting period (after the issuance
-event), the protocol can then ignore the credential, never adding it to its state and, hence, improving the chance that
-unauthorised credentials are never valid.
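-
-A minimal sketch of this waiting-period idea, assuming a hypothetical `waitingBlocks` value taken from a credential
-schema and simplified block-number bookkeeping (the names are illustrative and not part of the current protocol):
-
-```scala
-// Ignore an IssueCredential if a matching revocation is seen within
-// `waitingBlocks` stable blocks after the issuance event.
-case class IssuanceEvent(credId: String, stableBlock: Long)
-case class RevocationEvent(credId: String, stableBlock: Long)
-
-def acceptCredential(issued: IssuanceEvent,
-                     revocations: Seq[RevocationEvent],
-                     waitingBlocks: Long): Boolean =
-  !revocations.exists(r =>
-    r.credId == issued.credId &&
-      r.stableBlock <= issued.stableBlock + waitingBlocks)
-```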
-
-Another small improvement worth remarking: if a client queries data from a node, and the node can see in the
-**unstable** part of the blockchain that an event will affect the queried data, the node could reply to requests with an
-extra flag pointing out the situation. It would then be up to the client to wait for a few minutes to see if the data
-changes are confirmed, or to simply ignore the incoming information.
-
-## Some conclusions
-
-A system that allows late publishing brings some complexities:
-
-- Rollbacks and history rewrites should be handled properly
-- Clients should have richer notification methods to understand what happened
-- A priori, credentials that do not have revocation semantics could be revoked through late publication
-- Auditability becomes a bit blurry
-- It makes reasoning more complex if we expand the protocol with more operations
-- As illustrated [here](https://medium.com/transmute-techtalk/sidetree-and-the-late-publish-attack-72e8e4e6bf53),
- it also adds complexities to DIDs or credentials transferability.
-
-Now, not everything is negative. Late publishing possibilities are a consequence of decentralization.
-
-If one decides not to allow late publishing, the selected approach to do so may bring disadvantages:
-
-- Whitelisting who can issue operations with batches
-  + This leads to some centralization. It could be mitigated by allowing on-chain operation publishing (which is our
-    current plan). Whoever does not want to rely on IOHK can issue their operations on-chain without batching.
-    They would need to trust that IOHK won't create a late publish scenario (which everyone would be able to audit).
-  + We could also consider giving two references to our protocol files, one to S3 and one to IPFS. Batching
-    transactions would still be done only by IOHK. The node would try to get the file from IPFS first; if that fails it
-    would try IOHK's CAS (currently S3); if both fail, then the node should stop and report the error.
-    In this way, the system would be less dependent on IOHK's existence because anyone could first post the file to
-    IPFS and then send it to us. Even if IOHK disappears, the files could be kept online by other entities, making the
-    generated DIDs truly persistent. The system would still remain functional without IOHK. This may also require
-    fewer resources from IOHK, as IPFS could reply to queries that would otherwise go to S3.
-  + Unfortunately, the above idea still does not remove the possibility of IOHK performing a late publish attack,
-    because IOHK could still post references to self generated blocks, or modify an Atala block sent by a user and
-    post a reference on chain to a subset (or superset) of the block provided without posting the file itself.
-    * We may be able to mitigate the subset case by requesting users to sign the list of all operations they intend
-      to batch in a single block (assuming all operations batched by a user are signed by a single DID, which is the
-      case of issuing a batch of credentials). Then we could request that signature in the batch as an integrity
-      requirement. This would not allow IOHK to publish a subset of the batch.
-    * Note (as a consequence of the above point) that if we restrict batches to only contain events created by a single
-      authority (i.e. all events in a batch are signed by the same key), we could request that the batch file carry a
-      signature by that same key. This removes any possibility for IOHK to perform late publish attacks on other users'
-      data.
-- A BFT CAS system that provides proofs of publication could be a great solution for us, but may be complex (if at all
- possible) to implement.
-- Allowing legally bound entities to also publish files could aid towards a semi-decentralized system.
-- Once we implement rollbacks/history rewrites we could consider further decentralization if we see the need, but it
-  would bring the auditability concerns and the issue with non-revocable credentials mentioned in this document.
-
-Long story short: if we handle history rewrites, we can guarantee consensus of all nodes about the _current_ state of
-the system. However, if we implement a way to remove late publication possibilities, we would also get consensus about
-the _past history_ of the system. Different use cases may require different approaches.
diff --git a/docs/protocol/make.sh b/docs/protocol/make.sh
deleted file mode 100755
index e8d91c8c43..0000000000
--- a/docs/protocol/make.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-set -o pipefail
-
-SCRIPT_PATH=${BASH_SOURCE[0]}
-SCRIPT_FOLDER=$(readlink -m "$(dirname "$SCRIPT_PATH")")
-
-function make() {
- echo Making "$1"
-
- pandoc -N -f markdown+smart \
- --self-contained \
- --standalone \
- --toc \
- --highlight-style pygments \
- --filter pandoc-citeproc \
- --bibliography biblio.bib \
- --pdf-engine=xelatex \
- -V papersize:a4 \
- -V geometry:margin=1.2in \
- -V fontsize=10pt \
- -V date="Revision: $(git show -s --format='%h')\\\\Revision Date: $(git show -s --format='%cD')" \
- -V author="The Atala Team, IOHK" \
- -s \
- intro.md \
- protocol-v0.1.md \
- protocol-v0.2.md \
- protocol-v0.3.md \
- canonicalization.md \
- key-derivation.md \
- unpublished-dids.md \
- late-publish.md \
- protocol-other-ideas.md \
- -o "$1"
-}
-
-cd "$SCRIPT_FOLDER"
-
-make protocol.pdf
-make protocol.docx
diff --git a/docs/protocol/protocol-other-ideas.md b/docs/protocol/protocol-other-ideas.md
deleted file mode 100644
index 2970b98b2c..0000000000
--- a/docs/protocol/protocol-other-ideas.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-
-\newpage
-
-# Improvement proposals
-
-This document contains ideas we would like to evaluate for future releases.
-
-
-## Make use of the underlying Cardano addresses
-
-Our operations require signatures that arise from DID related keys.
-Apart from this signature, an operation is attached to a transaction that will also contain a signature related to its
-spending script. We _may_ be able to reduce the number of signatures to one.
-
-Sidetree has update commitments (idea that probably comes from KERI[1](https://www.youtube.com/watch?v=izNZ20XSXR0))
-[2](https://arxiv.org/abs/1907.02143). The idea is that, during DID creation, the controller commits to the hash of a
-public key. Later, during the first update, the operation must be signed by the key whose hash matches the initial
-commitment. The update operation defines a commitment for the next operation. These keys are part of the protocol and
-are optionally part of the DID Document.
-
-Now, here is an improvement we could use:
-
-- The initial DID could set a commitment to a public key hash PKH1 (as in KERI and Sidetree). We call this the genesis
-  commitment.
-- Now, in order to perform the first update, the controller needs a UTxO to spend from. Let the user receive a
- funding transaction to an output locked by PKH1. Let us call this UTxO, U1.
-- The update operation could now add metadata in a transaction that _spends_ U1. The transaction signature will be
-  required by the underlying nodes, meaning that we could get signature validation for free. This transaction could also
-  create a new single UTxO, which we could name U2. This output could again be a P2PKH script representing the next
-  commitment.
-
-We could define similar "chains" of transactions for DID revocation, credential issuance (declared in an issuer DID),
-and possible credential revocation registry.
-
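-As a rough illustration of the commitment chain (ignoring the UTxO mechanics and using hypothetical types), each update
-must be signed by the key whose hash matches the previous commitment and declares the next commitment:
-
-```scala
-case class Update(signingKeyHash: Array[Byte], payloadSignatureOk: Boolean, nextCommitment: Array[Byte])
-
-// Verify a chain of updates starting from the genesis commitment.
-def chainIsValid(genesisCommitment: Array[Byte], updates: List[Update]): Boolean =
-  updates
-    .foldLeft(Option(genesisCommitment)) {
-      case (Some(expected), u) if u.signingKeyHash.sameElements(expected) && u.payloadSignatureOk =>
-        Some(u.nextCommitment)
-      case _ => None
-    }
-    .isDefined
-```
-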
-Some observations of this approach are:
-
-- Positive: We get smaller metadata.
-- Positive: It enables the light node ideas described in the section [below](#light-nodes-and-multi-chain-identity).
-- Positive: We may be able to get "smarter" locking scripts for operations by relying on the underlying chain scripts.
-- Negative: It becomes more complex to do "on-chain batching" for DID updates. We may be able to use a multi-sig script
- for this. Credential batching remains unaffected. Recall that on-chain batching may be considered bad behaviour, see
- [metadata usage concerns](./protocol-v0.3.md#metadata-usage-concerns)
-- Negative: One would need to do the key/transaction management a bit more carefully. The key sequence could be derived
- from the same initial seed we use.
-
-## Light nodes and multi-chain identity
-
-We are having conversations with the research team and Cardano teams to suggest a light node setting for PRISM.
-Assume we could:
-1. Validate chain headers without downloading full blocks.
-2. Find in each header a structure like a bloom filter with a low (around 1%) false positive rate that tells whether a
-   UTxO script has been used in the associated block. Note: our proposal is to add a hash of a bloom-like structure and
-   not the filter itself.
-
-With those two assumptions, if a light node has the full chain of headers, then given a DID with its initial state, the
-node could:
-
-1. Check all the bloom filters for the one that spends the initial publish key hash we described in the previous section.
- If no bloom filter is positive, then the current DID state is the one provided.
- If K filters return a positive response, we could provide the script to a server which should provide either:
- + A transaction (with its metadata) with its merkle proof of inclusion in one of the positive blocks, or
-   + The actual blocks matching those filters so that the node can check that all were false positives.
-
-The addition of the bloom filter is important because it mitigates the typical SPV problem of a node hiding data.
-
-We may be able to follow a similar process for credential issuance and revocation events.
-We could also check in real time for updates based on the headers, for those DIDs we already know about.
-For this process we just need to know a genesis commitment.
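-
-As a rough sketch of the header scan (with hypothetical `BlockHeader` and `BloomFilter` types; real headers would only
-carry a hash of the filter), the light node would first narrow down candidate blocks and then request proofs for them:
-
-```scala
-case class BloomFilter(mightContain: Array[Byte] => Boolean)
-case class BlockHeader(height: Long, bloom: BloomFilter)
-
-// Heights whose filter is positive for the commitment script; those blocks (or
-// merkle proofs of inclusion) must then be requested to discard false positives.
-def candidateBlocks(headers: Seq[BlockHeader], commitmentScript: Array[Byte]): Seq[Long] =
-  headers.filter(_.bloom.mightContain(commitmentScript)).map(_.height)
-```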
-
-We should check for possible spam attacks, e.g. providing an address with many matches. We have been referred to
-[section `TARGET-SET COVERAGE ATTACKS`](https://eprint.iacr.org/2019/1221.pdf) and [this attack on Ethereum
-filters](https://medium.com/@naterush1997/eth-goes-bloom-filling-up-ethereums-bloom-filters-68d4ce237009). Given the
-small size of Cardano blocks, we should evaluate the number of UTxOs and transactions an attacker would need to create
-to saturate the filter. The optimal relation between filter size and block size should also be evaluated.
-
-### Bonus feature - multi chain DIDs
-
-With a "light node" approach, we may be able to move from one chain to another while only downloading headers.
-This may be achievable by posting an update operation that declares a change of blockchain and then searching for the
-desired script in the other chain.
-
-We are not aware of blockchains that implement bloom filter like structures (or their hashes) in headers for UTxO
-scripts at the time of this writing. We haven't explored yet how to translate this approach to account based systems
-like the Ethereum case.
-
-## Layer 2 batching without late publication
-
-To be expanded
-- The basis is to register _publishers_ that could batch operations. The publishers could be whitelisted by IOHK.
-- DID controllers could send an on-chain event registering to a publisher (useful if they plan multiple updates to the
- same DID). The registration to a publisher could be part of the initial DID state too.
-- From that point on, the associated updates for that DID could only be posted by the publisher.
-- The publisher must publish file references signing the publication with his DID.
-- If a publisher does not reveal a file `F1`, and publishes a file `F2`, then no node should process `F2` until `F1` is
-  published (see the sketch after this list). This prevents late publication because no DID is attached to more than one publisher.
-- The controller can decide to post a _de-registration_ event at any point. It will be interpreted only if no previous
-  files are missing.
-- If there is a missing file and the controller is still registered to the publisher, then its DID gets stuck. This seems
- to be, so far, the only way to avoid late publication.
-- We have evaluated other ways to handle the situation where the publisher does not reveal the file. For example, to
- allow controllers to post on-chain the operation they tried to batch. The complexity arises because we should assume
- that the controller and publisher could collude (they could even be the same person).
- - For example, we thought about having a merkle tree hash published with the file reference, to allow controllers to
- move on in case a file is not revealed. However, the publisher can simply decide to post a different merkle root
- hash or even to never provide the inclusion proof to the controller.
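-
-A rough sketch of the blocking rule above, with hypothetical types (a node refuses to process a publisher's newer file
-while an earlier file from the same publisher is still missing):
-
-```scala
-case class PublishedRef(publisherDid: String, seqNo: Long, fileHash: String)
-
-def canProcess(ref: PublishedRef,
-               missingFiles: Set[String],
-               earlierRefs: Seq[PublishedRef]): Boolean =
-  !earlierRefs.exists(e =>
-    e.publisherDid == ref.publisherDid &&
-      e.seqNo < ref.seqNo &&
-      missingFiles.contains(e.fileHash))
-```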
diff --git a/docs/protocol/protocol-v0.1.md b/docs/protocol/protocol-v0.1.md
deleted file mode 100644
index 6db35c8e9e..0000000000
--- a/docs/protocol/protocol-v0.1.md
+++ /dev/null
@@ -1,375 +0,0 @@
-
-
-\newpage
-
-# Slayer v1: Bitcoin 2nd layer protocol
-
-This document describes the protocol for building the credentials project as a 2nd layer on top of Bitcoin. The protocol is based on the [sidetree protocol](https://github.com/decentralized-identity/sidetree/blob/master/docs/protocol.md) from Microsoft.
-
-## Definitions
-
-The definitions from the [high-level-design-document](https://github.com/input-output-hk/atala/tree/develop/prism-backend/docs/bitcoin#definitions) apply here; be sure to review those first.
-
-- **DID**: A decentralized identifier, see the [official spec](https://w3c-ccg.github.io/did-spec/).
-- **DID Document**: The document representing a state of a DID, includes details like its public keys.
-- **ATALA Operation**: An operation in the 2nd layer ledger (ATALA Node), like `Create DID`, `Issue Credential`, etc, look into the [supported operations](#atala-operations) for more details. This is equivalent to a Bitcoin Transaction on the 2nd layer protocol.
-- **ATALA Block**: A list of ATALA Operations, includes metadata about the block. This is equivalent to a Bitcoin Block on the 2nd layer protocol.
-- **ATALA Object**: A file that has metadata about an ATALA Block and a reference to retrieve it. A reference to an ATALA Object is the only detail stored in Bitcoin operations.
-- **Genesis Bitcoin Block**: The first Bitcoin Block aware of ATALA Objects, everything before this block can be discarded.
-- **Content Addressable Storage**: System that allows storing files and querying them based on the hash of the file. We have yet to choose whether to use a centralized one (e.g. S3), a decentralized one (IPFS), or some combination of these.
-
-## Cryptography
-
-We use **hashing** to create short digests of potentially long data. For secure cryptographic hash algorithms there is an assumption that it is computationally intractable to find two values that have the same hash. This is why the hash is enough to uniquely identify the value. The SHA256 algorithm is used, unless specified otherwise.
-
-Cryptographic signatures are used to ensure that the person who generated the message is who they claim to be. Keys are generated and managed by users - you can read more on that in the [Key management](#key-management) section. SHA256 with ECDSA is used unless specified otherwise.
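-
-As a small illustration of these primitives (a sketch only: the JCA default EC curve is used here, while the protocol
-uses secp256k1/P-256K, which typically requires an extra provider such as BouncyCastle):
-
-```scala
-import java.security.{KeyPairGenerator, MessageDigest, Signature}
-
-val payload: Array[Byte] = "binary-encoded AtalaOperation".getBytes("UTF-8")
-
-// SHA256 digest, e.g. to derive short identifiers from operation bytes.
-val digest: Array[Byte] = MessageDigest.getInstance("SHA-256").digest(payload)
-
-// SHA256 with ECDSA signature over the same bytes.
-val keys = KeyPairGenerator.getInstance("EC").generateKeyPair()
-val signer = Signature.getInstance("SHA256withECDSA")
-signer.initSign(keys.getPrivate)
-signer.update(payload)
-val signature: Array[Byte] = signer.sign()
-```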
-
-
-## Differences from sidetree
-
-This section states the main known differences from the sidetree protocol:
-
-- We support credentials while sidetree only supports DIDs.
-
-- We are using Protobuf encoded data while sidetree uses JSON representation.
-
-- We are using GRPC for API while sidetree uses REST.
-
-
-## The path to publish an ATALA Operation
-There are some steps involved before publishing an ATALA Operation. The following steps show what the ATALA Node does to publish a credential proof; publishing any other ATALA Operation follows the same process.
-
-**NOTE**: We are using JSON protobuf encoding for readability here. Binary encoding is used for querying the node and storage.
-
-1- Let's assume that the issuer's DID is already registered on the ledger. In order to register an issued credential, they need to send a signed ATALA operation containing its hash:
-
-```json
-IssueCredentialOperation:
-{
- "signedWith": "issuing",
- "signature": "MEUCIQDCntn4GKNBja9LYPHa5U7KSQPukQYwHD2FuxXmC2I2QQIgEdN3EtFZW+k/zOe2KQYjYZWPaV5SE0Mnn8XmhDu1vg4=",
- "operation": {
- "issueCredential": {
- "credentialData": {
- "issuer": "7cd7b833ba072944ab6579da20706301ec6ab863992a41ae9d80d56d14559b39",
- "contentHash": "7XACtDnprIRfIjV9giusFERzD722AW0+yUMil7nsn3M="
- }
- }
- }
-}
-```
-
-2- After some time the node creates an ATALA Block containing all operations it has received from clients. Here it contains just one, but generally nodes will batch operations in order to lower block publishing costs.
-
-```json
-{
- "version": "0.1",
- "operations": [
- {
- "signedWith": "issuing",
- "signature": "MEUCIQDCntn4GKNBja9LYPHa5U7KSQPukQYwHD2FuxXmC2I2QQIgEdN3EtFZW+k/zOe2KQYjYZWPaV5SE0Mnn8XmhDu1vg4=",
- "operation": {
- "issueCredential": {
- "credentialData": {
- "issuer": "7cd7b833ba072944ab6579da20706301ec6ab863992a41ae9d80d56d14559b39",
- "contentHash": "7XACtDnprIRfIjV9giusFERzD722AW0+yUMil7nsn3M="
- }
- }
- }
- }
- ]
-}
-```
-
-3- An ATALA Object is created; it links to the current ATALA Block. In the future we plan to add metadata which can help the ATALA Node choose whether to retrieve the ATALA Block or not (think about a light client), such as a list of ids of the entities that the block affects. The included ATALA Block hash is the SHA256 hash of the block file content, which consists of the binary encoded `AtalaBlock` protobuf message.
-
-```json
-{
- "blockHash": "s4Xy4+Cx4b1KaH1CHq4/kq9yzre3Uwk2A0SZmD1t7YQ=",
- "blockOperationCount": 1,
- "blockByteLength": 196
-}
-```
-
-
-4- The ATALA Block and the ATALA Object are pushed to the Content Addressable Storage. Each of these files is identified by the hash of its content, like `HASH(ATALA Object)` and `HASH(ATALA Block)` - the hash is computed from the file contents, i.e. the binary encoded protobuf message.
-
-5- A Bitcoin transaction is created and submitted to the Bitcoin network. It includes a reference to the ATALA Object on the special output (see the one with OP_RETURN). The `FFFF0000` is a magic value (to be defined) which tells ATALA Nodes that this transaction has an ATALA Object linked in it; after the magic value, you can see the `HASH(ATALA Object)` (SHA256 of the file).
-
-```json
-{
- "txid": "0b6e7f92b27c70a6948dd144fe90387397c35a478f8b253ed9feef692677185e",
- "vin": [...],
- "vout": [
- {
- "value": 0,
- "n": 0,
- "scriptPubKey": {
- "asm": "OP_RETURN FFFF0000 015f510a36c137884c6f4527380a3fc57a32fdda79bfd18634ba9f793edb79c2",
- "hex": "6a2258...",
- "type": "nulldata"
- }
- }
- ],
- "hex": "01000000...",
- "time": 1526723817
-}
-```
-
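-A sketch of how the OP_RETURN payload in step 5 could be assembled (the hex helper is illustrative; the actual
-transaction building is done by a Bitcoin library):
-
-```scala
-def hexToBytes(hex: String): Array[Byte] =
-  hex.grouped(2).map(Integer.parseInt(_, 16).toByte).toArray
-
-val magic: Array[Byte] = hexToBytes("FFFF0000") // magic value, to be defined
-val objectHash: Array[Byte] = hexToBytes("015f510a36c137884c6f4527380a3fc57a32fdda79bfd18634ba9f793edb79c2")
-val opReturnPayload: Array[Byte] = magic ++ objectHash // 4 + 32 = 36 bytes after the OP_RETURN opcode
-```
-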
-6- After the published Bitcoin transaction has N (to be defined) confirmations, we could consider the transaction as final (no rollback expected), which could mark all the underlying ATALA operations as final.
-
-## Validation
-
-There are two notions related to operations: we say that an operation is **valid** if it is well-formed - has all required fields - and it is not larger than defined limits. Validity depends only on the operation itself and system parameters, not on its state. We say that an operation is **correct** if it is valid, signed properly, and it either is a creation operation or refers to the previous operation correctly.
-
-Operations that are not valid must be discarded by the node and not included in the block. Inclusion of even one invalid operation renders the whole block invalid and it must be ignored by the nodes. On the other hand, operation correctness should not be checked before inclusion - the block can include incorrect operations. Correctness is checked during state updates - incorrect operations are ignored, and one or more incorrect operations don't affect the processing of other operations in the block.
-
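-A minimal sketch of the valid/correct distinction, with hypothetical types (validity is purely structural, correctness
-additionally needs the current state):
-
-```scala
-case class Operation(hasRequiredFields: Boolean, byteSize: Int,
-                     signatureOk: Boolean, isCreation: Boolean,
-                     previousHashMatches: Boolean)
-
-val MaxOperationBytes = 4096 // illustrative limit, not the actual protocol parameter
-
-def isValid(op: Operation): Boolean =
-  op.hasRequiredFields && op.byteSize <= MaxOperationBytes
-
-def isCorrect(op: Operation): Boolean =
-  isValid(op) && op.signatureOk && (op.isCreation || op.previousHashMatches)
-```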
-
-## Updating ATALA Node state
-The ATALA Node has an internal database where it indexes the content from the ATALA operations. This is useful for querying details efficiently, like retrieving a specific DID Document, or checking whether a credential was issued.
-
-The ATALA Node needs to have a Genesis Bitcoin Block specified; everything before that block doesn't affect the ATALA Node State.
-
-These are some details that the ATALA Node state holds:
-
-- DIDs and their respective current document.
-- Credentials (hashes only), who issued them and when they were revoked (if applicable)
-
-
-The component updating the ATALA Node state is called the Synchronizer.
-
-The Synchronizer keeps listening for new blocks on the Bitcoin node, stores the Bitcoin block headers and applies rollbacks when necessary.
-
-After a block is considered finalized (getting N confirmations), we must look for ATALA Objects, retrieve the objects from storage, perform the validations (to be defined) on the ATALA Object, the ATALA Block, and the ATALA Operations, and apply the ATALA Operations to the current ATALA Node state. That means the state lags N blocks behind what is present in the Bitcoin ledger - and all user queries are answered based on such lagging state.
-
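-A hypothetical sketch of that processing loop (types and helpers are illustrative): only blocks with at least N
-confirmations are processed, so the state lags N blocks behind the chain tip.
-
-```scala
-case class BitcoinBlock(height: Long, atalaObjectRefs: Seq[String])
-
-def processFinalizedBlocks(chain: Seq[BitcoinBlock],
-                           tipHeight: Long,
-                           confirmations: Int,
-                           applyAtalaObject: String => Unit): Unit =
-  chain
-    .filter(_.height <= tipHeight - confirmations)
-    .foreach(b => b.atalaObjectRefs.foreach(applyAtalaObject))
-```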
-
-## ATALA Operations
-
-Here you can find the possible ATALA Operations. The list will be updated when new operations get implemented.
-
-Each operation sent via RPC to the node needs to be signed by the client using the relevant key (specified in the operation description) and wrapped into a `SignedAtalaOperation` message. The signature is generated from the byte sequence obtained by binary encoding of the `AtalaOperation` message.
-
-Operations must contain all fields, unless specified otherwise. If there is a value missing, the operation is considered invalid.
-
-We can divide operations into two kinds: ones that create a new entity (e.g. CreateDID or IssueCredential) and ones that affect an existing one (e.g. RevokeCredential). The latter always contain a field with the previous operation hash (SHA256 of the `AtalaOperation` binary encoding). If it doesn't match, the operation is considered incorrect and it is ignored.
-
-### CreateDID
-
-Registers a DID into the ATALA ledger. The DID structure here is very simple: it consists only of an id and a sequence of public keys with their ids. It must be signed by one of the master keys given in the document.
-
-```json
-{
- "signedWith": "master",
- "signature": "MEQCIBZGvHHcSY7AVsds/HqfwPCiIqxHlsi1m59hsUWeNkh3AiAWvvAUeF8jFgKLyTt11RNOQmbR3SIPXJJUhyI6yL90tA==",
- "operation": {
- "createDid": {
- "didData": {
- "publicKeys": [
- {
- "id": "master",
- "usage": "MASTER_KEY",
- "ecKeyData": {
- "curve": "P-256K",
- "x": "8GnNreb3fFyYYO+DdiYd2O9SKXXGHvy6Wt3z4IuRDTM=",
- "y": "04uwqhI3JbY7W3+v+y3S8E2ydKSj9NXV0uS61Mem0y0="
- }
- },
- {
- "id": "issuing",
- "usage": "ISSUING_KEY",
- "ecKeyData": {
- "curve": "P-256K",
- "x": "F8lkVEMP4pyXa+U/nE2Qp9iA/Z82Tq6WD2beuaMK2m4=",
- "y": "2hHElksDscwWYXZCx1pRyj9XaOHioYr48FPNRsUBAqY="
- }
- }
- ]
- }
- }
- }
-}
-```
-
-RPC Response:
-```json
-{
- "id": "7cd7b833ba072944ab6579da20706301ec6ab863992a41ae9d80d56d14559b39"
-}
-```
-
-The returned identifier is the hex encoding of the hash of the binary representation of the `AtalaOperation` message. The DID can be obtained by prefixing the id with "did:atala:".
-
-### UpdateDIDOperation
-
-Updates DID content by sequentially running the update actions included. Actions available: **AddKeyAction** and **RemoveKeyAction**. Please note that the key id must be unique - when a key is removed its id cannot be reused.
-
-The operation must be signed by a master key. Actions cannot include removal of the key used to sign the operation - in such a case the operation is considered invalid. That is to protect against losing control of the DID - we assure that there is always one master key present that the user is able to sign data with. In order to replace the master key, the user first has to issue an operation adding a new master key and then another removing the previous one. In order for the operation to be considered valid, all its actions need to be.
-
-```json
-{
- "signedWith": "master",
- "signature": "MEQCIGtIUUVSsuRlRWwN6zMzaSi7FImvRRbjId7Fu/akOxFeAiAavOigmiJ5qQ2ORknhAEb207/2aNkQKfzBr0Vw+JS+lw==",
- "operation": {
- "updateDid": {
- "id": "7cd7b833ba072944ab6579da20706301ec6ab863992a41ae9d80d56d14559b39",
- "actions": [
- {
- "addKey": {
- "key": {
- "id": "issuing-new",
- "usage": "ISSUING_KEY",
- "ecKeyData": {
- "curve": "P-256K",
- "x": "Zk85VxZ1VTo2dxMeI9SCuqcNYHvW7mfyIPR0D9PI9Ic=",
- "y": "QsI8QhEe4Z0YnG4kGZglvYfEPME5mjxmWIaaxsivz5g="
- }
- }
- }
- },
- {
- "removeKey": {
- "keyId": "issuing"
- }
- }
- ]
- }
- }
-}
-```
-
-Response:
-
-```json
-{}
-```
-
-### IssueCredential
-
-Registers a credential into the ATALA ledger, given the hash of its contents. It must be signed by one of the issuer's current issuing keys.
-
-Example:
-
-```json
-{
- "alg": "EC",
- "keyId": "issuing",
- "signature": "MEUCIQDCntn4GKNBja9LYPHa5U7KSQPukQYwHD2FuxXmC2I2QQIgEdN3EtFZW+k/zOe2KQYjYZWPaV5SE0Mnn8XmhDu1vg4=",
- "operation": {
- "issueCredential": {
- "credentialData": {
- "issuer": "7cd7b833ba072944ab6579da20706301ec6ab863992a41ae9d80d56d14559b39",
- "contentHash": "7XACtDnprIRfIjV9giusFERzD722AW0+yUMil7nsn3M="
- }
- }
- }
-}
-```
-
-RPC response:
-
-```json
-{
- "id": "a3cacb2d9e51bdd40264b287db15b4121ddee84eafb8c3da545c88c1d99b94d4"
-}
-```
-
-The returned identifier is the hex encoding of the hash of the binary representation of the `AtalaOperation` message. It is used to refer to the credential in the revocation operation.
-
-### RevokeCredential
-
-It must be signed by one of the issuer's current issuing keys.
-
-Example:
-
-```json
-{
- "alg": "EC",
- "keyId": "issuing",
- "signature": "MEUCIQCbX9aHbFGeeexwT7IOA/n93XZblxFMaJrBpsXK99I3NwIgQgkrkXPr6ExyflwPMIH4Yb3skqBhhz0LOLFrTqtev44=",
- "operation": {
- "revokeCredential": {
- "previousOperationHash": "o8rLLZ5RvdQCZLKH2xW0Eh3e6E6vuMPaVFyIwdmblNQ=",
- "credentialId": "a3cacb2d9e51bdd40264b287db15b4121ddee84eafb8c3da545c88c1d99b94d4"
- }
- }
-}
-```
-
-RPC response:
-
-```json
-{}
-```
-
-
-## Key management
-As the users of the system are the owners of their data, **they are responsible for storing their private keys securely**.
-
-There are several details to consider, which depend on the user role.
-
-**NOTE**: This section details the ideal key management features which can't be supported by December.
-
-**NOTE**: The Alpha proposal doesn't include any key management.
-
-### Issuer
-The issuer must have several keys, which are used for different purposes:
-
-1. A **Master Key** which can revoke or generate any of the issuer keys. This key should be difficult to access, and it's expected to be used only in extreme situations. A way to make it difficult to access could be to split the key into N pieces which are shared with trusted people; in order to recover the master key, any K of these pieces will be required. Compromising this key could cause lots of damage.
-2. An **Issuing Key** which is used for issuing credentials. Ideally, it will be stored in a hardware wallet; Shamir's Secret Sharing scheme could be adequate if issuing certificates is only done a couple of times per year. Compromising this key could lead to fake certificates with correct signatures, or even to valid certificates getting revoked (unless there is a separate key for revocation).
-3. A **Communication Key**, which is used for the end-to-end encrypted communication between the issuer and the student's wallet. This can easily be a hot key; compromising it would not cause much damage.
-4. An **Authentication Key**, which is used for authenticating the issuer with IOHK. Compromising this key could lead the attacker to get anything IOHK has about the issuer, like the historic credentials, and to get new connection codes. This key will ideally be stored in a hardware wallet or an encrypted pen-drive.
-
-
-Key storage:
-
-- The issuer private keys are the most powerful because they can issue and revoke certificates, hence, if these keys get compromised, lots of people could be affected.
-- The issuer ideally will handle their private keys using a hardware wallet (like ledger/trezor), but **this won't be supported by December**.
-- Another way is to use an encrypted pen-drive to store the keys, but that should be done outside of our system.
-- Shamir's Secret Sharing is the most common approach to do social key recovery, and while Trezor already supports it, **it can't be done by December**.
-
-
-### Holder
-- While the holder private keys are very important, they are less important than the issuer keys; compromising a user key only affects that user.
-- The mobile wallet generates a mnemonic recovery seed which the holder should store securely; the keys on the wallet are encrypted with a password entered by the holder after running the wallet for the first time.
-
-### Verifier
-- The verifier has the least powerful keys, as they are only used to communicate with the holder wallet; getting them compromised doesn't allow the attacker to do much with them. They should be treated with care, but it is simple to recover if they get compromised.
-- The verifier will ideally use an encrypted pen-drive to store their private keys, but even a non-encrypted pen-drive may be enough.
-
-
-
-## More
-- The 2nd layer could work like a permissioned ledger on top of a public ledger: we could ask the issuers (trusted actors) to provide us their bitcoin addresses and process operations only from trusted addresses. This approach is similar to what OBFT does, it just accepts nodes holding keys on the genesis whitelist.
-
-- Trezor or Ledger could be easily used for signing the issuer requests if we sign SHA256 hashes only; let's say, before signing any payload, its SHA256 is computed and that is what gets signed. While this could be handy for testing, it breaks the purpose of hardware wallets, which is that they keep your keys safe even if your computer is compromised: the SHA256 will be displayed before signing requests, but the device owner doesn't have any guarantee that the SHA256 was produced from the expected payload.
-
-
-## Attacks and countermeasures
-
-### Late publish attack
-
-A malicious issuer, running their own node, can create an Atala Block including a key change operation and reference it in the Bitcoin blockchain without actually making it available via the Content Addressable Storage. In the following blocks they can fully publish operations signed with that key - such as credential issuance or further key changes. In the future they can make the block available, invalidating all the operations made after that.
-
-We don't have a solution for that problem - but the protocol assumes some level of trust towards the issuer anyway. If they want to make a credential invalid they don't need to launch any attacks - they can just revoke it.
-
-The name _Late publish attack_ comes from the https://medium.com/transmute-techtalk/sidetree-and-the-late-publish-attack-72e8e4e6bf53 blogpost, where it was first described (to our knowledge).
-
-### DoS attack
-
-An attacker might try to disrupt the service by creating blocks with a large number of operations. To counteract that, limits are imposed on the number of operations in a block.
-
-Another way of slowing down the network is creating many late publishes, forcing frequent state recomputation. In order to avoid such situations, the ATALA Blockchain can in the future be made permissioned, so only approved nodes can publish blocks.
-
-### Replay attack
-
-An adversary might try to fetch an existing operation from an Atala Block and send it again, e.g. re-attaching keys that were previously compromised and removed from the DID by its owner. Such a strategy won't work in the ATALA Blockchain, as each modifying operation contains the hash of the previous operation affecting the same entity.
-
-### Hide-and-publish attack
-
-In this attack a malicious node receives an operation from an issuer, includes it into an ATALA Block and publishes a reference to the generated object in the Bitcoin ledger, but doesn't publish the ATALA Block or Object. The issuer is unable to know if their operation has been included in a block.
-
-In such a case they should send the operation again to another node - and it will be published. If the malicious node publishes their block, the new operation will become invalid (as it refers to the hash before the first attempt to submit the operation, which is no longer the latest one), but as it is identical to the former, they have the same hash, so following operations won't be affected.
-
-On the other hand, when the client doesn't re-publish the exact same transaction, but publishes a different one instead, for example updating a credential timestamp, the situation is much worse. When the malicious node publishes the old transaction, the following new one becomes invalid. Moreover, it has a different hash, so a whole chain of operations possibly attached to it becomes invalid. In the case of very late publishes, days or even months of operations might be lost.
-
-To avoid such situations, the implementation of the issuer tool needs to make sure that if an operation has been sent to any node, it won't create any new operation affecting that entity until the original one is published.
diff --git a/docs/protocol/protocol-v0.2.md b/docs/protocol/protocol-v0.2.md
deleted file mode 100644
index 0d08542624..0000000000
--- a/docs/protocol/protocol-v0.2.md
+++ /dev/null
@@ -1,606 +0,0 @@
-
-
-\newpage
-
-# Slayer v2: Cardano 2nd layer protocol
-
-This document describes the protocol for building the credentials project on top of Cardano.
-The main differences with version v0.1 of the protocol are:
-
-- We now post Atala operations one at a time instead of posting them in batches
-- DID Documents' underlying data is posted along with DIDs on the blockchain during the creation event
-
-Those decisions allow this protocol to work in the open setting while avoiding attacks
-described in version 0.1. Another observation is that the content addressable storage
-now has no need to store public data (DID Documents).
-
-## Definitions
-
-- **DID**: A decentralized identifier, see the [official spec](https://w3c-ccg.github.io/did-spec/).
-- **DID Document**: The document representing a state of a DID, includes details like its public keys
-- **Verifiable Credential**: A signed digital document. E.g. digital university degrees, digital passport, etc.
-- **Atala Node**: An application that follows this protocol. It sends transactions with metadata to the Cardano
-  blockchain, reads transactions confirmed by Cardano nodes, and interprets the relevant information from metadata
-  in compliance with protocol rules
-- **Atala Operation**: Metadata attached to a Cardano transaction, it is used to codify protocol events like
- `Create DID`, `Issue Credential`, etc, look into the [supported operations](#Operations) for more details
-- **Atala Transaction**: A Cardano transaction with an Atala Operation in its metadata
-- **Genesis Atala Block**: The first Cardano Block that contains an Atala transaction, everything before this block can
- be discarded by Atala Nodes
-
-Throughout this document, we will refer to DIDs, DID Documents and verifiable credentials as _entities_.
-We will also use the term _Atala Event_ to refer to the event of detecting a new Atala operation in the stable part of
-Cardano's blockchain. In particular, we will refer to events named after the operations e.g. DID creation event,
-credential revocation event, etc.
-Finally, we will say that a node _processes_ an operation when it reads it from the blockchain and that it _posts_ an
-operation when a client sends a request to it to publish it on the blockchain.
-
-## Protocol
-
-### Objective
-
-The goal of the protocol is to read Atala operations from the stable section of Cardano's blockchain, and keep an updated
-state of DIDs, DID Documents and verifiable credentials. The protocol operations modify the state of the said entities.
-The current list of operations is:
-- **Create DID**: Allows creating and publishing a new DID and its corresponding DID Document
-- **Update DID**: Allows adding or revoking public keys to/from a DID Document
-- **Issue Credential**: Allows an issuer to publish a proof of issuance in Cardano's blockchain
-- **Revoke Credential**: Allows revoking an already published verifiable credential
-
-Alongside the operations, the protocol also provides services, which any node must implement and which do not affect the state.
-The list of services is:
-- **(DID resolution)** Given a published DID, return its corresponding DID Document in its latest state
-- **(Pre validation)** Given a credential's hash, a signer's DID, a key identifier, and an expiration date, reply whether the
-  credential is invalid with respect to signing dates and key correspondence. The operation returns a key if the
-  validations pass. The client needs to validate cryptographic signatures with the provided key. See the
-  [credential validation](#Verification) section for more details.
-
-Possible future operations:
-- **Revoke DID**: Allows marking a DID as invalid, rendering all its keys invalid.
-
-### Operation posting and validation
-
-Atala operations are encoded messages that represent state transitions. The messages' structure and protocol state will
-be described later in this document. When a node posts an operation, a Cardano transaction is created and submitted to
-the Cardano network. The transaction includes the encoded operation on its metadata. In Bitcoin we used to prepend the
-metadata messages with the string `FFFF0000`, a magic value (to be defined) which told Atala Nodes that this transaction
-has an Atala operation linked in it. We will consider doing the same in Cardano blockchain once we get more details on
-the metadata field structure.
-
-After the published Cardano transaction has N (to be defined) confirmations, we could consider the transaction as final
-(no rollback expected), which could mark the underlying operation as final.
-
-There are two definitions related to operations:
- - We say that an operation is **valid** if it is well-formed - has all required fields - and it is not larger than
-   defined limits. Validity depends only on the operation itself and system parameters, not on its state.
- - We say that an operation is **correct** if it is valid, signed properly, and it is either a creation operation or
-   refers to the previous operation that affects the same entity.
-
-Operations that are not valid must be discarded by the node and not included in a transaction. On the other hand,
-operation correctness should not be checked before inclusion. It is checked during state updates - incorrect operations
-are ignored.
-
-### Node state
-
-In order to describe the protocol, we will consider the following state that each node will represent. We will define
-the protocol operations and services in terms of how they affect/interact with this abstract state.
-
-```scala
-// Abstract types
-type Key
-type Hash
-type Date
-
-// Concrete types
-type KeyUsage = MasterKey | IssuingKey | AuthenticationKey | CommunicationKey
-type KeyData = {
- key: Key,
- usage: KeyUsage,
-  keyAdditionEvent: Date, // extracted from the blockchain
- keyRevocationEvent: Option[Date] // extracted from the blockchain
-}
-type DIDData = {
- lastOperationReference: Hash,
- keys: Map[keyId: String, keyData: KeyData]
-}
-type CredentialData = {
- lastOperationReference: Hash,
- issuerDIDSuffix: Hash,
- credIssuingEvent: Date, // extracted from the blockchain
- credRevocationEvent: Option[Date] // extracted from the blockchain
-}
-
-// Node state
-state = {
-  dids : Map[didSuffix: Hash, data: DIDData],
- credentials: Map[credentialId: Hash, data: CredentialData]
-}
-```
-
-We want to remark that the fields:
-
-- `keyAdditionEvent`
-- `keyRevocationEvent`
-- `credIssuingEvent`
-- `credRevocationEvent`
-
-represent **timestamps inferred from Cardano's blockchain and not data provided by users**.
-
-The representation is implementation independent and applications can decide to optimise it with databases,
-specialized data structures, or others. We aim to be abstract enough for any implementation to be able to map itself
-to the abstract description.
-
-Let's now move into the operations descriptions.
-
-### Operations
-
-Users send operations via RPC to nodes. Each operation request needs to be signed by the relevant key (specified for
-each operation) and wrapped into a `SignedAtalaOperation` message. The signature is generated from the byte sequence
-obtained by binary encoding of the `AtalaOperation` message.
-
-Operations must contain the exact fields defined by schemas, unless specified otherwise. If there is an extra or missing
-value, the operation is considered to be invalid.
-
-We can divide operations into two kinds: ones that create a new entity (e.g. CreateDID or IssueCredential) and ones
-that affect an existing one (e.g. UpdateDID, RevokeCredential). The latter always contain a field with the previous operation
-hash (SHA256 of the `AtalaOperation` binary encoding). If the hash doesn't match the last performed operation on the
-entity, the operation is considered incorrect and it is ignored.
-
-When describing checks or effects of an operation on the node state, we will use an implicit value `decoded` that
-represents the decoded operation extracted from the blockchain.
-
-#### Create DID
-
-Registers a DID into the ledger. The associated initial DID Document structure here is very simple: it consists only of
-a document id (the DID) and a sequence of public keys with their respective key ids. It must be signed by one of the
-master keys given in the DID Document.
-
-```json
-{
- "signedWith": "master",
- "signature": "MEQCIBZGvHHcSY7AVsds/HqfwPCiIqxHlsi1m59hsUWeNkh3AiAWvvAUeF8jFgKLyTt11RNOQmbR3SIPXJJUhyI6yL90tA==",
- "operation": {
- "createDid": {
- "didData": {
- "publicKeys": [
- {
- "id": "master",
- "usage": "MASTER_KEY",
- "ecKeyData": {
- "curve": "P-256K",
- "x": "8GnNreb3fFyYYO+DdiYd2O9SKXXGHvy6Wt3z4IuRDTM=",
- "y": "04uwqhI3JbY7W3+v+y3S8E2ydKSj9NXV0uS61Mem0y0="
- }
- },
- {
- "id": "issuing",
- "usage": "ISSUING_KEY",
- "ecKeyData": {
- "curve": "P-256K",
- "x": "F8lkVEMP4pyXa+U/nE2Qp9iA/Z82Tq6WD2beuaMK2m4=",
- "y": "2hHElksDscwWYXZCx1pRyj9XaOHioYr48FPNRsUBAqY="
- }
- }
- ]
- }
- }
- }
-}
-```
-
-RPC Response:
-
-```json
-{
- "id": "7cd7b833ba072944ab6579da20706301ec6ab863992a41ae9d80d56d14559b39"
-}
-```
-
-The returned identifier is the hex encoding of the hash of the binary representation of the `AtalaOperation` message. The DID
-can be obtained by prefixing the id with "did:atala:".
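-
-An illustrative helper showing that derivation (the hex encoding of the SHA256 of the binary `AtalaOperation`, prefixed
-with "did:atala:"):
-
-```scala
-import java.security.MessageDigest
-
-def didFromOperationBytes(atalaOperationBytes: Array[Byte]): String = {
-  val digest = MessageDigest.getInstance("SHA-256").digest(atalaOperationBytes)
-  val idHex = digest.map(b => f"${b & 0xff}%02x").mkString
-  "did:atala:" + idHex
-}
-```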
-
-When the operation is observed in the stable part of the ledger, the node performs the following checks:
-
-```scala
-alias didSuffix = getDIDSuffix(decoded)
-alias referredKey: Option[Key] = extractKey(decoded)
-alias messageSigned = decoded.operation
-
- referredKey.nonEmpty &&
- referredKey.get.usage == MasterKey &&
- isValid(decoded.signature, messageSigned, referredKey.get) &&
- ! state.dids.contains(didSuffix)
-```
-
-where `extractKey` searches for the key mentioned in `signedWith` in the decoded operation and returns the key if found or
-an empty option otherwise.
-
-If the check passes, then we get the following state update:
-
-```scala
-state'.dids = state.dids + (didSuffix -> { lastOperationReference = hash(decoded),
- keys = createMap(decoded.operation.didData.publicKeys) } )
-state'.credentials = state.credentials
-```
-
-where `createMap` maps the data from the request to the model.
-
-#### Update DID
-
-Updates DID content by sequentially running the update actions included. Actions available: **AddKeyAction** and
-**RevokeKeyAction**. Please note that:
-- The key id must be unique - when a key is revoked its id cannot be reused.
-- A revoked key can't be re-added.
-
-The operation must be signed by a master key. Actions cannot include revocation of the key used to sign the operation.
-In such case the operation is considered invalid. This is to protect against losing control of the DID. We assure that
-there is always one master key present that the user is able to sign data with.
-In order to replace the last master key, the user first has to add a new master key and then revoke the previous one
-in a separate operation signed by the newly added key.
-In order for the operation to be considered valid, all its actions need to be.
-
-```json
-{
- "signedWith": "master",
- "signature": "MEQCIGtIUUVSsuRlRWwN6zMzaSi7FImvRRbjId7Fu/akOxFeAiAavOigmiJ5qQ2ORknhAEb207/2aNkQKfzBr0Vw+JS+lw==",
- "operation": {
- "previousOperationHash": "o8rLLZ5RvdQCZLKH2xW0Eh3e6E6vuMPaVFyIwdmblNQ=",
- "updateDid": {
- "didSuffix": "o5fHLw4RvdQCZLKH2xW0Eh3e6E6vuMPaVeGDIwdmblaD=",
- "actions": [
- {
- "addKey": {
- "key": {
- "id": "issuing-new",
- "usage": "ISSUING_KEY",
- "ecKeyData": {
- "curve": "P-256K",
- "x": "Zk85VxZ1VTo2dxMeI9SCuqcNYHvW7mfyIPR0D9PI9Ic=",
- "y": "QsI8QhEe4Z0YnG4kGZglvYfEPME5mjxmWIaaxsivz5g="
- }
- }
- }
- },
- {
- "revokeKey": {
- "keyId": "issuing"
- }
- }
- ]
- }
- }
-}
-```
-
-Response:
-
-```json
-{}
-```
-
-When the operation is observed in the stable part of the ledger, the node performs the following checks:
-
-```scala
-alias didToUpdate = decoded.operation.UpdateDID.didSuffix
-alias signingKeyId = decoded.signedWith
-alias updateActions = decoded.operation.updateDID.actions
-alias currentDidData = state.dids(didToUpdate).keys
-alias messageSigned = decoded.operation
-
-state.dids.contains(didToUpdate) &&
-state.dids(didToUpdate).keys.contains(signingKeyId) &&
-state.dids(didToUpdate).keys(signingKeyId).usage == MasterKey &&
-state.dids(didToUpdate).lastOperationReference == decoded.operation.previousOperationHash &&
-isValid(decoded.signature, messageSigned, state.dids(didToUpdate).keys(signingKeyId).key) &&
-updateMap(signingKeyId, currentDidData, updateActions).nonEmpty
-
-```
-
-where `updateMap` applies the updates sequentially over the initial state. It verifies that the operation signing key is
-not revoked by any action, that each action can be performed correctly and returns the updated DidData if everything is
-fine. If any check or action application fails, an empty option is returned.
-We will refine the specification of `updateMap` in a future iteration.
-
-
-If the check passes, then we get the following state update:
-
-```scala
-state'.dids = state.dids.update(didToUpdate, { lastOperationReference = hash(decoded),
-                                               keys = updateMap(signingKeyId, currentDidData, updateActions).get })
-state'.credentials = state.credentials
-```
-
-#### IssueCredential
-
-Publishes a proof of existence for a credential, given the hash of its contents. It must be signed by one of the issuer's
-current issuing keys.
-
-**NOTES**:
-- We are not enforcing that the signature of the RPC is the same as the credential's signature. Should we add this? It
- could be useful to keep flexibility and allow signing with different keys. For example, a university may have a DID
- and be the authority to issue credentials while the credential signers could be other institutions under the university
- control. The revocation control lives still under the sole control of the university. Note that depending on this decision
- we need to add or remove checks in the verification process. I am currently assuming that the keys could be different.
-
-```json
-{
- "keyId": "issuing",
- "signature": "MEUCIQDCntn4GKNBja9LYPHa5U7KSQPukQYwHD2FuxXmC2I2QQIgEdN3EtFZW+k/zOe2KQYjYZWPaV5SE0Mnn8XmhDu1vg4=",
- "operation": {
- "issueCredential": {
- "credentialData": {
- "issuerDIDSuffix": "7cd7b833ba072944ab6579da20706301ec6ab863992a41ae9d80d56d14559b39",
- "contentHash": "7XACtDnprIRfIjV9giusFERzD722AW0+yUMil7nsn3M="
- }
- }
- }
-}
-```
-
-RPC response:
-
-```json
-{
- "id": "a3cacb2d9e51bdd40264b287db15b4121ddee84eafb8c3da545c88c1d99b94d4"
-}
-```
-
-The returned identifier is the hex encoding of the hash of the binary representation of the `AtalaOperation` message.
-It is used to refer to the credential in the revocation operation.
-
-When the operation is observed in the stable part of the ledger, the node performs the following checks:
-
-```
-alias signingKeyId = decoded.keyId
-alias issuerDIDSuffix = decoded.operation.issueCredential.credentialData.issuerDIDSuffix
-alias signature = decoded.signature
-alias messageSigned = decoded.operation
-
-state.dids.contains(issuerDIDSuffix) &&
-state.dids(issuerDIDSuffix).keys.contains(signingKeyId) &&
-state.dids(issuerDIDSuffix).keys(signingKeyId).usage == IssuingKey &&
-state.dids(issuerDIDSuffix).keys(signingKeyId).keyRevocationEvent.isEmpty &&
-isValid(signature, messageSigned, state.dids(issuerDIDSuffix).keys(signingKeyId).key)
-```
-
-If the check passes, then we get the following state update:
-
-```scala
-alias credentialId = getCredId(decoded)
-
-state'.dids = state.dids
-state'.credentials = state.credentials + (credentialId -> { lastOperationReference = hash(decoded),
-                                                             issuerDIDSuffix = issuerDIDSuffix,
-                                                             credIssuingEvent = BLOCK_TIMESTAMP,
-                                                             credRevocationEvent = None
-                                                           })
-```
-
-#### RevokeCredential
-
-It must be signed by one of the issuer's current issuing keys.
-
-Example:
-
-```json
-{
- "keyId": "issuing",
- "signature": "MEUCIQCbX9aHbFGeeexwT7IOA/n93XZblxFMaJrBpsXK99I3NwIgQgkrkXPr6ExyflwPMIH4Yb3skqBhhz0LOLFrTqtev44=",
- "operation": {
- "revokeCredential": {
- "previousOperationHash": "o8rLLZ5RvdQCZLKH2xW0Eh3e6E6vuMPaVFyIwdmblNQ=",
- "credentialId": "a3cacb2d9e51bdd40264b287db15b4121ddee84eafb8c3da545c88c1d99b94d4"
- }
- }
-}
-```
-
-When the operation is observed in the stable part of the ledger, the node performs the following checks:
-
-```
-alias signingKeyId = decoded.keyId
-alias credentialId = decoded.operation.revokeCredential.credentialId
-alias issuerDIDSuffix = state.credentials(credentialId).issuerDIDSuffix
-alias signature = decoded.signature
-alias messageSigned = decoded.operation
-alias previousHash = decoded.operation.revokeCredential.previousOperationHash
-
-state.credentials.contains(credentialId) &&
-state.credentials(credentialId).lastOperationReference == previousHash &&
-state.dids.contains(issuerDIDSuffix) &&
-state.dids(issuerDIDSuffix).keys.contains(signingKeyId) &&
-state.dids(issuerDIDSuffix).keys(signingKeyId).usage == IssuingKey &&
-state.dids(issuerDIDSuffix).keys(signingKeyId).keyRevocationEvent.isEmpty &&
-isValid(signature, messageSigned, state.dids(issuerDIDSuffix).keys(signingKeyId).key)
-```
-
-If the check passes, then we get the following state update:
-
-```scala
-state'.dids = state.dids
-state'.credentials = state.credentials + (credentialId -> { lastOperationReference = hash(decoded),
-                                                             issuerDIDSuffix = state.credentials(credentialId).issuerDIDSuffix,
-                                                             credIssuingEvent = state.credentials(credentialId).credIssuingEvent,
-                                                             credRevocationEvent = Some(BLOCK_TIMESTAMP)
-                                                           })
-```
-
-RPC response:
-
-```scala
-{}
-```
-
-### Services
-
-#### Verification
-
-In order to define credential verification we need to define what we mean by a credential being valid.
-Here we have to distinguish between two types of validity. Given a credential, one notion of validity relates to the
-credential's specific semantics. E.g. a credential could represent a contract between two parties. In such a case, our
-protocol has no say in telling whether the signed contract is valid with respect to the legal system applied in a certain
-jurisdiction. However, the protocol probably should guarantee that the contract was published on a certain date, was
-signed by a specific issuer with a specific valid key, that its expiration date (if any) hasn't occurred, and that
-the credential's content was not altered.
-
-We could define the former type of validity as _credential specific validity_ and the latter as _protocol validity_.
-Note that some credentials may not have any specific validations outside the protocol ones. E.g. birth certificates,
-university degrees, and national id documents may only require the protocol validity. But other credentials may require
-additional ones on top of those.
-
-For credential specific validations, we think that specific applications should be built on top of our protocol
-to fulfil specific use cases. The process described below formalises the steps to verify protocol validity. Note
-that **all** credentials need to be valid according to the protocol, independently of the existence of other credential
-specific validations. From now on, we will use the term valid to mean protocol valid.
-
-Intuitively speaking, a credential is valid when an issuer signs it with a valid key and posts the Atala operation on
-the blockchain. The credential remains valid as long as its expiration date (if any) hasn't passed and no revocation
-operation referring to that credential has been posted on the blockchain. Otherwise the credential is considered
-invalid.
-The credential needs to carry the following information to perform these checks:
-
-```scala
-- expirationDate: Option[Date]
-- issuerDID: DID
-- signature: Bytes
-- signingKey: String // reference to a key in the issuers DID Document
-```
-
-The key is considered valid if it was added to the `issuerDID`'s DID Document before `credIssuingEvent` and, if the key
-is revoked in that DID Document, the revocation event occurred after `credIssuingEvent` (i.e. after the credential
-was recorded as issued).
-As mentioned in the description of the IssueCredential event, if we enforce that the key that signs the issuance
-request must be the same as the one that signs the credential, then we don't need to check that the credential was
-signed on proper dates, because this is already guaranteed by the check performed in the IssueCredential operation.
-If we want to keep the flexibility of using different keys, we need to validate the relation between the dates of key
-additions/revocations and credential issuance.
-The flexibility could be useful for an institution that allows sub-institutions to sign credentials while only
-the main institution can publish the credentials to the blockchain. The same main institution would also handle
-revocation. An example of such a relation could be faculties signing university degrees while only the university's
-main administration has the power to issue/revoke a credential. We should note that this could also be
-simulated with a single DID by having different issuing keys for the faculties.
-
-Expressed with respect to the node state, given a credential C:
-
-```scala
-// C has no expiration date or the expiration date hasn't occurred yet, and
-(C.expirationDate.isEmpty || C.expirationDate.get >= TODAY) &&
-// the credential was posted in the chain, and
-state.credentials.get(hash(C)).nonEmpty &&
-// the credential was not revoked, and
-state.credentials(hash(C)).data.credRevocationEvent.isEmpty &&
-// the issuer DID that signed the credential is registered, and
-state.dids.get(C.issuerDID.suffix).nonEmpty &&
-// the key used to sign the credential is in the DID, and
-state.dids(C.issuerDID.suffix).data.get(C.signingKey).nonEmpty &&
-// the key was in the DID before the credential publication event, and
-state.dids(C.issuerDID.suffix).data(C.signingKey).keyPublicationEvent < state.credentials(hash(C)).data.credPublicationEvent &&
-// the key was not revoked before credential publication event, and
-(
- // either the key was never revoked
- state.dids(C.issuerDID.suffix).data(C.signingKey).keyRevocationEvent.isEmpty ||
- // or was revoked after signing the credential
- state.credentials(hash(C)).data.credPublicationEvent < state.dids(C.issuerDID.suffix).data(C.signingKey).keyRevocationEvent.get
-) &&
-// the signature is valid
-isValidSignature(
- C,
- state.dids(C.issuerDID.suffix).data(C.signingKey).key
-)
-```
-
-## Tentative schemas
-
-### Verifiable Credential
-
-After reviewing the verification process, we can propose this tentative generic credential schema.
-The fields could be rearranged, e.g. the signature field could be an object that contains the key reference.
-
-```scala
-{
- credentialName: String
- expirationDate: Option[Date]
- issuerDID: DID
- signature: Bytes
- signingKey: String // reference to a key in the issuers DID Document
- claim : Object // contains a mapping of key -> values that represent the credential specific data
-}
-```
-
-We should note that this schema is not compliant with the W3C standard drafts. It could be adapted
-in many parts.
-
-However, the standard proposes a `credentialStatus` [field](https://www.w3.org/TR/vc-data-model/#status) for which
-documentation currently states:
-
-```text
-credentialStatus
- The value of the credentialStatus property MUST include the:
- + id property, which MUST be a URL.
- + type property, which expresses the credential status type (also referred to as the credential status method). It is
- expected that the value will provide enough information to determine the current status of the credential. For
- example, the object could contain a link to an external document noting whether or not the credential is suspended
- or revoked.
- The precise contents of the credential status information is determined by the specific credentialStatus type
- definition, and varies depending on factors such as whether it is simple to implement or if it is privacy-enhancing.
-```
-
-and later says
-
-```text
-Defining the data model, formats, and protocols for status schemes are out of scope for this specification. A Verifiable
-Credential Extension Registry [VC-EXTENSION-REGISTRY] exists that contains available status schemes for implementers who
-want to implement verifiable credential status checking.
-```
-
-but the [extension registry](https://w3c-ccg.github.io/vc-extension-registry/) provides no relevant data.
-
-It is unclear at the time of this writing how to define this field. We may need to
-review more sections of the standard to clarify this point.
-
-## Notes
-
-Below we raise points that may affect the protocol and schemas. We also note tasks
-related to the protocol that we should perform.
-
-- Add a `RecoveryKey` value to the `KeyUsage` type. It could be useful to allow recovering control over a DID even
-  if the master key is lost/compromised. It should be revocable by no key but itself (which would be an exception to
-  our DIDUpdate operation rules).
-- Allow publishing both batched operations and individual operations in transactions' metadata.
-  Batching operations forces us to maintain a CAS, which also brings possible problems related to files missing
-  from the CAS. On the other hand, batching operations reduces fees. If IOHK restricts who can issue operations (in order
-  to ease problems related to who adds files to the CAS), then users could complain about centralization. A reasonable
-  approach, then, is to allow any user to post an operation as long as its entire content is stored directly in
-  the transaction metadata. This allows for a batching service provided by trusted parties and also the option to
-  maintain independence for those who prefer it.
-- Allow issuers to not publish an issuance operation on the blockchain. During conversations we noted that the protocol
-  always publishes a proof of existence on-chain along with the issuer DID. This allows any actor to count how many
-  credentials an issuer produces. This may be useful for some cases and we wonder if it could be a problem for others.
-  If we remove the correlation of publication events with issuer's data, then the issuer loses the ability to detect if
-  a credential is issued without authorization (e.g. due to a compromised key).
-  If we want this correlation to be optional, we need to change the verification process and also define how issuers
-  would specify which type of credentials they issue. Note that issuers need to specify somewhere that their credentials
-  require a publication event to be valid; otherwise an attacker could simply issue credentials without this event and nodes
-  wouldn't detect that the event is required. A possible place for such configuration is the issuer's DID itself, but we
-  should analyse the limitations of such an approach.
-- Define an approach to privacy and to sharing partial information from credentials.
-  This is needed for compliance with W3C. This could also enable many interesting use cases.
-- Define if we intend to be W3C-standard compliant; if so, update schemas and operations appropriately.
-- Estimate bytes consumed by normal operation and estimate ADA fees.
-- Define processes to establish trust on DIDs and how to map real world identities behind them.
-- Define if we need credentials with multiple issuers.
-- The W3C standard mentions `validFrom` and `validUntil` fields (where `validFrom` can be a date in the future). Decide
- if we want to update the verification process based on such fields and how to manage date verification (e.g. should we
- simply trust the issuer?).
-- We can add other credential statuses. That is, instead of publishing a credential revocation event, we could post an
-  update event that adds other statuses like `temporarily suspended`, which can then transition back to `issued`. We
-  should consider the privacy implications of such updates.
-  E.g. a driver licence could forever keep a trace of the times it has been suspended.
-- We should define a way to inform verifiers more information about the credential status. This is akin to what the
- `credentialStatus` field attempts to do, which is basically to have a URL to check status and other information (like
- revocation reason). We should review if this field is mandatory or optional in the standard.
-- If we decide to batch operations in files, we should consider how to order them. We could list first all DID related
- operations and then the credentials related ones. The motivation is that a user could send to a node multiple requests
- and expect the order to be preserved, e.g. first create a DID and then issue a sequence of credentials. If the order
- of events is inverted by the node, then we may end up rejecting the issuance of credentials and then creating a DID.
- An alternative to order events could be to provide a different endpoint for the node to receive batches of operations.
diff --git a/docs/protocol/protocol-v0.3.md b/docs/protocol/protocol-v0.3.md
deleted file mode 100644
index aafa5776cd..0000000000
--- a/docs/protocol/protocol-v0.3.md
+++ /dev/null
@@ -1,784 +0,0 @@
-
-
-\newpage
-
-# Slayer v3: Scaling without a second layer
-
-## Motivation
-
-The goal of the protocol that we describe in this document is to construct a scalable, decentralised and secure
-identity system. The system must provide the following features:
-
-1. Allow the decentralised creation of self certifiable identifiers. That is, any person can create an identifier
-   without the need for coordination with, or permission from, any external authority. Only the creator (controller) of
-   the identifier can prove his ownership through the use of cryptographic techniques.
-2. Allow controllers to update the state of their identifiers.
-3. Given an identifier, allow anyone to obtain its current state.
-4. Allow asserting claims using the identifiers.
-
-In particular, we will refer to these identifiers as **D**ecentralised **ID**entifiers (DIDs). There is a working group
-in W3C specifying the nature of these entities. For the purpose of this document, we will simplify their definition
-and declare that a DID is a string which is associated with a document (DID Document). A DID Document declares the
-state of its associated DID. The first DID Document declares the *initial state* associated with a DID. This document
-contains cryptographic keys that allow controllers to prove their ownership over the DID, and to update the associated
-document.
-The DID Document can also contain other data, such as URLs referring to external information about the associated DID
-controller. The document can also be updated. In our construction, the identifier (DID) has the form
-`did:prism:hash_initial_DID_Document`, making our DIDs self-certifiable. That is, given a DID and its initial DID
-Document, anyone can verify that the document is really associated with the identifier by comparing the identifier to
-the hash of the document.
-
-In previous versions of our protocol, we followed the ideas behind Sidetree.
-Sidetree is a protocol developed by Microsoft, and is currently being specified in a Working Group inside the
-Decentralised Identity Foundation. In a simplified explanation, independent nodes run the Sidetree protocol using an
-underlying blockchain as a base layer. Each node can post file references in the metadata (or equivalent) field of the
-blockchain's transactions. The references point to files in a publicly accessible content addressable storage (CAS)
-service. The files contain the following events:
-
- - Create DID: An event that declares the creation of an identifier, and declares its initial DID Document.
- - Update DID: An event that allows adding/removing data to the associated DID Document.
- - Deactivate DID: An event that declares that the DID is not usable anymore.
-
-All events (except for DID creation ones) are signed by adequate keys to prove validity. The current state of a DID
-Document is computed by taking the initial state (provided on DID creation), and applying the events found in files
-referenced on the underlying blockchain following the order in which they appear.
-
-With respect to claims, there is [a variation of Sidetree](https://hackmd.io/tx8Z0mIRS-aK84Gx4xIzfg?view) that allows
-DID controllers to sign statements and post hashes of them as events in the protocol files (the ones referenced by
-blockchain transactions). This allows timestamping the statement assertion while not revealing the statement
-content. The variation also allows the protocol to revoke an already asserted statement, also timestamping that
-event by adding it to a file.
-
-The feature of `batching` events in files, and posting only references on-chain, allows a considerable scalability
-gain. However, we can observe some drawbacks to this approach:
-1. Data availability problems (referenced files may not be available). This leads to the inability to obtain consensus
-   about the order in which events occurred in the past. Furthermore, it makes it difficult to know if certain events
-   were ever valid at all. This enables a situation known as ["late publication"](./late-publish.md), which could affect
-   certain use cases (e.g. DID transferability).
-
-   Late publication also represents a potential change to the "past" of the system state.
-2. Even though batching increases event throughput, it comes with the need for an anti-spam strategy to prevent garbage
-   events from saturating the system. This observation makes us think that the existence of batching does not necessarily
-   imply lower operation costs or lower fees for users.
-3. The problems related to data availability lead to non-trivial implementation considerations.
-
-This document explores some changes to the previous version of our protocol (0.2), in order to balance better
-performance (in terms of event throughput) while avoiding the issues related to data availability.
-
-## Changes proposed
-
-### Credential issuance
-
-[In version 0.2](./protocol-v0.2.md), we describe an issuance operation that, in a simplified way, contains:
-
-``` scala
-IssueCredential(
- issuerDIDSuffix: ...,
- keyId: ...,
- credentialHash: ...,
- signature: ...
-)
-```
-
-Following Sidetree's approach, one would construct a file, `F1`, of the form
-
-```scala
-IssueCredential(issuerDIDSuffix1, keyId1, credentialHash1, signature1)
-IssueCredential(issuerDIDSuffix2, keyId2, credentialHash2, signature2)
-···
-IssueCredential(issuerDIDSuffixN, keyIdN, credentialHashN, signatureN)
-```
-
-and we would post a transaction containing the hash of `F1` on chain while storing `F1` in a CAS.
-
-However, it is reasonable to believe that an issuer will produce many credentials in batches, meaning that each issuer
-could create a file where all the operations are signed by the same key. This would lead to a file of the form:
-
-```scala
-IssueCredential(issuerDIDSuffix, keyId, credentialHash1, signature1)
-IssueCredential(issuerDIDSuffix, keyId, credentialHash2, signature2)
-···
-IssueCredential(issuerDIDSuffix, keyId, credentialHashN, signatureN)
-```
-
-Given that all the operations would be signed by the same key, one could replace the `N` signatures and occurrences of
-the `issuerDIDSuffix` and `keyId` with a single one, leading to a file with a single operation of the form:
-
-``` scala
-IssueCredentials(issuerDIDSuffix, keyId,
- credentialHash1,
- credentialHash2,
- ···
- credentialHashN,
-signature)
-```
-
-Now, at this point, one could ask: could we replace the list of hashes with something shorter? The answer is yes.
-
-An issuer could take the list of credential hashes `credentialHash1, credentialHash2, ..., credentialHashN` and compute
-a Merkle tree with them, obtaining one root hash and `N` proofs of inclusion:
-
-```scala
-MerkleRoot,
-(credentialHash1, proofOfInclusion1),
-(credentialHash2, proofOfInclusion2),
-···
-(credentialHashN, proofOfInclusionN),
-```
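-
-As a rough illustration (and only that, the production code may differ), the tree construction could pair SHA-256
-hashes level by level, duplicating the last node of odd-sized levels, and record for each leaf the list of sibling
-hashes on the way to the root:
-
-```scala
-import java.security.MessageDigest
-
-object MerkleBatch {
-  type Hash = Vector[Byte]
-  final case class ProofStep(sibling: Hash, siblingOnLeft: Boolean)
-  type InclusionProof = List[ProofStep]
-
-  def sha256(bytes: Array[Byte]): Hash =
-    MessageDigest.getInstance("SHA-256").digest(bytes).toVector
-
-  private def combine(left: Hash, right: Hash): Hash =
-    sha256((left ++ right).toArray)
-
-  /** Builds a binary Merkle tree over the credential hashes and returns the root together
-    * with one proof of inclusion per leaf. Duplicating the last node of an odd-sized level
-    * is one common convention; it is used here only for illustration. */
-  def rootAndProofs(leaves: Vector[Hash]): (Hash, Vector[InclusionProof]) = {
-    require(leaves.nonEmpty, "a batch needs at least one credential hash")
-    var level     = leaves
-    var positions = leaves.indices.toVector // where each leaf's ancestor currently sits
-    var proofs    = Vector.fill(leaves.size)(List.empty[ProofStep])
-    while (level.size > 1) {
-      val padded = if (level.size % 2 == 0) level else level :+ level.last
-      proofs = proofs.zip(positions).map { case (proof, pos) =>
-        val onLeft = pos % 2 == 1 // nodes at odd positions have their sibling on the left
-        proof :+ ProofStep(if (onLeft) padded(pos - 1) else padded(pos + 1), onLeft)
-      }
-      positions = positions.map(_ / 2)
-      level = padded.grouped(2).map { case Vector(l, r) => combine(l, r) }.toVector
-    }
-    (level.head, proofs)
-  }
-}
-```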
-
-Now, the issuer could simply post this operation on the metadata of a transaction:
-
-```scala
-IssueCredentials(issuerDIDSuffix, keyId, merkleRootHash, signature)
-```
-
-and share with each corresponding holder, the pair `(credential, p)` where:
-
-- `credential` is the credential to share and,
-- `p` is the proof of inclusion that corresponds to `credential`
-
-By doing this, a holder could later share a credential with a verifier along with the proof of inclusion. The verifier
-would perform a modified version of the verification steps from version 0.2 of the protocol. See the last section for
-the formal description.
-
-A remaining question is: why would we want each credential operation separate in the first place? The answer is that
-our intention is for the issuer to be able to detect if an unauthorized credential is issued. So, does this change
-affect that goal? Our argument is that it does not. In order to detect that a credential was issued without
-authorization, the issuer needed to check each `IssueCredential` operation posted, look for those signed by his keys,
-then compare those to the ones in his database and make a decision. With the proposed change, the issuer performs
-the same steps: he checks every Merkle root posted with his signature and verifies whether that root is
-registered in his database.
-
-In conclusion, this change could allow us to remove the need for external files for issuing credentials and allow
-credential issuance to be fully performed on-chain using Cardano's metadata.
-
-### Credential revocation
-
-The proposed protocol changes that we describe below do not scale the throughput of credential revocations as much
-as they do for issuance. However, we could argue that credential revocation should be a type of event with
-comparatively low throughput demand.
-
-If we analyse revocation scenarios, we could see some special cases:
-
-- The issuer detected a Merkle root that is not authorised. In such a case, he could revoke the full batch it represents with
- a small operation of the form:
- ```scala
- RevokeCredentials(
- issuanceOperationHash,
- keyId,
- signature
- )
- ```
- The associated DID is the one from the referred `IssueCredential` operation.
-- Another situation is when an issuer would like to revoke specific credentials _issued in the same batch_. For this
- case, we could see the use of an operation:
- ```
- RevokeCredentials(
- issuanceOperationHash,
- keyId,
- credentialHash1,
- credentialHash2,
- ···,
- credentialHashK,
- signature
- )
- ```
-
-Each hash adds approximately 32 bytes of metadata, which adds very little fee. See [fee](#fee-estimations) for more
-details on fees.
-
-Now, if the issuer needs to revoke credentials issued in different batches, he could post independent transactions per
-batch. Compared to version 0.2 of this protocol, we can revoke batches of credentials with a single operation in certain
-cases. Before, we needed one operation per credential to be revoked.
-
-We explored the idea of having an RSA accumulator per credential type. Our preliminary research makes us believe that
-the approach may require the use of big prime numbers, and the accumulator may occupy a few kilobytes of space. We
-decided to leave the idea for a future version and to request suggestions from a cryptographer.
-
-### DID creation
-
-In order to create a DID, in version 0.2, we have to post a `CreateDID` operation on-chain. Sidetree allows scaling the
-number of DIDs created by the use of batches. However, it also provides a
-[long-form](https://identity.foundation/sidetree/spec/#long-form-did-uris) that allows using a DID before it is
-published. The long form also makes it possible to never publish a DID if it is never updated.
-
-We propose, for this protocol version, to leave DID creation operations as optional, used just for the purpose of
-timestamping. That is:
-
-- The initial DID could be posted on-chain as in version 0.2, or
-- It could have a long format that describes its initial state, in which case no event is published. The format could,
-  informally, be thought of as
-
- ```text
- did:prism:hash_initial_DID_Document?initialState=InitialDIDDocument
- ```
-  The initial DID document can be validated against the DID suffix (see the sketch after this list).
-- Another alternative is that many DIDs created by the same entity could be arranged in a Merkle tree (as we propose for
-  credential hashes), posting the root hash on-chain for later timestamp proofs.
-
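-A minimal sketch of how such a long-form DID could be produced and validated follows; the hex suffix and Base64URL
-encoding of the initial state are illustrative assumptions, not the final wire format:
-
-```scala
-import java.nio.charset.StandardCharsets.UTF_8
-import java.security.MessageDigest
-import java.util.Base64
-
-object LongFormDid {
-  // Illustrative encoding only: hex-encoded SHA-256 for the suffix, Base64URL for the attached document.
-  private def sha256Hex(bytes: Array[Byte]): String =
-    MessageDigest.getInstance("SHA-256").digest(bytes).map("%02x".format(_)).mkString
-
-  def encode(initialDidDocument: String): String = {
-    val suffix  = sha256Hex(initialDidDocument.getBytes(UTF_8))
-    val encoded = Base64.getUrlEncoder.withoutPadding.encodeToString(initialDidDocument.getBytes(UTF_8))
-    s"did:prism:$suffix?initialState=$encoded"
-  }
-
-  /** Returns the initial DID Document if the long form is self-consistent (suffix == hash of the document). */
-  def validate(longFormDid: String): Option[String] =
-    longFormDid.split('?') match {
-      case Array(short, params) if short.startsWith("did:prism:") && params.startsWith("initialState=") =>
-        val suffix   = short.stripPrefix("did:prism:")
-        val document = new String(Base64.getUrlDecoder.decode(params.stripPrefix("initialState=")), UTF_8)
-        if (sha256Hex(document.getBytes(UTF_8)) == suffix) Some(document) else None
-      case _ => None
-    }
-}
-```
-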
-In Sidetree's Slack, we received feedback that they see this as a planned evolution of the protocol, with some
-differences from our proposal.
-
-- They do not care about batched creation timestamping as we propose.
-- During the first update operation, they would like to post the initial DID state along with the update operation.
- They prefer to store the initial state in the CAS rather than sharing a longer identifier on every interaction.
-  We could leave this as two variations of the first update operation in our protocol, but there are considerations to
-  keep in mind.
- 1. Not publishing the initial state leads to smaller metadata to add to the first update transaction. It also allows
- more privacy. One drawback is that invalid update operations would not be "prunable" until the associated DID is
- resolved for the first time. This could be mitigated by associating DIDs to addresses as mentioned in [other
- ideas](./protocol-other-ideas.md).
-  2. In the variation where the initial state is posted along with the first update, we would have larger metadata
-     usage. We should be careful not to exceed a reasonable metadata size. See
- [metadata considerations](#metadata-usage-concerns) for more comments on metadata usage.
-
-For simplicity, we lean towards option 2, as it would require fewer changes in the way we process operations. Currently,
-when the node finds a new operation, it applies the state change right away. If we did not post the initial DID state, we
-would need to process operations differently, as DID update operations would have no initial state to be applied upon.
-Similarly, we need to have the DID state at the time of credential issuance and revocation. We should allow publishing a
-DID during the first issuance operation too. Note that we won't need to publish a DID during the first revocation, because
-the first revocation must occur after an issuance event. Hence, we will require the signing DID to already be published
-by that point.
-
-In practice, we have implemented the above two cases through "on-chain batching", meaning that we allow users to publish
-a sequence of events in a single blockchain transaction. In order to publish a DID during its first update, the user can
-post the `CreateDID` event and the `UpdateDID` events in the same underlying transaction. Similarly, the user can also
-publish a `CreateDID` event along with an event to issue a batch of credentials. Any other combination of events can be
-published too; the only restriction is that the entire sequence must be small enough to fit in a single transaction's
-metadata field.
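-
-To illustrate the idea (the names below are hypothetical, not the actual protobuf messages we serialize into the
-metadata), the payload of one transaction can be thought of as an ordered sequence of signed operations:
-
-```scala
-// Hypothetical shapes, for illustration only; the implementation serializes
-// protobuf messages into the Cardano transaction metadata field.
-sealed trait Operation
-final case class CreateDID(initialDocument: String)                          extends Operation
-final case class UpdateDID(didSuffix: String, actions: List[String])         extends Operation
-final case class IssueCredentialBatch(didSuffix: String, merkleRoot: String) extends Operation
-
-final case class SignedOperation(keyId: String, signature: Array[Byte], operation: Operation)
-
-// One transaction carries an ordered sequence of operations; the node applies them
-// in order, so a DID can be created and used within the very same transaction.
-final case class MetadataPayload(operations: List[SignedOperation])
-
-object OnChainBatchingExample {
-  // e.g. create a DID and immediately issue a batch of credentials with it
-  val payload: MetadataPayload = MetadataPayload(List(
-    SignedOperation("master0",  Array.emptyByteArray, CreateDID("<initial DID Document>")),
-    SignedOperation("issuing0", Array.emptyByteArray, IssueCredentialBatch("<did suffix>", "<merkle root>"))
-  ))
-}
-```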
-
-The use of a long format would enable us to create DIDs without the need for batching or any on-chain event whatsoever.
-This leads to unbounded throughput for DID creation.
-
-*NOTE:* Even though there does not seem to be a maximum value for DID length, we should be aware of such discussions in
-DID Core or similar groups. We have been warned that QR codes would not work well for DIDs with a "big" long form. We could
-evaluate compressing the data inside the QR code and decompressing it at the receiving end.
-
-### DID updates
-
-This is the only operation that cannot be scaled on-chain in an easy way. The main reason is that update operations are
-expected to be publicly available. This implies that a commitment (e.g. hash, merkle tree, RSA accumulator, etc.) is not
-enough for the users of the protocol. Nodes need the actual update data.
-
-A question we may ask is: how often do we expect an update to occur? If this is not a recurrent operation, we should
-consider leaving it as an on-chain operation.
-
-Alternatively, we could consider adding a permissioned batching service. That is:
-- IOHK (and selected actors) could have DIDs that can sign authorised batches.
-- Some drawbacks are the complexities added by a missing trust model on who can batch update events, and also the
-  complexity of handling data availability problems.
-
-Originally, we were thinking about having permissioned batches for every type of operation. It now seems that, in the
-worst case, we would only require batches for DID updates.
-We should keep evaluating if this is needed for this version.
-
-Another option could be to allow "on-chain batching". Note that we have a bit less than 16 KB for transaction metadata.
-Consider that updating a DID consists of:
-
-- Adding a key: requires a keyId and a key.
-- Removing a key: requires a keyId.
-- Adding/removing a service endpoint: an id and the endpoint when adding it.
-
-If we imagine small update operations, we could allow users to cooperate and batch up updates in a single ADA
-transaction.
-
-- Actors could take periodic turns to distribute fee costs.
-- We could ask a cryptographer if there is any signature aggregation scheme that could help to reduce metadata size.
-- We would like to refer the reader to the [metadata considerations](#metadata-usage-concerns) section to be aware of
- potential problems with "on-chain" batching.
-
-## Fee estimations
-
-According to [Shelley documentation](https://github.com/input-output-hk/cardano-ledger-specs/blob/master/shelley/design-spec/delegation_design_spec.tex#L1999),
-there is a maximum transaction size. The documentation also states the following about fees:
-
-> The basic transaction fee covers the cost of processing and storage. The formula is
-> ```
-> a + bx
-> ```
->
-> With constants `a` and `b`, and `x` as the transaction size in bytes.
-
-The constants, according to [Shelley parameters](https://hydra.iohk.io/build/3670619/download/1/index.html), are:
-
-```json
- "maxTxSize": 16384,
- "maxBlockBodySize": 65536,
- "maxBlockHeaderSize": 1100,
- "minFeeA": 44,
- "minFeeB": 155381,
-```
-
-Interestingly enough, the values `a` and `b` are inverted (a typo in the documentation, already reported and confirmed).
-From the above data, the maximum fee a transaction could incur is
-
-```scala
-maxFee = minFeeB + maxTxSize*minFeeA = 155381 + 16384*44 = 876277 lovelace = 0.876277 ADA
-```
-
-which represents a 16 kilobyte transaction.
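-
-All the per-operation estimates below instantiate the same linear formula; a trivial helper (with the Shelley
-parameter values quoted above) makes the arithmetic explicit:
-
-```scala
-// Fee in lovelace for a transaction of `txSizeBytes`, using the Shelley parameters
-// quoted above (note the swapped naming: minFeeA is the per-byte constant).
-def fee(txSizeBytes: Int, minFeeA: Long = 44L, minFeeB: Long = 155381L): Long =
-  minFeeB + txSizeBytes * minFeeA
-
-def lovelaceToAda(lovelace: Long): BigDecimal = BigDecimal(lovelace) / 1000000
-
-// fee(16384) == 876277 lovelace (the maximum above); fee(450) == 175181 lovelace
-```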
-
-Let us estimate fees per operation type. We will add extra bytes in our estimations due to the [metadata scheme enforced
-by Cardano](https://github.com/input-output-hk/cardano-ledger-specs/blob/master/shelley/design-spec/delegation_design_spec.tex#L4547).
-
-We will assume:
-
-- A base transaction size (i.e. without the metadata) of 250 bytes.
-- A signature size of [75 bytes](https://crypto.stackexchange.com/questions/75996/probability-of-an-ecdsa-signature/75997#75997)
-- A hash size of 32 bytes.
-- A keyId size of 18 bytes.
-- A key size of 32 bytes.
-- A DID suffix of 32 bytes.
-
-Given the above, we could estimate:
-
-- DID creation without publishing would have a cost of 0 ADA and use no metadata or transactions.
-- The old DID creation (publishing the operation) with two keys (one Master and one for Issuing) would have:
- - At least two key ids (2 x 18)
- - At least two keys (2 x 32)
- - We currently also have a signature (75)
-  Adding the base transaction, we could estimate this at roughly 400 bytes, leading to:
- ```scala
- minFeeB + 400*minFeeA = 155381 + 400*44 = 172981 lovelace = 0.172981 ADA
- ```
-- DID "batched" timestamping would have an operation identifier and a Merkle root hash.
-  We could overestimate this with a 300-byte transaction.
-
- ```scala
- minFeeB + 300*minFeeA = 155381 + 300*44 = 168581 lovelace = 0.168581 ADA
- ```
-
-- If we imagine a credential issuance operation (where the issuing DID was already published), we have:
- - an issuer DID suffix (32)
- - a signature (75)
- - a keyId (18)
- - a Merkle root hash (32)
-  Overestimating, this leads to a 450-byte transaction, resulting in an issuance fee of:
-
- ```scala
- minFeeB + 450*minFeeA = 155381 + 450*44 = 175181 lovelace = 0.175181 ADA
- ```
-
- If we need to consider the data to publish the DID during the issuance operation, we could add:
- - 2 key ids (2 x 18)
- - 2 keys (2 x 32)
-  This means we add 100 bytes, leading to:
- ```scala
- minFeeB + 550*minFeeA = 155381 + 550*44 = 179581 lovelace = 0.179581 ADA
- ```
-- For revocation, we could analyse the cost per type of revocation.
-  - If the issuer revokes the full batch of credentials, it looks reasonable to estimate a cost similar to that of the
-    issuance operation, i.e. ~0.175181 ADA.
-  - If the issuer needs to revoke selected credentials from a single batch, we could expect a fee similar to the one
-    before with an addition of 2000 lovelace (32 bytes * 44 lovelace/byte + encoding bytes)
-    per credential hash. This represents 0.002 extra ADA per revoked credential.
-- DID updates would require a more variable estimation. Sidetree
- [suggests](https://identity.foundation/sidetree/spec/#default-parameters) a maximum of 1 kilobyte for the size of an
-  update operation. In comparison, this would represent an approximate fee of:
- ```scala
- minFeeB + 1024*minFeeA = 155381 + 1024*44 = 200437 lovelace = 0.200437 ADA
- ```
-
-We could apply some optimizations in exchange for a slightly more complex protocol.
-See [related work](./protocol-other-ideas.md).
-
-### Metadata usage concerns
-
-Even though ledger rules allow for transactions with large metadata, we should be aware that this is not a guarantee that
-the network will accept them. For example, Bitcoin transactions with more than one `OP_RETURN` output are valid
-according to consensus rules; however, they are not considered "standard" transactions and nodes tend not to
-forward them ([1](https://medium.com/@alcio/an-inefficient-use-of-bitcoin-16281b975cae#:~:text=Transactions%20with%20multiple%20OP_RETURN%20outputs,by%20peers%20on%20the%20network.),
-[2](https://www.frontiersin.org/articles/10.3389/fbloc.2019.00007/full)). We raise this observation to motivate the most
-efficient use of metadata bytes.
-It may be reasonable to adopt a conservative design principle of "the smaller, the better".
-
-## Formal changes
-
-In this section we would like to summarise the protocol with respect to a more formal definition of the operations and
-node state.
-
-## Node State
-
-Given that we will now batch credential issuance and also optionally timestamp DID creation batches,
-we will update our node state definition. Let us start with type definitions:
-
-```scala
-// Abstract types
-type Key
-type Hash
-type Date
-
-// Concrete types
-type KeyUsage = MasterKey | IssuingKey | AuthenticationKey | CommunicationKey
-type KeyData = {
- key: Key,
- usage: KeyUsage,
- keyAdditionEvent: Date, // extracted from the blockchain
- keyRevocationEvent: Option[Date] // extracted from the blockchain
-}
-
-type DIDData = {
- lastOperationReference: Hash,
- keys: Map[keyId: String, keyData: KeyData]
-}
-
-type CredentialBatch = {
- issuerDIDSuffix: Hash,
- merkleRoot: Hash,
- batchIssuingEvent: Date, // extracted from the blockchain
- batchRevocationEvent: Option[Date] // extracted from the blockchain
-}
-
-// Node state
-type State = {
-  didTimestamps : Map[didBatchId: Hash, timestamp: Date],
-  publishedDids : Map[didSuffix: Hash, data: DIDData],
-  credentialBatches : Map[credentialBatchId: Hash, data: CredentialBatch],
-  revokedCredentials: Map[credentialBatchId: Hash, Map[credentialHash: Hash, credentialRevocationEvent: Date]]
-}
-```
-
-Given the above, we define the node initial state as:
-
-```scala
-state = {
- didTimestamps = Map.empty,
- publishedDids = Map.empty,
- credentialBatches = Map.empty,
- revokedCredentials = Map.empty
-}
-```
-
-We will add an additional value called `BeginingOfTime` which will be used to represent timestamps for keys that belong
-to unpublished DIDs.
-
-## DID Creation
-
-The DID creation process from v0.2 remains supported and unchanged.
-It will run the same validations and only update the state of `publishedDids`.
-
-## DID batch Timestamping
-
-A difference from other operations is that, to timestamp a batch of DIDs, there is no
-signature involved. Recall that on DID creation we ask for a Master key signature.
-So, given an operation:
-
-```json
-{
- "operation": {
- "timestampDIDBatch" : {
-      "merkleRoot" : ...
- }
- }
-}
-```
-
-which posts a Merkle tree root hash, we update the state as follows:
-
-```scala
-state'.didTimestamps = state.didTimestamps + { operation.timestampDIDBatch.merkleRoot -> TX_TIMESTAMP }
-state'.publishedDids = state.publishedDids
-state'.credentialBatches = state.credentialBatches
-state'.revokedCredentials = state.revokedCredentials
-```
-
-## DID Update
-
-Given that we implemented on-chain batching, we do not need to update this operation.
-For completeness, we will describe the formal specification of this event.
-
-```json
-{
- "signedWith": KEY_ID,
- "signature": SIGNATURE,
- "operation": {
- "previousOperationHash": HASH,
- "updateDid": {
- "didSuffix": DID_SUFFIX,
- "actions": [
- ADD_KEY_ACTION | REMOVE_KEY_ACTION,
- ...
- ]
- }
- }
-}
-```
-
-When the operation is observed in the stable part of the ledger, the node will act as before,
-that is:
-
- ```scala
- alias didToUpdate = decoded.operation.updateDid.didSuffix
- alias signingKeyId = decoded.signedWith
- alias updateActions = decoded.operation.updateDID.actions
- alias messageSigned = decoded.operation
-
- state.publishedDids.contains(didToUpdate) &&
- state.publishedDids(didToUpdate).keys.contains(signingKeyId) &&
- state.publishedDids(didToUpdate).keys(signingKeyId).usage == MasterKey &&
- state.publishedDids(didToUpdate).lastOperationReference == decoded.operation.previousOperationHash &&
- isValid(decoded.signature, messageSigned, state.publishedDids(didToUpdate).keys(signingKeyId).key) &&
- updateMap(signingKeyId, state.publishedDids(didToUpdate).keys, updateActions).nonEmpty
- ```
-
- where `updateMap` applies the updates sequentially over the initial state. It verifies that the operation signing key
- is not revoked by any action, that each action can be performed correctly, and it returns the updated `DIDData` if
- everything is fine. If any check or action application fails, an empty option is returned.
- We will refine the specification of `updateMap` in a future iteration.
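-
- A sketch of what `updateMap` could look like (purely illustrative: `AddKey`/`RevokeKey` are placeholder action names,
- and `KeyData`/`Date` are assumed to be case-class counterparts of the records in the node state):
-
- ```scala
- sealed trait UpdateAction
- final case class AddKey(keyId: String, keyData: KeyData) extends UpdateAction
- final case class RevokeKey(keyId: String)                extends UpdateAction
-
- // Applies the actions left to right over the DID's key map. Returns None if any action
- // is invalid or if the key that signed the whole operation would be revoked by it.
- def updateMap(
-     signingKeyId: String,
-     keys: Map[String, KeyData],
-     actions: List[UpdateAction],
-     ledgerTime: Date
- ): Option[Map[String, KeyData]] =
-   actions.foldLeft(Option(keys)) { (acc, action) =>
-     acc.flatMap { current =>
-       action match {
-         case AddKey(id, data) if !current.contains(id) =>
-           Some(current + (id -> data.copy(keyAdditionEvent = ledgerTime)))
-         case RevokeKey(id) if id != signingKeyId && current.get(id).exists(_.keyRevocationEvent.isEmpty) =>
-           Some(current + (id -> current(id).copy(keyRevocationEvent = Some(ledgerTime))))
-         case _ => None
-       }
-     }
-   }
- ```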
-
- If the check passes, then we get the following state update:
- ```scala
- state'.publishedDids = state.publishedDids.update(didToUpdate, { lastOperationReference = hash(decoded),
-                                                                  keys = updateMap(signingKeyId, state.publishedDids(didToUpdate).keys, updateActions).get })
- state'.didTimestamps = state.didTimestamps
- state'.credentialBatches = state.credentialBatches
- state'.revokedCredentials = state.revokedCredentials
- ```
-
-## Credential Batch Issuance
-
-Now, similar to the situation with the first update operation, we find ourselves with an operation that will be signed
-by a key referenced by a DID. We said that we implemented on-chain batching, so we do not need to embed the DID creation
-event in this operation.
-
-The updated credential issuance operation can be described in the following way:
-
-```json
-{
- "keyId": KEY_ID,
- "signature": SIGNATURE,
- "operation": {
- "issueCredentialBatch": {
- "batchData": {
- "issuerDIDSuffix": DID_SUFFIX,
- "merkleRoot": MERKLE_TREE_ROOT
- }
- }
- }
-}
-```
-
-The response is now a `batchId` that represents the batch analogous to the old `credentialId`:
-
-```json
-{
- "batchId" : OPERATION_HASH
-}
-```
-
-Its implementation will be the hash of the `batchData` field.
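-
-For illustration only, assuming SHA-256 and a straightforward concatenation of the `batchData` contents (the concrete
-serialization is an implementation detail):
-
-```scala
-import java.security.MessageDigest
-
-// batchId = hash of the batchData field (issuer DID suffix + Merkle root); the byte-level
-// serialization shown here is illustrative, not normative.
-def computeBatchId(issuerDIDSuffix: Array[Byte], merkleRoot: Array[Byte]): Array[Byte] =
-  MessageDigest.getInstance("SHA-256").digest(issuerDIDSuffix ++ merkleRoot)
-```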
-
-The node state is now updated as follows:
-
- ```scala
- alias signingKeyId = decoded.keyId
- alias issuerDIDSuffix = decoded.operation.issueCredentialBatch.batchData.issuerDIDSuffix
- alias signature = decoded.signature
- alias messageSigned = decoded.operation
-
- state.publishedDids.contains(issuerDIDSuffix) &&
- state.publishedDids(issuerDIDSuffix).keys.contains(signingKeyId) &&
- state.publishedDids(issuerDIDSuffix).keys(signingKeyId).usage == IssuingKey &&
- state.publishedDids(issuerDIDSuffix).keys(signingKeyId).keyRevocationEvent.isEmpty &&
- isValid(signature, messageSigned, state.publishedDids(issuerDIDSuffix).keys(signingKeyId).key)
- ```
-
- The state would be updated as follows:
-
- ```scala
- alias batchId = computeBatchId(decoded)
- alias merkleRoot = decoded.operation.issueCredentialBatch.batchData.merkleRoot
- alias issuerDIDSuffix = decoded.operation.issueCredentialBatch.batchData.issuerDIDSuffix
-
- state'.publishedDids = state.publishedDids
- state'.didTimestamps = state.didTimestamps
- state'.credentialBatches = state.credentialBatches + { batchId -> {
- merkleRoot = merkleRoot,
- issuerDIDSuffix = issuerDIDSuffix,
- batchIssuingEvent = LEDGER_TIMESTAMP,
- batchRevocationEvent = None
- }
- }
- state'.revokedCredentials = state.revokedCredentials
- ```
-
-## Batch Revocation
-
-We now define the revocation for a batch of issued credentials.
-
-```json
-{
- "keyId": KEY_ID,
-  "signature": SIGNATURE,
- "operation": {
- "revokeBatch": {
- "batchId": HASH
- }
- }
-}
-```
-
-The preconditions to apply the operation are:
-
-```scala
-alias batchId = decoded.operation.revokeBatch.batchId
-alias signature = decoded.signature
-alias signingKeyId = decoded.keyId
-alias messageSigned = decoded.operation
-alias issuerDIDSuffix = state.credentialBatches(batchId).issuerDIDSuffix
-
-state.credentialBatches.contains(batchId) &&
-state.credentialBatches(batchId).batchRevocationEvent.isEmpty &&
-state.publishedDids.contains(issuerDIDSuffix) &&
-state.publishedDids(issuerDIDSuffix).keys.contains(signingKeyId) &&
-state.publishedDids(issuerDIDSuffix).keys(signingKeyId).usage == IssuingKey &&
-state.publishedDids(issuerDIDSuffix).keys(signingKeyId).keyRevocationEvent.isEmpty &&
-isValid(signature, messageSigned, state.publishedDids(issuerDIDSuffix).keys(signingKeyId).key)
-```
-
-If the precondition holds, we update the node state as follows:
-
-```scala
-alias batchId = decoded.operation.revokeBatch.batchId
-alias initialBatchState = state.credentialBatches(batchId)
-
-state'.publishedDids = state.publishedDids
-state'.didTimestamps = state.didTimestamps
-state'.revokedCredentials = state.revokedCredentials
-state'.credentialBatches = state.credentialBatches + { batchId -> {
- issuerDIDSuffix = initialBatchState.issuerDIDSuffix,
- merkleRoot = initialBatchState.merkleRoot,
- batchIssuingEvent = initialBatchState.batchIssuingEvent,
- batchRevocationEvent = Some(OPERATION_LEDGER_TIME)
- }
- }
-```
-
-## Credentials Revocation
-
-We are finally on the credential revocation operation. We will update the operation from version 0.2 to now allow many
-credential hashes to revoke.
-
-```json
-{
- "keyId": KEY_ID,
-  "signature": SIGNATURE,
- "operation": {
- "revokeCredentials": {
- "batchId": HASH,
- "credentialHashes": [ HASH, ...],
- }
- }
-}
-```
-
-The preconditions to apply the operation are:
-
-```scala
-alias batchId = decoded.operation.revokeCredentials.batchId
-alias signature = decoded.signature
-alias signingKeyId = decoded.keyId
-alias messageSigned = decoded.operation
-alias issuerDIDSuffix = state.credentialBatches(batchId).issuerDIDSuffix
-alias credentialHashes = decoded.operation.revokeCredentials.credentialHashes
-
-state.credentialBatches.contains(batchId) &&
-// the batch was not already revoked
-state.credentialBatches(batchId).batchRevocationEvent.isEmpty &&
-state.publishedDids.contains(issuerDIDSuffix) &&
-state.publishedDids(issuerDIDSuffix).keys.contains(signingKeyId) &&
-state.publishedDids(issuerDIDSuffix).keys(signingKeyId).usage == IssuingKey &&
-state.publishedDids(issuerDIDSuffix).keys(signingKeyId).keyRevocationEvent.isEmpty &&
-isValid(signature, messageSigned, state.publishedDids(issuerDIDSuffix).keys(signingKeyId).key)
-```
-
-If the above holds, then:
-
-```scala
-alias batchId = decoded.operation.revokeCredentials.batchId
-// we will only update the state for the credentials not already revoked
-alias credentialHashes = filterNotAlreadyRevoked(decoded.operation.revokeCredentials.credentialHashes)
-alias issuerDIDSuffix = state.credentialBatches(batchId).issuerDIDSuffix
-
-state'.publishedDids = state.publishedDids
-state'.didTimestamps = state.didTimestamps
-state'.credentialBatches = state.credentialBatches
-
-state'.revokedCredentials =
- state.revokedCredentials + { batchId -> state.revokedCredentials(batchId) + buildMap(credentialHashes)
- }
-```
-
-where `buildMap` takes the credential hashes and computes a map from the credential hashes to their revocation ledger
-time.
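-
-For illustration, `buildMap` could simply be the following (with `Hash` and `Date` as in the node state, and the
-ledger time of the revoking transaction passed in):
-
-```scala
-// Every hash in the (already filtered) list maps to the ledger time of the
-// transaction that carried the revocation operation. Illustrative sketch only.
-def buildMap(credentialHashes: List[Hash], ledgerTime: Date): Map[Hash, Date] =
-  credentialHashes.map(h => h -> ledgerTime).toMap
-```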
-
-**Note:** The reader may realise that an issuer could mark as "revoked" credentials that are actually not part of the
-referred batch. This is because there is no check that the credential hashes shared are contained in the batch. The
-reason why we didn't add this check is that Merkle proofs of inclusion for many credentials are big in size.
-We argue, however, that the verification process takes a credential and checks for revocation _in its corresponding_
-batch, meaning that no issuer will be able to revoke other issuers' credentials (or even a credential outside of its
-issuance batch).
-
-## Credential Verification
-
-In order to describe credential verification, we assume the following data to be present in the credential.
-
-```json
-{
-  "issuerDID" : DID,
-  "signature" : SIGNATURE,
-  "keyId" : KEY_ID,
- ...
-}
-```
-
-We also assume the presence of a Merkle proof of inclusion, `merkleProof` that attests that the credential is contained
-in its corresponding batch.
-
-Given a credential `c` and its Merkle proof of inclusion `mproof`, we define `c` to be valid if and only if the
-following holds:
-
-```scala
-alias computedMerkleRoot = computeRoot(c, mproof)
-alias batchId = computeBatchId(c, computedMerkleRoot) // we combine the data to compute the batch id
-alias issuerDIDSuffix = c.issuerDID.suffix
-alias keyId = c.keyId
-alias signature = c.signature
-alias credentialHash = hash(c)
-
-// control key data
-state.publishedDids.contains(issuerDIDSuffix) &&
-state.publishedDids(issuerDIDSuffix).keys.contains(keyId) &&
-state.publishedDids(issuerDIDSuffix).keys(keyId).usage == IssuingKey &&
-// check that the credential batch matches the credential data
-state.credentialBatches.contains(batchId) &&
-state.credentialBatches(batchId).issuerDIDSuffix == issuerDIDSuffix &&
-state.credentialBatches(batchId).merkleRoot == computedMerkleRoot &&
-// check that the batch was not revoked
-state.credentialBatches(batchId).batchRevocationEvent.isEmpty &&
-// check that the specific credential was not revoked
-(
- ! state.revokedCredentials.contains(batchId) ||
- ! state.revokedCredentials(batchId).contains(credentialHash)
-) &&
-// check the key timestamp compared to the credential issuance timestamp
-(
-  state.publishedDids(issuerDIDSuffix).keys(keyId).keyAdditionEvent < state.credentialBatches(batchId).batchIssuingEvent &&
- (
- state.publishedDids(issuerDIDSuffix).keys(keyId).keyRevocationEvent.isEmpty ||
- state.credentialBatches(batchId).batchIssuingEvent < state.publishedDids(issuerDIDSuffix).keys(keyId).keyRevocationEvent.get
- )
-) &&
-// check the credential signature
-isValid(signature, c, state.publishedDids(issuerDIDSuffix).keys(keyId).key)
-```
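-
-Assuming the same pairing and leaf-duplication convention sketched for the issuer side, `computeRoot` can be seen as
-folding the proof path over the credential hash (names are illustrative, not the node's actual API):
-
-```scala
-// Recomputes the Merkle root from a leaf (the credential hash) and its proof of
-// inclusion; ProofStep and sha256 are as in the issuer-side sketch.
-def computeRoot(credentialHash: Vector[Byte], proof: List[ProofStep]): Vector[Byte] =
-  proof.foldLeft(credentialHash) { (current, step) =>
-    if (step.siblingOnLeft) sha256((step.sibling ++ current).toArray)
-    else sha256((current ++ step.sibling).toArray)
-  }
-
-// The checks above then compare this recomputed root with the one registered on-chain:
-//   state.credentialBatches(batchId).merkleRoot == computeRoot(hash(c), mproof)
-```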
-
-We want to remark that the key that signs a credential does not need to be the same key that signs the credential batch
-issuance event. This is why we add checks to ensure that the signing key in the credential was already present in the
-DID at the time the batch was issued and, if the key was later revoked, that the revocation occurred after the batch
-issuance event.
-
-We also want to remark that, currently, the key that signs the credential and the key that signs the issuance operation
-must belong to the same DID. The reason is that the DID extracted from the credential is the one used to compute the
-`batchId`. In order to allow one DID to sign the credential and another one to sign the issuance operation, we would
-need to add two DIDs to the credential and specify which DID should be used for each check.
diff --git a/docs/protocol/selective-disclosure-merkle-trees.md b/docs/protocol/selective-disclosure-merkle-trees.md
deleted file mode 100644
index 6e293a238d..0000000000
--- a/docs/protocol/selective-disclosure-merkle-trees.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# Selective disclosure
-
-## Context
-
-In our current design, when a holder shares a credential, all the information stored in the credential is
-shared. This is not ideal for self-evident privacy reasons: the holder may want to prove to a verifier a set
-of sub-claims that are present in a credential instead of disclosing all of them.
-
-In this document, we will describe a first approach for improving our situation. We will add a way to
-hide claims inside credentials, and allow the holder to select which of those hidden claims will be
-revealed to each verifier.
-
-## Approach
-
-We will allow the issuer of a credential to provide a list of claims (identified as `(name, value)`
-pairs) to our SDK. The pairs will be represented in the credential in the following way (see the sketch
-after this list):
-- Each pair will have a unique nonce associated with it (this is added to protect against brute force
-  attacks on a given field), leading to tuples `(nonce, name, value)`
-- The SDK will compute a merkle tree based on a serialization of these tuples
-- We will store a merkle root in the credential, and return to the issuer the inclusion proofs and
- nonces for each field
-- The merkle root will be added as a claim inside the `credentialSubject` field of our current format
-- We will keep the values that represent the issuer DID and signing keys outside the hidden fields
-
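-A minimal sketch of how the hidden-claim leaves could be derived follows (the serialization and nonce size are
-illustrative assumptions; the Merkle tree itself is built over these leaf hashes, exactly as for credential batches):
-
-```scala
-import java.nio.charset.StandardCharsets.UTF_8
-import java.security.{MessageDigest, SecureRandom}
-
-final case class HiddenClaim(nonce: Array[Byte], name: String, value: String)
-
-object SelectiveDisclosure {
-  private val random = new SecureRandom()
-
-  // Each (name, value) pair gets its own random nonce to resist brute forcing of
-  // low-entropy fields (e.g. a date of birth).
-  def hideClaims(claims: Map[String, String]): List[HiddenClaim] =
-    claims.toList.map { case (name, value) =>
-      val nonce = new Array[Byte](32)
-      random.nextBytes(nonce)
-      HiddenClaim(nonce, name, value)
-    }
-
-  // Illustrative serialization: nonce || name || value hashed with SHA-256; the real
-  // encoding would need unambiguous separators or length prefixes.
-  def leafHash(claim: HiddenClaim): Array[Byte] =
-    MessageDigest.getInstance("SHA-256")
-      .digest(claim.nonce ++ claim.name.getBytes(UTF_8) ++ claim.value.getBytes(UTF_8))
-}
-```
-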
-## Impact of the implementation
-
-We will need to update:
-- Our SDKs, to expose the described functionality for credential construction
-- Our SDKs also need methods to verify the validity of the data revealed from the tree
-- The console backend, to store the newly added merkle proofs and nonces for the hidden fields
-- The protobuf models used to share credentials, to add these new proofs, nonces and field names
-- The mobile applications, to allow selecting which fields should be shared
-- The management console front end, to manage this new data
-
-
-## Remarks
-
-### Hidden data and credential ownership proofs
-
-One consideration about the possibility of hiding data is that we are currently not adding a subject's
-DID to our credentials. Our current requirements expect that the ownership of a credential will be
-verified by "classic" mechanisms, such as the insertion of a photograph of the subject, or biometric
-data represented in custom formats.
-
-The approach of not having the subject's DID in credentials has an advantage in simplicity and avoids
-problems related to key management on the subject's side. On the other hand, we should remark that the
-lack of cryptographic mechanisms restricts the use of automated, non-human credential verification. For
-example, if a DID is involved, a service could use it to verify that the holder presenting a credential
-is also in control of the DID that is present in it. Without such a DID, the service will need to rely on
-other credential-specific ways to verify that a holder is the actual subject of a credential.
-In the context of selective disclosure, a holder may desire to prove a claim without the need to share
-sensitive data (e.g. biometric data or an identification photograph).
-
-For the scope of this iteration, we will not add subjects' DIDs in credentials.
-
-### Hidden data and credential templates
-
-In our current implementation, we have HTML templates that display a credential with all its attributes.
-If we add selective disclosure of fields (independently of the technique we choose to do so), we need to
-evaluate the impact on the visual presentation we have.
-
-Today, we store the HTML templates _within_ the credential with all its fields populated. In order to
-make sense of the feature of hiding fields, we could move the complete template to the hidden fields of
-the credential. However, this would only allow us to share either the HTML view with all the fields
-populated, or no HTML view at all.
-
-An alternative is to design a presentation layer that could manage the dynamic creation of HTML views.
-This layer would consume a service which abstracts how to retrieve data from a received credential.
-This service would need to handle different formats of credential sharing. For example, in the current
-implementation, we share the entire plain credential. Retrieving data from a credential in this context
-means being able to extract fields based solely on the name/identifier of the field (this may
-require standardizing the naming conventions for credentials' fields).
-On the other hand, if we implement merkle trees for selective disclosure, the service would need to
-handle how to retrieve data from the plain text part of the credential plus the leaves and inclusion
-proofs. Similarly, if we iterate towards ZK techniques, the service will need to handle related
-structures too. All of this would be abstracted under a single interface.
-
-Now, the proposal to abstract a layer that allows retrieving data from a received credential does not
-define requirements upon the presentation layer itself. We leave the problem of constructing the HTML
-views themselves out of the scope of this document. We want to remark that a view may eventually depend
-on multiple credentials, or even be a derived claim (as in the classic example of an "above age" claim
-based on a "date of birth" claim). The presentation layer for such claims needs proper refinement.
-
-### Privacy limits
-
-We want to remark that, even though this approach adds functionality that we do not support today,
-i.e. some basic privacy, it still has flaws which we would like to document.
-
-- Merkle tree based techniques allow correlating holders' activity. If a holder `H` shares one hidden
-  field with a verifier `A`, and another field with a verifier `B`, then `A` and `B` could
-  deduce that both pieces of revealed data correspond to `H`. This is because both verifiers can see
-  that the same credential was used (because the credential hash will be the same in both cases)
-- Note that the above is not a consequence of the way in which we hide the fields, but a consequence
-  of the fact that we share the entire credential with both verifiers. This is needed because we check
-  the timestamp of the credential creation.
-
-The points above are known to us, and we intend to iterate towards other cryptographic schemes to
-overcome these correlations. We remark them here for completeness.
diff --git a/docs/protocol/subject-did-on-credentials.md b/docs/protocol/subject-did-on-credentials.md
deleted file mode 100644
index 40f693c9d2..0000000000
--- a/docs/protocol/subject-did-on-credentials.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Subject DIDs on credentials
-
-As mentioned [before](selective-disclosure-merkle-trees.md#hidden-data-and-credential-ownership-proofs),
-we currently verify that a credential belongs to a contact by relying on face-to-face identification.
-For example, we expect to add a picture or biometric data to credentials, and we expect to check that a
-person actually matches this data. However, we could make use of DIDs and cryptography to automate this
-process.
-
-If we add a DID to the `credentialSubject` field of our credentials (using the `id` field as specified in the
-[W3C data model](https://www.w3.org/TR/vc-data-model/#credential-subject)), we could provide cryptographic tools
-to credential subjects. Once we add these DIDs, we could request at credential sharing time, a proof that the
-holder sharing the credential actually controls the DID that appears on it.
-
-We need to describe interaction steps that involve a holder and a recipient. We assume that both parties have a trusted
-channel, and that there is a unique identifier for the recipient, e.g. a DID `DID_R`, which both parties agree
-no other entity would be able to impersonate. Another example of such an identifier could be the recipient's name
-(e.g. University ABC).
-
-For the description of the steps, we will assume that the identifier of the recipient is a DID `DID_R`.
-
-The holder would like to share a credential `C` with the recipient. `C` contains a DID `DID_C` in its `credentialSubject`
-field. The interaction between the parties should allow the holder to:
-- Prove to the recipient that he has control over `DID_C`, meaning that `C` was issued to him
-- Prevent the recipient from impersonating the holder (in a different interaction with another party)
-
-Note that the second point is important to prevent a man-in-the-middle situation.
-
-The proof steps are:
-- the recipient, with trusted identifier `DID_R`, sends a nonce `N` to the holder.
-- the holder can now send the recipient the credential `C`, together with a signature of `N || DID_R` (the concatenation
-  of the received nonce and the recipient DID). The signature is accompanied by the id of the key used, which is associated with `DID_C`.
-- the verifier concatenates his identifier (`DID_R`) to the nonce he shared and validates the received signature.
-
-In this way:
-- The holder generates a proof that will be accepted by the recipient (because the recipient generated the nonce `N` and
- the signature is produced with a key from `DID_C`)
-- The recipient will not be able to impersonate the holder against another actor, because, even if the other actor also
-  uses `N` as a nonce, the signature is tied to the `DID_R` DID. Recall that we have a secure channel assumption, and that
-  we also assume `DID_R` to be a trusted identifier for the recipient. Therefore, the intended recipient could not
-  impersonate the holder against a third actor.
-
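-A minimal sketch of the exchange follows, assuming the holder's key pair is the one published in `DID_C`'s document
-and using standard ECDSA from the JDK (names are illustrative):
-
-```scala
-import java.nio.charset.StandardCharsets.UTF_8
-import java.security.{PrivateKey, PublicKey, Signature}
-
-object OwnershipProof {
-  // Holder side: sign N || DID_R with a key associated to DID_C.
-  def holderResponse(holderKey: PrivateKey, nonce: Array[Byte], recipientDid: String): Array[Byte] = {
-    val signer = Signature.getInstance("SHA256withECDSA")
-    signer.initSign(holderKey)
-    signer.update(nonce ++ recipientDid.getBytes(UTF_8))
-    signer.sign()
-  }
-
-  // Recipient side: rebuild N || DID_R and verify against the public key resolved from DID_C.
-  def recipientAccepts(holderPublicKey: PublicKey, nonce: Array[Byte], recipientDid: String, proof: Array[Byte]): Boolean = {
-    val verifier = Signature.getInstance("SHA256withECDSA")
-    verifier.initVerify(holderPublicKey)
-    verifier.update(nonce ++ recipientDid.getBytes(UTF_8))
-    verifier.verify(proof)
-  }
-}
-```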
-
-## Note related to linkability
-
-The exchange of messages described above has a disadvantage for the holder. It leaves non-repudiable proof that he
-interacted with the recipient (the signature of `N || DID_R` uses a key associated with `DID_C`).
-
-We have analyzed that, in order to fix this problem, we would need to move to an approach based on zero knowledge proofs.
-ZK approaches tend to prove that the holder knows a secret associated with the credential that only the real holder knows.
-We are evaluating the implications for a future iteration.
-
-## Work to do
-
-- We need to update the mobile apps to generate DIDs for contacts (subjects/holders)
-- We need to update the credentials SDK to add the `id` property inside the `credentialSubject` (in the current design,
-  this may actually not require changes in the SDK nor the backend)
-- We need to implement the protobuf messages to represent the steps of interaction we described above
-
diff --git a/docs/protocol/unpublished-dids.md b/docs/protocol/unpublished-dids.md
deleted file mode 100644
index 78b0dd3736..0000000000
--- a/docs/protocol/unpublished-dids.md
+++ /dev/null
@@ -1,181 +0,0 @@
-# Unpublished DIDs
-
-## Objective
-
-In order to increase the scalability and reduce operational costs for our [protocol](./protocol-v0.3.md), we have
-proposed the use of unpublished DIDs. In the current implementation (at the time of this writing), every DID is
-published on-chain as soon as it is created. After publication, a node can resolve a DID and obtain the current state of
-the DID document. Publishing a DID currently allows one to:
-
-- Resolve the DID
-- Send DID update operations
-- Issue credentials in our setting where proofs of existence are posted on-chain
-
-The main drawback we face is that every DID publication requires an underlying ledger transaction, leading to a delay
-between the moment the DID is generated, and the time it becomes resolvable by nodes. The process also adds a fee cost.
-We find it reasonable to believe that a substantial number of users won't use every DID for all the above features.
-Hence, we propose to mitigate the stated drawbacks by allowing a DID to fulfil a subset of functionalities _before_
-being published. In particular, we would like to create a concept of **unpublished DID** that:
-
-- Allows a DID to be resolvable before on-chain publication
-- Allows the DID to be published if needed
-
-In the rest of the document, we present an approach we have evaluated from Sidetree and comment on the potential next
-steps we could follow.
-
-
-## Approaches reviewed
-
-### Sidetree
-
-In [Sidetree](https://identity.foundation/sidetree/spec/) we can see the definition of
-[Long form DID URI](https://identity.foundation/sidetree/spec/#long-form-did-uris) designed to
-
-> * Resolving the DID Documents of unpublished DIDs.
-> * Authenticating with unpublished DIDs.
-> * Signing and verifying credentials signed against unpublished DIDs.
-> * After publication and propagation are complete, authenticating with either the Short-Form DID URI or Long-Form DID URI.
-> * After publication and propagation are complete, signing and verifying credentials signed against either the Short-Form DID URI or Long-Form DID URI.
-
-In a simplified description, the specification uses URI parameters (the `?initial-state=` parameter) to attach the
-initial state of the DID to the DID URI. The intended use is that a resolver takes the DID and attempts to resolve
-it; if the associated DID Document is not found, the resolver returns the decoded initial state document attached
-in the `initial-state` URI parameter. If, however, the DID Document is resolved, then the `initial-state` value is
-ignored and resolution works as if the URI parameter was never attached (see the sketch below).
-
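-A minimal sketch of this fallback rule (hypothetical types; the published state is modelled as a plain map rather
-than actual node resolution):
-
-```scala
-final case class DidDocument(id: String, publicKeys: List[String])
-
-// Resolve a long form DID: use the published document if the short form is known,
-// otherwise fall back to the initial state attached to the long form itself.
-def resolveLongForm(
-    shortForm: String,
-    attachedInitialState: DidDocument,
-    published: Map[String, DidDocument] // stand-in for the state derived from the ledger
-): DidDocument =
-  published.getOrElse(shortForm, attachedInitialState)
-```
-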
-The approach described faced the following problem.
-As discussed in issues [#782](https://github.com/decentralized-identity/sidetree/issues/782) and
-[#777](https://github.com/decentralized-identity/sidetree/issues/777) on Sidetree's repository and on
-[this issue](https://github.com/w3c/did-core/issues/337) in DID Core, the use of URI parameters may lead to some
-inconsistencies in the resolved DID document. For example, DID Documents
-[MUST have an `id` field](https://w3c.github.io/did-core/#did-subject) which MUST be a DID. This means that if we have
-a long form DID:
-
- ```
- did::?initial-state=
- ```
-
-which hasn't been published, and we resolve it, the DID Document obtained should look like:
-
- ```
- {
- id: "did::",
- ...
- }
- ```
-
-leading to an `id` that could be accidentally used and shared while not being resolvable (because the DID could
-remain unpublished). At the same time, the `id` `"did::?initial-state="` is not a valid
-DID (it is a [DID URL](https://w3c.github.io/did-core/#did-url-syntax) though), leading to an invalid DID Document.
-
-To mitigate the issue, Sidetree's working group added to their reference implementation a
-[different format](https://github.com/decentralized-identity/sidetree/commit/b6945a9286d053e8254b604c8926ce35dc21d47c#diff-e4b25c798093490e1f72b171527e6596R54),
-
-> // Long-form can be in the form of:
-> // 'did::?--initial-state=.' or
-> // 'did:::.'
-
-During the WG call on Tuesday 29th 2020, it was confirmed that the syntax with `-initial-state` would be deprecated.
-According to [W3C spec](https://w3c.github.io/did-core/#did-syntax), the second syntax leads to a valid DID.
-The long form still maintains a short form counterpart, i.e. for a long form DID:
-
-```
-'did:::.'
-```
-
-we have a short form (a.k.a canonical form)
-
-```
-'did::
-```
-
-The WG had not yet defined (at the time of this writing) how to treat the returned `id` in the resolved DID Document
-_after_ the DID is published. That is, after publication, if a user attempts to resolve the long form DID, should the
-`id` field of the resolved document contain the long form or short form of the DID? Different proposals are under
-discussion.
-
-## Proposal for first iteration
-
-In order to provide our equivalent to long form DIDs, we will adopt the form:
-
-```
- did:prism:hash():base64URL()
-```
-
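-A rough sketch of how such a DID could be built and checked for consistency (hypothetical helpers; SHA-256 for the
-hash and base64URL for the state are assumptions here, and encoding details such as padding are illustrative only):
-
-```scala
-import java.security.MessageDigest
-import java.util.Base64
-
-// Build a long form DID from the encoded initial DID state.
-def longFormDid(encodedInitialState: Array[Byte]): String = {
-  val hashHex  = MessageDigest.getInstance("SHA-256").digest(encodedInitialState).map("%02x".format(_)).mkString
-  val stateB64 = Base64.getUrlEncoder.encodeToString(encodedInitialState)
-  s"did:prism:$hashHex:$stateB64"
-}
-
-// Resolution of a long form DID should first check that the hash segment matches the attached state.
-def isConsistent(did: String): Boolean = did.split(':') match {
-  case Array("did", "prism", hashHex, stateB64) =>
-    val state = Base64.getUrlDecoder.decode(stateB64)
-    MessageDigest.getInstance("SHA-256").digest(state).map("%02x".format(_)).mkString == hashHex
-  case _ => false
-}
-```
-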
-### Impact on recovery process
-
-We must consider the impact of these new DIDs on our [recovery process](./key-derivation.md#did-recovery).
-The problem we have with unpublished DIDs is that they won't be found on-chain during the recovery process iteration.
-This will lead the algorithm to stop in step 4.1. Given that mobile apps will be the first users of these DIDs, it looks
-reasonable to adapt the DID recovery process to ask the connector if a DID was used for a connection instead of asking
-for DID resolution. In the future, we could store all generated DID Documents in a data-vault-like service.
-
-### DID length
-
-We ran some tests to measure how long our long form DIDs would be.
-We generated 100000 DIDs under 3 settings:
-
-1. With 1 master key
-2. With 2 master keys
-3. With 3 master keys
-
-The code used can be found [here](../../node/test/src/io/iohk/atala/prism/node/poc/EncodedSizes.scala).
-Example results are:
-
-For 1 key
-```
-Generating 100000 dids
-
-printing 3 shortest DIDs
-(did:prism:7e5b03ee503c6d63ca814c64b1e6affd3e16d5eafb086ac667ae63b44ea23f2b:CmAKXhJcCgdtYXN0ZXIwEAFCTwoJc2VjcDI1NmsxEiA4BHOH6oJYfe_pVSqXl69HcRW304Mk4ExOwJye9VWcGBogOBL32BWDzjqnSqoQss8x_qv_02fwyrEL13QCKT9KRoQ=,207)
-(did:prism:8f4ecd2b7123b0ddfb7c4a443b3d25823041d470b388e6bb0577cd5d49f9b8a9:CmEKXxJdCgdtYXN0ZXIwEAFCUAoJc2VjcDI1NmsxEiEA-NGsBmlol3-5qMZ6BXp1Al6-24NAORgjEA1RBhHiYAIaIEDrXK8kjANIzJmOLVHFtKq0j7H_l8Uj96UlyDO3qjk7,207)
-(did:prism:0ad343d0c4b9a69014cde239589df57fb3a0adbe0fb3f5c8c4a294bbfd9827cb:CmEKXxJdCgdtYXN0ZXIwEAFCUAoJc2VjcDI1NmsxEiEA-BxLxhOrKhum0RqhdN-eLCnyMtXHlyYu6b1ASOpvxb4aICf-6F4DzlCthmI0FDomlzbp_GHo_5zFfpUk9c24Jpfe,207)
-
-printing 3 longest DIDs
-(did:prism:0dcbbc7ba0e4194bc43fee20cb9ecf156d9c191b2eb88e75a45decf68d56665c:CmIKYBJeCgdtYXN0ZXIwEAFCUQoJc2VjcDI1NmsxEiEA55RbxHRgZ1zB3rSpHjYuclIF_9G7yhGgWGi7fc6JxVIaIQCEu6hOdE-JljVGcrW5zgendY3fZtDpbJJbNsS2Tjb32Q==,211)
-(did:prism:bbf07987129f43ea213d7c1a1222d6a242efe76c9c853c27d7c90560f287c10d:CmIKYBJeCgdtYXN0ZXIwEAFCUQoJc2VjcDI1NmsxEiEA5_5gl70kwcDvl5gKH2_AJzry1cT2XZHSBfS1dmQTIY0aIQDiK3AHz9ilWDgqWXOFCbyIMlDgYWh9C5-NTni6EYwdjA==,211)
-(did:prism:8b5094925066ade140f99c9bba00c0a8f139882138b5939180af4d827388c1f8:CmIKYBJeCgdtYXN0ZXIwEAFCUQoJc2VjcDI1NmsxEiEA6NKGFL-n5dek0qQP-Qlwq-ea6TatJNYgCNY-ysRfyzYaIQCDs0VVxTcvWn9tZxZufla7dIMzQsSU-Jg_fU3j5XWJNQ==,211)
-
-Average DID length 207.99972 bytes
-```
-
-For 2 keys
-```
-Generating 100000 dids
-
-printing 3 shortest DIDs
-(did:prism:8159135db4b3cac4c09667087ea7c46b1019d85885f72af2bab083ccd092c5d4:Cr0BCroBEloKB21hc3RlcjAQAUJNCglzZWNwMjU2azESHj2JQyzK59GApMJoM5O28nQho9tpIOonINJRUWIE1hogbnHhV4pZQM0eAMBUQ0MC6g0Sa9uF87G74kmN6DvBFpQSXAoHbWFzdGVyMRABQk8KCXNlY3AyNTZrMRIgI9WfWp31w4eQOOEQF2mzVZCednBNIYugJQwL42wkrQYaIFR2BcIwYSnmXEBVbVaYmfz6DOcPPDEgSfVZOK-2kgFE,331)
-(did:prism:471d8c0b8b813de92b4a70604a56fc0ea68d84ccd9c1a4de9ebaeb1afb906235:CsABCr0BElwKB21hc3RlcjAQAUJPCglzZWNwMjU2azESIEQ0OMfxIlt-qpR7lPGNy2G2SldPY8_NPm3ndKkDaCt6GiAirtTWn0QeztWE5i-ZwENrS6PNoAXXFBom1jTAYoxD6hJdCgdtYXN0ZXIxEAFCUAoJc2VjcDI1NmsxEiEAtP0Rm24Fn2Mg6S67SMlBCvoZ09LgKZvwpf26TlxCj7caIH4n2UvHcNYIY6k7EDBtgyDptgpcgR5EWxaf-pLO_PJ-,335)
-(did:prism:ef51a3fa3e5e222c819575e3f137ad45c1c1db7e3c06b21a590332e73d20953b:CsABCr0BEl0KB21hc3RlcjAQAUJQCglzZWNwMjU2azESIQDE_vSfeNUmoyV-qm2jJYVTqTMQ8T83X8IsWBLMt3ctERogNRgDXpxUyteC2kBCEB-L7SIEt7ivJyBETjJcuUHX1IgSXAoHbWFzdGVyMRABQk8KCXNlY3AyNTZrMRIgMu7Gocu-ESEtzbgXNT2tpZzH_UqRC-8KoFPu1fNUscYaIDiDhA1LVhJYHWazwJpGZjB3lQ1kIy-VrO7rRDVQ3zKv,335)
-
-printing 3 longest DIDs
-(did:prism:6c5ca0c67ea138eb41ee1a8060527b8d3bbf921e103e977b77786f6b71567000:CsIBCr8BEl0KB21hc3RlcjAQAUJQCglzZWNwMjU2azESIQCHJNU9sXhHZy3EdgzfGjzClA0B9PVJbe2apsG0ljhdqRogJnESFoQd9AjqBgIcaKmwyCYnVm6tHucyAOcCq59BtfcSXgoHbWFzdGVyMRABQlEKCXNlY3AyNTZrMRIhAJqTQ7CzDZ8d02kXFFlw_Do5sEiKpZdtRQLAw9EqhEi9GiEAtMlOZRfGivpnfpDbn7uDBZTb5Rj3Y3rTgSfIdAFhQP0=,339)
-(did:prism:0a3d98977ebeda7d11bddcc98d3b3e7072886e3e91ddf2fa7f99bdb1c110b00a:CsIBCr8BEl0KB21hc3RlcjAQAUJQCglzZWNwMjU2azESIFWdJTyU0lf-lbiLKxcn2Ags4V88I0QSZash5WRm7UBUGiEAnaq_ipQkUUYXUaYe9WzUqdFa6LOv6tOIPH4Rfm4vmhQSXgoHbWFzdGVyMRABQlEKCXNlY3AyNTZrMRIhAMdis1_dwKgyV02uZQCfvx0O9J-dXkP1DXZUbiiA9kVvGiEAtShw9wGDMe1FnO1Be7MANPknlhUlZAPsZMbf7YT6C2E=,339)
-(did:prism:ca6b2058009b455dca1c3b491fb06e4d37713eda32099b2cf91e31e06befb569:CsIBCr8BEl4KB21hc3RlcjAQAUJRCglzZWNwMjU2azESIQDS7RopaBAO6dq3l9TBD_x-FhHAvH68wszcHKDSWguUUBohAO0eUWnWHTHNVXoqsHYwfYABRZ_wlCVTfcZKsV-ZuSd_El0KB21hc3RlcjEQAUJQCglzZWNwMjU2azESIEqn3DsW7NdGHD96OOIMXUTzl0ab3m5v4oaWZHqqbjNVGiEA3kk0XVALFtm-U7zp1oWUaAP-y4fJlUVoUCDiE6h1OEU=,339)
-
-Average DID length 337.737 bytes
-```
-
-For 3 keys
-```
-Generating 100000 dids
-
-printing 3 shortest DIDs
-(did:prism:e2f9e03bf49c5644066dd6efb09a130576c46e4c687861c18685ffc0c63f0bce:Cp0CCpoCEl0KB21hc3RlcjAQAUJQCglzZWNwMjU2azESIFgi7BOQhH8RtgZist8Ag416pL_JYBfaP0RsO21FI2JaGiEA2bzA4iPnrvX-8cdbVjytOfafp48MGuJiSMqhKYenM70SWwoHbWFzdGVyMRABQk4KCXNlY3AyNTZrMRIgbHxvyJdH5S_0ZhI4L2sh_hEdrixpAJ3eE3lpHfh-bYYaH12aRPGE3QntYeekBH24cfwLmbb-dVCByILWvWtXMwMSXAoHbWFzdGVyMhABQk8KCXNlY3AyNTZrMRIgeVpCGozZ_KhkwPd46HJnkcqmdDckGVJHLJgyuxjvh5saIHt3BeIApG6jqj84ZP66AfsTUpuNQP1BSVLzdhcQLoPD,459)
-(did:prism:ad7a81274c0a0295dcf4417ccc501ccfbca1165bf2bcad94430aa42171ed173d:Cp0CCpoCElwKB21hc3RlcjAQAUJPCglzZWNwMjU2azESIBWIwCfSj6MtNG29Z_t7-AJRWyd1w95efBnf5BhA_3QMGiBLq18HV9KGL--BusOyqeWInFu98A0lVFLUGK4snwus9hJcCgdtYXN0ZXIxEAFCTwoJc2VjcDI1NmsxEiBwLUQ1jTnE8JdPcf_C41tyd7Y2loCkGlykfslZy0g4ShogUco4yJ-vf4vhCV3BATY33tI5ybM6y1y8L8VHP4BtnAASXAoHbWFzdGVyMhABQk8KCXNlY3AyNTZrMRIgAY_UwVb48SGeOxspuFTqEYWedQOCL_6lyd198ABWTfcaIBifedeOY8K3CKAZf06S1BG3fGodNzN3jAfi5oV1usmI,459)
-(did:prism:b7800057dd8d48c86709db5f9191105666301449fa481f0bd4e2e0f244af2745:Cp0CCpoCElwKB21hc3RlcjAQAUJPCglzZWNwMjU2azESIGrZ5TSsDsQCUVwUs0wIe-wCkh-VSFgDfAsP06qVCYZZGiBEztwdpOi71HeyYVkowJt70_moGYxHe-5L8cT0WTcmCxJcCgdtYXN0ZXIxEAFCTwoJc2VjcDI1NmsxEiA8B_NKd1_1G_eGFO5SPDjg3Am0xWQKX7opBwVV5XU02hogenHpXX30EfTwbZCQhyLNRMaZDQFcfJk5t6f1V1c0v1wSXAoHbWFzdGVyMhABQk8KCXNlY3AyNTZrMRIgGMcERNmpRhx04j39poUvu1TMVpnSVjvoaez5WW3oKEMaICRim4pqzKYerpkkxmalZAC_NSA-rngcnUkzsiw3-NeD,459)
-
-printing 3 longest DIDs
-(did:prism:4bb94e65c2bc37fc4e52c4f2471418302d26e43cf126677f407a0451be9a7c22:CqECCp4CEl4KB21hc3RlcjAQAUJRCglzZWNwMjU2azESIQCdzPQA1RFTIpEp1CJNwO0wqe1qz5JawnI8sDlZfrkw5BohANF1qt5111DzBKPYsnrSjm01zJ--_470DQopP4QRyGgHEl0KB21hc3RlcjEQAUJQCglzZWNwMjU2azESIQC_IY74aciuAQDyLAZjXGwaq1QDfHd27okxcGXm6OOVUxogEhbKgQWBZTWzRrjLkpAg4c-TiCtOXK7rOVvoFkV8FnYSXQoHbWFzdGVyMhABQlAKCXNlY3AyNTZrMRIgXm2tdR9ReKjOJi2H3YtvQPkaSO06zkFjAKR_WDHYzJMaIQDUI8-MVdBoinpUUy0zRX8aUlhZ1ij4nwDBfJB6QnJrow==,467)
-(did:prism:ca7fbf765201acafc4e20ce38ef4dc3d159b6067b5b13ee99ffbf6b7e3f6facc:CqECCp4CEl0KB21hc3RlcjAQAUJQCglzZWNwMjU2azESIAqGJ2KqLJ3rfQ-hkB2eKRSLgfqcN314tB1F0P6gM-6kGiEAohZpSVfv_ZYiMQv2IosQdgZjEzNlzzwBeGDpnXJ01lYSXgoHbWFzdGVyMRABQlEKCXNlY3AyNTZrMRIhAKyAvzd6NmegIuPYsQKU6brHjjrkEoTyuCWfui7vQZ05GiEA8yogy0Ex_MbjWq_a6U-J97bekXrEzGzcqKhOh3CjxMcSXQoHbWFzdGVyMhABQlAKCXNlY3AyNTZrMRIhAIcBqfZKEmKSudx6j4D_g_V4UMwFmRsBGQ1ncX6s3l1vGiAhXUWh5fgnfAFnZhGHh6rElvI8XaVfI6gBbdT2UriRDw==,467)
-(did:prism:33afe0e357fa60eaf205ec9f1fd3e579c3a656c27b5143fbbdfa35c2c2b5916d:CqECCp4CEl0KB21hc3RlcjAQAUJQCglzZWNwMjU2azESIQCwxcy1xkqJcbETa07ucq1GrlEfmCSj9S6U5PwuDOb58BogIzMlA9N-1sRDsRSwpsUrC755bmfNQyok-6tWa2q-cT8SXgoHbWFzdGVyMRABQlEKCXNlY3AyNTZrMRIhALwExYd3DQbo37G8MvF3E-0q-dvR5CG_mkGr11WtNd3_GiEAh7kMetZcM7RgMrnru4An2K2ECP8QvCl6_0TjslU8S9gSXQoHbWFzdGVyMhABQlAKCXNlY3AyNTZrMRIgRF_1TeGjfM64opVJOQfOPbqoldGQgdw-ZiE3X5-TqIoaIQC7Epdgj-kYey_E1RqnIO_slSU29uTXjhDjw-N0tMKT5A==,467)
-
-Average DID length 464.28944 bytes
-```
-
-We see an increase of ~130 bytes per key. We should point out that we are representing keys as uncompressed elliptic curve
-points. We could reduce space by representing keys in compressed form (i.e. one coordinate and a byte that indicates the
-parity of the other coordinate), as sketched below.
-
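-A rough illustration of the size difference (plain JDK EC on its default curve, not PRISM code):
-
-```scala
-import java.security.KeyPairGenerator
-import java.security.interfaces.ECPublicKey
-
-object CompressedKeySketch {
-  def main(args: Array[String]): Unit = {
-    val point = KeyPairGenerator.getInstance("EC").generateKeyPair()
-      .getPublic.asInstanceOf[ECPublicKey].getW
-
-    // Fixed-size big-endian encoding of a coordinate (32 bytes for a 256-bit curve).
-    def coordBytes(i: java.math.BigInteger): Array[Byte] =
-      i.toByteArray.dropWhile(_ == 0).reverse.padTo(32, 0.toByte).reverse
-
-    val x = coordBytes(point.getAffineX)
-    val y = coordBytes(point.getAffineY)
-
-    val uncompressed = Array(0x04.toByte) ++ x ++ y                                          // 65 bytes
-    val compressed   = Array((if (point.getAffineY.testBit(0)) 0x03 else 0x02).toByte) ++ x  // 33 bytes
-
-    println(s"uncompressed: ${uncompressed.length} bytes, compressed: ${compressed.length} bytes")
-  }
-}
-```
-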
-By comparison, we can see [this Sidetree test vector](https://github.com/decentralized-identity/sidetree/blob/master/tests/fixtures/longFormDid/longFormDid.txt)
-that represents one of their long form DIDs. That DID is 883 bytes long.
diff --git a/docs/protocol/updates.md b/docs/protocol/updates.md
deleted file mode 100644
index 95498741c3..0000000000
--- a/docs/protocol/updates.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Update mechanism
-
-## Context
-
-We have designed a protocol description of PRISM that guided our implementation for the PRISM node.
-We are now approaching the first deployments in production systems, and we need to consider the challenges that this brings.
-In particular, the distributed nature of our system requires special care at the time of updating the node. Updates can have different consequences:
-1. If we make internal optimizations in the PRISM node, users could opt to delay upgrading the version as no semantic change will come and the
- infrastructure will keep working.
-2. When a Cardano hard fork approaches, all PRISM node users will need to update the Cardano node dependencies. The protocol would remain unchanged,
- same with client code.
-3. If we add new operations, deprecate old ones, or change the semantics of our protocol events, we would need all users to update their nodes in
- order to keep a consistent view of DIDs and credentials state.
-
-Note that the 3 scenarios illustrate different consequences. In the first case, if the PRISM node operator does not upgrade its node, it will
-still be able to follow the protocol and keep a consistent view with the rest of the PRISM nodes. In the second scenario, if the user does not update
-its node, it won't be able to continue reading new events (and may have problems sending new events too), because the Cardano node won't be able to
-keep in sync with the network. Therefore, the user will fall behind other nodes. In the third case, we have something similar to case 2:
-the user will be out of sync with respect to updated nodes. Moreover, the user will see old (possibly deprecated) events as valid, creating additional
-security risks.
-It seems reasonable to conclude that all nodes running the same version will always see the same state.
-
-How could we mitigate the risks of cases 2 and 3?
-- For case 2, the protocol remains unchanged, but we know that users will be out of sync with the chain, so we could rely on external coordination
-  to drive updates. The incentive to update is that their PRISM nodes will likely stop working due to incompatibilities of the Cardano components with
-  the current network.
-- Case 3 is a trickier situation, because the PRISM nodes do not have a way to detect the loss of consensus.
-
-
-## Proposal
-
-The proposal we offer is to add a special protocol event, `VersionUpdate`. The event should be signed by a special key (or DID) controlled by IOHK.
-The idea is that every node will have its own version hardcoded (or read from config files). The node will process events normally and, whenever
-it sees a `VersionUpdate` event, it will retrieve the new version from the message and compare it against its own internal version. If the node version
-is behind and incompatible with the event version, then it will stop processing further events. We could also force the node to not send new events
-to the blockchain when outdated.
-We can think of two types of updates to the protocol:
-- Major: Meaning that some existing AtalaOperations before the version will become invalid. This requires the PRISM node operator to perform an
-  upgrade in order to continue interacting with the network (it is a way to deprecate legacy events). This will likely force re-indexing the node
-  state.
-- Minor: Meaning that all the AtalaOperations that the node understands before updating remain valid, but new ones have been added which could
-  add more state to the node. For example, the protocol adds new events to represent credential issuance or revocation. In this case, the node
-  could opt not to upgrade. This could be helpful to simplify maintenance on systems that do not intend to use the new features.
-
-We should note that:
-- Every protocol version will have a node version.
-- SDK versions could require minimum node versions, which in turn implement protocol versions.
-
-An example of the messages for the event:
-
-```proto
-message VersionUpdate {
- VersionInfo version = 1; // information of the new version
- string signing_key_id = 2; // id of the IOHK key used to sign this event
- bytes signature = 3; // signature on the `version` field using IOHK's key
-}
-
-message VersionInfo {
- string version_name = 1; // (optional) name of the version
- // new major version to be announced, if this value is changed, the node MUST upgrade before `effective_since` because the new protocol version
- // modifies existing events. This implies that some events _published_ by this node would stop being valid for nodes in newer version
- int32 new_major_version = 2;
- // new minor version to be announced, if this value changes, the node can opt to not update. All events _published_ by this node would be also
- // understood by other nodes with the same major version. However, there may be new events that this node won't _read_
- int32 new_minor_version = 3;
- int32 effective_since = 4; // Cardano block number that tells since which block the update is enforced
-}
-```
-
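-As a minimal sketch (hypothetical types, not the actual node code) of how a node could react to an announced version,
-following the major/minor semantics above:
-
-```scala
-final case class ProtocolVersion(major: Int, minor: Int)
-
-sealed trait NodeAction
-case object ContinueProcessing   extends NodeAction // same or older version announced
-case object WarnOutdated         extends NodeAction // minor bump: node may miss new event types but stays consistent
-case object HaltAtEffectiveBlock extends NodeAction // major bump: stop processing at `effective_since` until upgraded
-
-def reactToVersionUpdate(own: ProtocolVersion, announced: ProtocolVersion): NodeAction =
-  if (announced.major > own.major) HaltAtEffectiveBlock
-  else if (announced.minor > own.minor) WarnOutdated
-  else ContinueProcessing
-```
-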
-### Comments on backward compatibility
-
-It is interesting to consider that we have different alternatives to manage code complexity as we upgrade the system.
-On one hand, we could support all the legacy events and semantics of previous versions. On the other hand, given that we are marking the
-update epoch on-chain, this implicitly defines a snapshot of our system. This means that we can distribute a new version of the software that, at some point,
-replaces legacy code by a snapshot of the system state. This would be a verifiable snapshot, and it could enable faster bootstrapping in the future.
-
diff --git a/docs/public-docs/.gitignore b/docs/public-docs/.gitignore
deleted file mode 100644
index f47cb2045f..0000000000
--- a/docs/public-docs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.out
diff --git a/docs/public-docs/Makefile b/docs/public-docs/Makefile
deleted file mode 100644
index 8bb1626a87..0000000000
--- a/docs/public-docs/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-main:
- pdflatex article.tex
- pdflatex article.tex
-
-clean:
- rm article.aux article.log article.toc article.pdf article.out
diff --git a/docs/public-docs/README.md b/docs/public-docs/README.md
deleted file mode 100644
index fcad6322f2..0000000000
--- a/docs/public-docs/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Protocol description
-
-This folder contains a compact description of the protocol behind PRISM.
-In order to compile the document, you only need `pdflatex` installed.
-Once installed you could either run:
-
-```bash
-$ pdflatex article.tex
-```
-
-or use the Makefile by running:
-
-```bash
-$ make
-```
-
-You can delete the generated files by running:
-
-```
-$ make clean
-```
diff --git a/docs/public-docs/article.tex b/docs/public-docs/article.tex
deleted file mode 100644
index 8999c21447..0000000000
--- a/docs/public-docs/article.tex
+++ /dev/null
@@ -1,266 +0,0 @@
-\documentclass[10pt,a4paper]{article}
-\usepackage[margin=1in]{geometry}
-\usepackage{hyperref}
-\usepackage{xcolor}
-\usepackage{graphicx}
-
-
-\title{PRISM protocol v0.3}
-
-\author{Atala PRISM team - IOG}
-
-\date{}
-
-\begin{document}
-
-
-\maketitle
-
-\begin{abstract}
-This document describes the current state of the protocol that supports PRISM. PRISM is a framework for the management of decentralized identifiers and verifiable credentials.
-\end{abstract}
-
-\setcounter{tocdepth}{3}
-
-\tableofcontents
-\newpage
-
-%----------------------------------------------------------------------------------------
-% INTRODUCTION
-%----------------------------------------------------------------------------------------
-
-\section{Introduction and protocol description}
-
-In this section we will informally describe a protocol to create and manage DIDs (\textbf{D}ecentralized \textbf{Id}entifiers), which also allows managing the creation, revocation and presentation of verifiable credentials.
-
-\subsection{DIDs management}
-
-For simplicity, we will define a DID as a string identifier of the form
-
-\begin{center}
- \emph{did:prism:$\langle$identifier$\rangle$}
-\end{center}
-
-Each DID is associated to a list of public keys. The list can be updated over time by adding and revoking keys. Each key has an assigned role during its lifetime. The three possible roles we describe are:
-
-\begin{description}
-\item[issuing keys] used to issue credentials on behalf of the DID.
-\item[revocation keys] used to revoke credentials on behalf of the DID.
-\item[master keys] used to add or revoke other keys associated to the DID.
-\end{description}
-
-Our construction for DID management relies on an underlying blockchain. The blockchain allows us to publish transactions with sufficiently large metadata. We use the blockchain both for data distribution among parties, and for consensus on the order of relevant events in our protocol. All the parties participating in our protocol are running a blockchain node that reads the metadata of the blockchain transactions. When they find a protocol event, they process it to construct their view of the system.
-
-In the following sub-sections we describe the events in our protocol related to DID management.
-
-\subsubsection{DID creation}
-
-In order to create a DID, a user will follow the steps below:
-
-\begin{itemize}
-\item The user generates a desired list of public keys. He will associate each key to a key identifier and a role.
- There must be at least one key with the master role in this initial list.
-\item The resulting list is encoded and hashed producing an encoded \emph{initial\_state} and a \emph{hash}.
-\item The \emph{hash} is encoded in hexadecimal form producing an \emph{encoded\_hash}.
-\item The user's DID is constructed as \emph{did:prism:encoded\_hash}.
-\item The user will send a signed \emph{CreateDID} event in the metadata of a transaction that is submitted to the blockchain.
- The event contains the list of public keys with their identifiers and roles.
- The signature will be performed using the private key associated to any of the master keys in the initial list of keys.
-\end{itemize}
-
-Once the transaction is added to the blockchain with a sufficient number of confirmations $d$ (i.e. the block containing the transaction has $d$ blocks appended after it on the underlying chain), all the participants following our protocol will validate the signature of the \emph{CreateDID} event. After verification, they will register the DID as \emph{published} along with all the keys posted in the initial list. The keys will be considered valid since the time associated to the event carrier transaction. This is a timestamp generated by the blockchain.
-
-Optionally, while waiting for blockchain confirmation, the user can also use the associated DID
-
-\begin{center}
-\emph{did:prism:encoded\_hash:initial\_state}
-\end{center}
-
-This DID, called \emph{long form} or \emph{unpublished} DID will not be recognised as \emph{published} by other parties in the protocol. However, other users would be able to verify that the list of keys encoded in \emph{initial\_state} corresponds to the DID \emph{did:prism:encoded\_hash}. The recipient of an unpublished DID needs to query the state of the \emph{short form} of the DID (the prefix before "\emph{:initial\_state}") to check for changes in the list of keys.
-
-\subsubsection{DID update}
-
-Updating a DID means that the user controlling a master key associated to it, will add new or revoke existing keys to the list associated to the DID. In order to update the list of keys, the user will:
-\begin{itemize}
-\item Create the list of key identifiers that the user wants to revoke from the current state of his DID.
-\item Create a list of keys he wants to add to the list of keys associated to his identifier.
- These keys must be associated to a role and a fresh key identifier.
-\item Create an \emph{UpdateDID} event that contains the two previously generated lists.
- The event also carries a hash of the last \emph{UpdateDID} performed over the DID (or the hash of the \emph{CreateDID} if this is the first
- update)
-\item Sign the event with one of the \emph{currently associated} master keys of the DID, and publish the event inside the metadata of a transaction.
-\end{itemize}
-
-When the event added to the blockchain has enough confirmations, all parties will process the event. This is, they will validate the signature, and update their internal knowledge of the updated DID. The newly added keys and the revoked keys will be timestamped using the time of the carrier transaction.
-
-With these two events we have shown how to create and update DIDs.
-
-\subsubsection{Obtain the list of keys associated to a DID}
-
-Now, we can define the process that a protocol participant must follow to obtain the keys associated to a DID.
-We mentioned that a DID can be presented in long form (or as an unpublished DID), or in its short form. Hence, we will describe the process in the two cases.
-
-In order to obtain the keys associated to a DID in short form, we simply read the state we have constructed so far by processing the \emph{CreateDID} and \emph{UpdateDID} events we read from the blockchain. The list of keys associated to the DID, is the one we see in our internal state. If we do not find the DID as a published one in our state, we return a \emph{DID unknown} response.
-
-Now, if the DID we receive is in long form, we first verify that $hash(initial\_state) = initial\_hash$. If that check fails we reply with an \emph{Invalid DID} response. If the check passes, we extract the short form of this DID (the prefix that ends right before the last \emph{":"}), and we check the list of keys as described in the previous paragraph. If the result of this process is a \emph{DID unknown} response, then we decode the list from the \emph{initial\_state} suffix and return this list as a result.
-
-With this process we complete the events and operations related to DIDs.
-Let us now move on to the events related to issuing and revoking credentials.
-
-\subsection{Verifiable credentials}
-
-Now that we have DIDs, we can proceed to the creation and revocation of credentials. A \emph{credential} to us is a set of \emph{claims}. Each claim is represented by a property-value pair, e.g. $(name, John)$. Credentials are created by \emph{issuers} to \emph{subjects}. Both issuers and subjects are represented with their corresponding DIDs.
-
-For practical reasons, we assume that issuers want to issue credentials in batches. This is, an issuer would like to create multiple credentials at once. Below we describe the steps to issue a batch of credentials.
-
-\subsubsection{Credentials batch issuance}
-
-Let us first refine what a credential is in the context of this document.
-
-\begin{description}
-\item[Credential] a credential is a JSON document that contains three key-value maps.
- \begin{itemize}
- \item the "issuer" key represents the issuer DID.
- \item the "keyId" key represents the key identifier associated to the issuer DID that was used to sign the credential.
- \item the "claims" key represents claims that the issuer makes about the subject. The claims are grouped in a JSON object.
- There is a key for each claim asserted by the issuer. One particular claim is the subject DID, that represents a DID
- controlled by the subject of the credential.
- \end{itemize}
-\end{description}
-
-In the following steps, we assume the issuer and subjects have already established a secure communication channel. We also assume that the issuer has
-a published DID that will be used to represent him in credentials.
-In order to issue a batch of an arbitrary number $N$ of credentials, the issuer will:
-\begin{itemize}
-\item Ask to each subject the DID they would like to use for the credential they will receive.
-\item The issuer creates the credentials for each subject. It uses his published DID and the key id of an \emph{issuing key} to populate
- the credential corresponding fields. On each credential, it adds the corresponding subject DID as one of the claims.
-\item The issuer encodes each individual credential using a base64URL encoding.
-\item The issuer signs each individual encoded credential with an \emph{issuing key} associated to his publicly known DID.
- The signing key corresponds to the key id in the credentials' claims.
-\item The issuer encodes each signature, and concatenates them to their corresponding credentials using a dot (".") as separator.
- This produces strings of the form $\langle{}encoded\_credentials\rangle{}.\langle{}encoded\_signature\rangle{}$. We call these strings \emph{signed credentials}.
-\item Now, the issuer takes all the signed credentials, and computes a merkle root from them.
-\item The issuer creates an \emph{IssueBatch} event that contains the merkle root and issuer DID, and signs the event with an \emph{issuing key} associated to this DID.
-\textbf{Note}: Today we use the same issuing key for the event signature and the individual credentials signature, but those could be different keys.
-\item The issuer attaches the signed event to the metadata of a transaction and sends it to the blockchain.
-\item The issuer gives to each subject their corresponding signed credential along with their associated merkle inclusion proof.
-\end{itemize}
-
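-As a compact, non-normative summary of the structure just described, where $c_{i}$ denotes a credential, $\sigma_{i}$ the issuer's signature over its encoding, and $h$ the hash applied to the merkle tree leaves:
-
-\begin{center}
-$sc_{i} = \mathit{base64URL}(c_{i}) \; \textrm{``.''} \; \mathit{base64URL}(\sigma_{i})$
-\qquad and \qquad
-$root = \mathit{MerkleRoot}(h(sc_{1}), \ldots, h(sc_{N}))$
-\end{center}
-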
-As with previous protocol events, all parties will process the transaction once confirmed, validate the event signature, and timestamp the merkle root with the time associated to the carrier transaction.
-
-Note that, even though the issuer DID \emph{must be published}, subjects' DIDs can remain unpublished.
-
-\subsubsection{Credentials revocation}
-
-In order to revoke credentials, issuers have two alternatives:
-\begin{enumerate}
-\item They revoke all the credentials in a batch.
- This is done by signing a \emph{RevokeBatch} event with a \emph{revocation} key associated to the issuer DID.
- The signed event contains the merkle root to revoke.
-\item They revoke specific credentials associated to a batch.
- This is done by signing a \emph{RevokeCredentials} event with a \emph{revocation} key associated to the issuer DID.
- The signed event contains both the merkle root associated to the credentials to revoke, and the hashes of the specific credentials to revoke.
-\end{enumerate}
-
-In both variants, the event is published on-chain and processed by all the participants. The participants will timestamp the new information with the carrier transaction time.
-
-\subsection{Credential presentation and verification}
-
-Once they receive credentials from issuers, subjects will present them to interested parties, called \emph{verifiers}. For example, a student may receive a verifiable credential from a university, and would like to present his credential to a potential employer. In our setting, the verifier will be a party following our protocol events from the blockchain. The steps to present and verify a credential are the following:
-\begin{itemize}
-\item We assume a safe communication channel between the subject and verifier.
- We also assume that the channel can be identified by an identifier $ch$.
-\item The subject shares his credential and merkle inclusion proof to the verifier.
-\item The verifier then:
- \begin{itemize}
- \item computes the merkle root from the inclusion proof and the credential hash;
- \item extracts the issuer DID and signing key id from the credential claims,
- \item retrieves from the state he computed from the blockchain, the timestamps associated to the merkle root and the issuing key,
- \item checks if the merkle root or credential hash has been revoked
- \item validates the issuer signature on the credential, and determines if it was signed at a time when the issuer key was valid.
- \end{itemize}
- \textbf{Note:} The signature of the IssueBatch event has already been verified by the protocol participants at the time of batch publication.
-\item At this point, the verifier knows that the credential was properly signed. Now, he shares a nonce with the subject and asks him to sign it with a key associated to his DID.
-\item The subject signs the hash of $nonce || ch$ and returns the signature to the verifier.
-\item The verifier now checks the signature and concludes that the credential subject is indeed the person presenting it.
-\end{itemize}
-
-%----------------------------------------------------------------------------------------
-% PROPOSED IDEAS
-%----------------------------------------------------------------------------------------
-
-% \section{Ideas to add missing properties}
-\section{Further thoughts}
-
-\subsection{Increasing scalability}
-
-In the previous sections we described a protocol that allows transparent management of decentralized identifiers and credentials.
-The protocol's use of a blockchain addresses three points:
-\begin{itemize}
-\item Events ordering: All participants can agree on the order in which any pair of events occurred
-\item Fork protection (safety): There is no point in time where a valid sequence of processed protocol events could become invalid, nor replaced by
- another sequence
-\item Data transmission: All participants receive the same sequence of protocol events as they are transmitted through the blockchain
-\end{itemize}
-
-The second point has particular relevance to guarantee that the history of events can be trusted. For instance, imagine Alice controls a DID (its
-associated master keys) that represents a real world object, like a car. If Alice wants to sell the car and transfer the corresponding DID, she
-would perform a DID update that revokes all the master keys controlled by her, and adds a new master key controlled by the new car owner. If Alice
-could later "undo" this update, the DID ownership transfer could not be trusted.
-
-The properties brought by the blockchain come at a cost. Namely, the protocol throughput (number of events that participants can attach in
-transactions per unit of time) is bounded by the blockchain throughput. In order to scale the throughput, we have reviewed Sidetree's \footnote{https://identity.foundation/sidetree/spec/} approach. The high level idea behind Sidetree is to not publish individual events attached to blockchain
-transactions, but to publish a hash-link to an off-chain content addressable storage (CAS), like IPFS. The CAS will contain the file referenced by
-the hash-link, which contains a batch of many protocol events (allowing to transcend the size limits imposed by blockchain transactions).
-
-We identify an important drawback of this approach: the loss of the safety property. In the setting defined by Sidetree, participants can
-control changes to the past of their identifiers. This is, if Alice desires, she could create a DID by posting a file, $F_{1}$ on the CAS, and its
-hash on-chain; $F_{1}$ would contain Alice's $CreateDID$ event. Later, Alice can create a file $F_{2}$, containing an $UpdateDID$ event, post the hash
-of $F_{2}$ on-chain, but intentionally not post $F_{2}$ on the CAS. Then, Alice can create a third file $F_{3}$ which contains a new $UpdateDID$
-event that would be invalid if $F_{2}$ were revealed in the CAS (e.g. $F_{2}$ event could revoke the key that signs the update in $F_{3}$). However,
-Alice can post both $F_{3}$ in the CAS and the hash of $F_{3}$ on-chain. In Sidetree, this sequence of actions would lead all protocol participants
-to believe that Alice's DID is in certain state, produced by the $CreateDID$ of $F_{1}$ and the $UpdateDID$ from $F_{3}$.
-
-Alice has the power to reveal $F_{2}$ at any point, making the update from $F_{3}$ invalid, and forcing all participants to update Alice's
-DID to the state reflected by $F_{2}$. This is known as a \textbf{late publish attack}, and it makes it technically impossible to trust the past
-history of events in Sidetree implementations. As an example consequence, it is not possible to transfer ownership of a DID when using Sidetree.
-
-In order to avoid introducing this issue, we considered two options.
-
-The first one is to add a permissioned actor (or a federation of them) that is allowed to batch protocol events and publish both the on-chain hash-links
-and the corresponding files in the CAS. The special actor introduces a trust model that assumes it will always reveal files. If the actor fails to
-reveal a file, the protocol participants stop processing further batches until the missing file is revealed. Such a rule is not possible in Sidetree because
-any participant would be able to freeze the system by not revealing a file.
-
-This protocol variation would still allow users to publish events on-chain directly, leaving space for some decentralization for those participants that do not want to depend on the centralized batcher.
-
-In order to use the batcher to publish the $UpdateDID$ events associated to a DID $D$, the owner of $D$ will have to declare on-chain that future
-updates for $D$ will be found on the off-chain batches. This public declaration is needed to avoid race conditions when a hash is on-chain but
-a file is not revealed.
-
-A second option consists of a more decentralized variation of the previous approach. Namely, allow any participant to propose itself as an event
-batcher. The proposal is performed by submitting an on-chain event declaring the batcher DID. Every user that would like to batch its events
-off-chain should publicly notify the batcher they will use. Only one batcher can be assigned to each DID; that is, the $UpdateDID$ events associated
-to a DID will be published by at most one batcher. The objective is, once again, to be able to identify missing files in a sequence, allowing
-participants to stop processing files out of order.
-
-In either approach, if the associated batcher stops publishing files, any DID registered to that batcher is ``frozen'' until the file is revealed.
-We could re-introduce some liveness protection for DID owners by requiring batches from the same batcher to be separated by at least $N$ underlying
-blockchain blocks, in order to give users that time to send a $Contention$ event that invalidates the events associated to a DID published in
-the previous batch. During the processing of batches, participants will wait for the contention period (the $N$ blocks) before applying events from a
-batch, in order to not apply the updates to contended DIDs.
-
-Notes:
-\begin{itemize}
-\item we could change the $CreateDID$ event to incorporate an optional batcher DID from the start; the batcher DID would be part of the initial state,
- binding the batcher to the DID suffix.
-\item we could support events to de-register from a batcher, and to switch batcher too.
-\item a user is free to batch his own DIDs' related events. This could be useful for a case like a car manufacturer that would like to batch all DIDs
- associated to their cars, allowing car owners to de-register from a batcher at will.
-\item a priori, we do not see a need to batch credential issuance/revocation events
-\end{itemize}
-
-At the time of this writing, we haven't implemented any of the above approaches. We have, however, implemented on-chain batching, which allows submitting multiple events in a single transaction. The events are processed in order per transaction. This has been helpful in use cases where multiple events coming from different entities are submitted to the same PRISM node for publication.
-
-\end{document}
diff --git a/docs/research/Makefile b/docs/research/Makefile
deleted file mode 100644
index 4f005cab8f..0000000000
--- a/docs/research/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-main:
- pdflatex article.tex
- bibtex article
- pdflatex article.tex
- pdflatex article.tex
-
-clean:
- rm article.aux article.log article.toc article.pdf article.bbl article.blg
diff --git a/docs/research/README.md b/docs/research/README.md
deleted file mode 100644
index fcad6322f2..0000000000
--- a/docs/research/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Protocol description
-
-This folder contains a compact description of the protocol behind PRISM.
-In order to compile the document, you only need `pdflatex` installed.
-Once installed you could either run:
-
-```bash
-$ pdflatex article.tex
-```
-
-or use the Makefile by running:
-
-```bash
-$ make
-```
-
-You can delete the generated files by running:
-
-```
-$ make clean
-```
diff --git a/docs/research/article.bib b/docs/research/article.bib
deleted file mode 100644
index 9d7efacef6..0000000000
--- a/docs/research/article.bib
+++ /dev/null
@@ -1,55 +0,0 @@
-@inproceedings{abr01,
- author = {Michel Abdalla and
- Mihir Bellare and
- Phillip Rogaway},
- editor = {David Naccache},
- title = {The Oracle Diffie-Hellman Assumptions and an Analysis of {DHIES}},
- booktitle = {Topics in Cryptology - {CT-RSA} 2001, The Cryptographer's Track at
- {RSA} Conference 2001, San Francisco, CA, USA, April 8-12, 2001, Proceedings},
- series = {Lecture Notes in Computer Science},
- volume = {2020},
- pages = {143--158},
- publisher = {Springer},
- year = {2001},
- url = {https://doi.org/10.1007/3-540-45353-9\_12},
- doi = {10.1007/3-540-45353-9\_12},
- timestamp = {Fri, 27 Dec 2019 21:26:42 +0100},
- biburl = {https://dblp.org/rec/conf/ctrsa/AbdallaBR01.bib},
- bibsource = {dblp computer science bibliography, https://dblp.org}
-}
-
-@inproceedings{davi01,
- author = {Don Davis},
- editor = {Yoonho Park},
- title = {Defective Sign {\&} Encrypt in S/MIME, PKCS{\#}7, MOSS, PEM, PGP,
- and {XML}},
- booktitle = {Proceedings of the General Track: 2001 {USENIX} Annual Technical Conference,
- June 25-30, 2001, Boston, Massachusetts, {USA}},
- pages = {65--78},
- publisher = {{USENIX}},
- year = {2001},
- url = {http://www.usenix.org/publications/library/proceedings/usenix01/davis.html},
- timestamp = {Mon, 01 Feb 2021 08:43:38 +0100},
- biburl = {https://dblp.org/rec/conf/usenix/Davis01.bib},
- bibsource = {dblp computer science bibliography, https://dblp.org}
-}
-
-@inproceedings{kraw01,
- author = {Hugo Krawczyk},
- editor = {Joe Kilian},
- title = {The Order of Encryption and Authentication for Protecting Communications
- (or: How Secure Is SSL?)},
- booktitle = {Advances in Cryptology - {CRYPTO} 2001, 21st Annual International
- Cryptology Conference, Santa Barbara, California, USA, August 19-23,
- 2001, Proceedings},
- series = {Lecture Notes in Computer Science},
- volume = {2139},
- pages = {310--331},
- publisher = {Springer},
- year = {2001},
- url = {https://doi.org/10.1007/3-540-44647-8\_19},
- doi = {10.1007/3-540-44647-8\_19},
- timestamp = {Tue, 14 May 2019 10:00:48 +0200},
- biburl = {https://dblp.org/rec/conf/crypto/Krawczyk01.bib},
- bibsource = {dblp computer science bibliography, https://dblp.org}
-}
diff --git a/docs/research/article.tex b/docs/research/article.tex
deleted file mode 100644
index 1967938f10..0000000000
--- a/docs/research/article.tex
+++ /dev/null
@@ -1,458 +0,0 @@
-\documentclass[10pt,a4paper]{article}
-\usepackage[margin=1in]{geometry}
-\usepackage{hyperref}
-\usepackage{xcolor}
-\usepackage{graphicx}
-
-\newcommand{\figref}[1]{Fig. \ref{#1}}
-\newcommand{\tabref}[1]{Table \ref{#1}}
-\newcommand{\lstref}[1]{Listing \ref{#1}}
-\newcommand{\secref}[1]{Section \ref{#1}}
-\newcommand{\appref}[1]{Appendix \ref{#1}}
-\newcommand{\todo}[1]{{\colorbox{red}{\bf TODO:}\textcolor{red}{#1}}}
-
-
-\title{PRISM protocol v0.3}
-
-\author{Atala PRISM team - IOG}
-
-\date{}
-
-\begin{document}
-
-
-\maketitle
-
-\begin{abstract}
-This document describes the current state of the protocol that supports PRISM. PRISM is a framework for the management of decentralized identifiers and verifiable credentials.
-\end{abstract}
-
-\setcounter{tocdepth}{3}
-
-\tableofcontents
-\newpage
-
-%----------------------------------------------------------------------------------------
-% INTRODUCTION
-%----------------------------------------------------------------------------------------
-
-\section{Introduction and protocol description}
-
-In this section we will informally describe a protocol to create and manage DIDs (\textbf{D}ecentralized \textbf{Id}entifiers), which also allows managing the creation, revocation and presentation of verifiable credentials.
-
-\subsection{DIDs management}
-
-For simplicity, we will define a DID as a string identifier of the form
-
-\begin{center}
- \emph{did:prism:$\langle$identifier$\rangle$}
-\end{center}
-
-Each DID is associated to a list of public keys. The list can be updated over time by adding and revoking keys. Each key has an assigned role during its lifetime. The three possible roles we describe are:
-
-\begin{description}
-\item[issuing keys] used to issue credentials on behalf of the DID.
-\item[revocation keys] used to revoke credentials on behalf of the DID.
-\item[master keys] used to add or revoke other keys associated to the DID.
-\end{description}
-
-Our construction for DID management relies on an underlying blockchain. The blockchain allows us to publish transactions with sufficiently large metadata. We use the blockchain both for data distribution among parties, and for consensus on the order of relevant events in our protocol. All the parties participating in our protocol are running a full node that reads the metadata of the blockchain transactions. When they find a protocol event, they process it to construct their view of the system.
-
-In the following sub-sections we describe the events in our protocol related to DID management.
-
-\subsubsection{DID creation}
-
-In order to create a DID, a user will follow the steps below:
-
-\begin{itemize}
-\item The user generates a desired list of public keys. He will associate each key to a key identifier and a role.
- There must be at least one key with the master role in this initial list.
-\item The resulting list is encoded and hashed producing an encoded \emph{initial\_state} and a \emph{hash}.
-\item The \emph{hash} is encoded in hexadecimal form producing an \emph{encoded\_hash}.
-\item The user DID is constructed as \emph{did:prism:encoded\_hash}.
-\item The user will send a signed \emph{CreateDID} event in the metadata of a blockchain transaction.
- The event contains the list of public keys with their identifiers and roles.
- The signature will be performed using the private key associated to any of the master keys in the initial list of keys.
-\end{itemize}
-
-Once the transaction is added to the blockchain with a sufficient number of confirmations $d$ (i.e. the block containing the transaction has $d$ blocks appended after it on the underlying chain), all the participants following our protocol will validate the signature of the CreateDID event. After verification, they will register the DID as \emph{published} along with all the keys posted in the initial list. The keys will be considered valid since the time associated to the event carrier transaction. This is a timestamp generated by the blockchain.
-
-Optionally, while waiting for blockchain confirmation, the user can also use the associated DID
-
-\begin{center}
-\emph{did:prism:encoded\_hash:initial\_state}
-\end{center}
-
-This DID, called \emph{long form} or \emph{unpublished} DID will not be recognised as \emph{published} by other parties in the protocol. However, other users would be able to verify that the list of keys encoded in \emph{initial\_state} corresponds to the DID \emph{did:prism:encoded\_hash}. The recipient of an unpublished DID needs to query the state of the \emph{short form} of the DID (the prefix before "\emph{:initial\_state}") to check for changes in the list of keys.
-
-\subsubsection{DID update}
-
-Updating a DID means that the user controlling a master key associated to it, will add new or revoke existing keys to the list associated to the DID. In order to update the list of keys, the user will:
-\begin{itemize}
-\item Create the list of key identifiers that the user wants to revoke from the current state of his DID.
-\item Create a list of keys he wants to add to the list of keys associated to his identifier.
- These keys must be associated to a role and a fresh key identifier.
-\item Create an \emph{UpdateDID} event that contains the two previously generated lists.
-\item Sign the event with one of the \emph{currently associated} master keys of the DID, and publish the event inside the metadata of a transaction.
-\end{itemize}
-
-When the event added to the blockchain has enough confirmations, all parties will process the event. That is, they will validate the signature and update their internal knowledge of the updated DID. The newly added keys and the revoked keys will be timestamped using the time of the carrier transaction.
-
-With these two events we have shown how to create and update DIDs.
-
-\subsubsection{Obtain the list of keys associated to a DID}
-
-Now, we can define the process that a protocol participant must follow to obtain the keys associated to a DID.
-We mentioned that a DID can be presented in long form (or as an unpublished DID), or in its short form. Hence, we will describe the process in the two cases.
-
-In order to obtain the keys associated to a DID in short form, we simply read the state we have constructed so far by processing the CreateDID and UpdateDID events we read from the blockchain. The list of keys associated to the DID is the one we see in our internal state. If we do not find the DID as a published one in our state, we return a \emph{DID unknown} response.
-
-Now, if the DID we receive is in long form, we first verify that $hash(initial\_state) = initial\_hash$. If that check fails we reply with an \emph{Invalid DID} response. If the check passes, we extract the short form of this DID (the prefix that ends right before the last \emph{":"}), and we check the list of keys as described in the previous paragraph. If the result of this process is a \emph{DID unknown} response, then we decode the list from the \emph{initial\_state} suffix and return this list as a result.
-
-With this process we complete the events and operations related to DIDs.
-Let us now move on to the events related to issuing and revoking credentials.
-
-\subsection{Verifiable credentials}
-
-Now that we have DIDs, we can proceed to the creation and revocation of credentials. A \emph{credential} to us is a set of \emph{claims}. Each claim is represented by a property-value pair, e.g. $(name, John)$. Credentials are created by \emph{issuers} to \emph{subjects}. Both issuers and subjects are represented with their corresponding DIDs.
-
-For practical reasons, we assume that issuers want to issue credentials in batches. This is, an issuer would like to create multiple credentials at once. Below we describe the steps to issue a batch of credentials.
-
-\subsubsection{Credentials batch issuance}
-
-Let us first define what a credential is in the context of this document.
-
-\begin{description}
-\item[Credential] a credential is a JSON document that contains three key-value maps.
- \begin{itemize}
- \item the "issuer" key represents the issuer DID.
- \item the "keyId" key represents the key identifier associated to the issuer DID that was used to sign the credential.
- \item the "claims" key represents claims that the issuer makes about the subject. The claims are grouped in a JSON object.
- There is a key for each claim asserted by the issuer. One particular claim is the subject DID, that represents a DID
- controlled by the subject of the credential.
- \end{itemize}
-\end{description}
-
-In the following steps, we assume the issuer and subjects have already established a secure communication channel.
-In order to issue a batch of an arbitrary number $N$ of credentials, the issuer will:
-\begin{itemize}
-\item Ask to each subject the DID they would like to use for the credential they will receive.
-\item The issuer creates the credentials for each subject. It uses his DID and the key id of an \emph{issuing key} to populate
- the credential corresponding fields. On each credential, it adds the corresponding subject DID as one of the claims.
-\item The issuer encodes each individual credential using a base64URL encoding.
-\item The issuer signs each individual encoded credential with an \emph{issuing key} associated to his publicly known DID.
- The signing key corresponds to the key id in the credentials' claims.
-\item The issuer encodes each signature, and concatenates them to their corresponding credentials using a dot (".") as separator.
- This produces strings of the form $\langle{}encoded\_credentials\rangle{}.\langle{}encoded\_signature\rangle{}$. We call these strings \emph{signed credentials}.
-\item Now, the issuer takes all the signed credentials, and computes a Merkle root from them.
-\item The issuer creates an \emph{IssueBatch} event that contains the Merkle root, and signs the event with an \emph{issuing key} associated to his publicly known DID.
-\textbf{Note}: Today we use the same issuing key for the event signature and the individual credentials signature, but those could be different keys.
-\item The issuer attaches the signed event to the metadata of a transaction and sends it to the blockchain.
-\item The issuer gives to each user their corresponding signed credential along with their associated Merkle inclusion proof.
-\end{itemize}
-
-As with previous protocol events, all parties will process the transaction once confirmed, validate the event signature, and timestamp the Merkle root with the time associated to the carrier transaction.
-
-Note that, to issue credential batches, the issuer DID \emph{must be published} already. However, users' DIDs can remain unpublished.
-
-\subsubsection{Credentials revocation}
-
-In order to revoke credentials, issuers have two alternatives:
-\begin{enumerate}
-\item They revoke all the credentials in a batch.
- This is done by signing a \emph{RevokeBatch} event with a \emph{revocation} key associated to the issuer DID.
- The signed event contains the Merkle root to revoke.
-\item They revoke specific credentials associated to a batch.
- This is done by signing a \emph{RevokeCredentials} event with a \emph{revocation} key associated to the issuer DID.
- The signed event contains both the Merkle root associated to the credentials to revoke, and the hashes of the specific credentials to revoke.
-\end{enumerate}
-
-In both variants, the event is published on-chain and processed by all the participants. The participants will timestamp the new information with the carrier transaction time.
-
-\subsection{Credential presentation and verification}
-
-Once they receive credentials from issuers, subjects will present them to interested parties, called \emph{verifiers}. For example, a student may receive a verifiable credential from a university, and would like to present his credential to a potential employer. In our setting, the verifier will be a party following our protocol events from the blockchain. The steps to present and verify a credential are the following:
-\begin{itemize}
-\item We assume a safe communication channel between the subject and verifier.
- We also assume that the channel can be identified by an identifier $ch$.
-\item The subject shares his credential and Merkle inclusion proof to the verifier.
-\item The verifier then:
- \begin{itemize}
- \item computes the Merkle root from the inclusion proof and the credential hash;
- \item extracts the issuer DID and signing key id from the credential claims,
- \item retrieves from the state he computed from the blockchain, the timestamps associated to the Merkle root and the issuing key,
- \item checks if the Merkle root or credential hash has been revoked
- \item validates the issuer signature on the credential, and determines if it was signed at a time when the issuer key was valid.
- \end{itemize}
- \textbf{Note:} The signature of the IssueBatch event has already been verified by the protocol participants at the time of batch publication.
-\item At this point, the verifier knows that the credential was properly signed. Now, he shares a nonce with the subject and asks him to sign it with a key associated to his DID.
-\item The subject signs the hash of $nonce || ch$ and returns the signature to the verifier.
-\item The verifier now checks the signature and concludes that the credential subject is indeed the person presenting it.
-\end{itemize}
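-
-As a minimal sketch of the verifier-side checks, assuming SHA-256 and an inclusion proof encoded as a list of (sibling hash, side) pairs (both are assumptions of this example, not protocol requirements):
-
-\begin{verbatim}
-import hashlib
-
-def verify_inclusion(credential_hash, proof, expected_root) -> bool:
-    # Recompute the Merkle root from the credential hash and the proof.
-    node = credential_hash
-    for sibling, side in proof:
-        pair = sibling + node if side == "left" else node + sibling
-        node = hashlib.sha256(pair).digest()
-    return node == expected_root
-
-def subject_challenge(nonce: bytes, ch: bytes) -> bytes:
-    # The subject signs H(nonce || ch) with a key tied to its DID; the
-    # verifier recomputes the same hash before checking that signature.
-    return hashlib.sha256(nonce + ch).digest()
-\end{verbatim}
-
-The remaining checks (timestamps of the issuing key and the Merkle root, revocation status, and the issuer's signature on the credential) are look-ups against the state the verifier derived from the blockchain.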
-
-\subsection{Specific Sub-Protocols}
-\label{ssec:subproto}
-
-\subsubsection{End-to-End Encrypted Data Exchange via the Connector}
-\label{sssec:e2eeconnector}
-
-Assuming each participant of the system owns a (set of) DIDs, we want to let
-holders receive new credentials from issuers, in a secure -- yet convenient --
-manner. We describe a protocol for this purpose. At the moment, we require
-that issuer and prospective holder can perform a one-time physical interaction
-with each other or, alternatively, that they have a secure channel at their
-disposal (although possibly cumbersome to use, hence we only require using
-it once).
-
-In a nutshell, the proposed protocol (see \figref{fig:e2econnector}) is composed
-of two phases. In the first phase, the issuer shares a QR code with the holder.
-This QR code contains the issuer's public key (or, maybe, its DID), a random
-value ($code$, in \figref{fig:e2econnector}), and a session identifier ($token$,
-in \figref{fig:e2econnector}). This exchange takes place through a secure (i.e.,
-confidential and authenticated) channel, such as physical exchange. Probably
-immediately, although also possibly at a later point in time, the prospective
-holder sends back her own public key (or, alternatively, her DID) authenticated
-with a MAC, where we use the random value
-in the QR code as secret key. The issuer uses the session identifier to locate
-the corresponding session and, now that it received in an authenticated manner
-the public key of the holder, can leverage hybrid encryption techniques to
-establish an end-to-end encrypted session, e.g., using ECIES \cite{abr01}.
-Ensuring end-to-end encryption and authentication is important at this point, as
-all the information exchanged between issuer and holder is routed through the
-connector: an entity or set of entities who ensure that asynchronous
-communication can take place between issuer and holder. Thus, ECIES is crucial
-to ensure the secrecy of the information, end-to-end. Since the holder needs to
-have certainty that the data it receives comes from the issuer (and ECIES does
-not authenticate the sender), we have the issuer digitally sign the data it
-sends through the ECIES-secured channel. Note that we could involve the random
-value shared via the QR code for this. However, that would force us to re-execute
-the first phase of the protocol if the QR code were compromised after the first
-phase has been completed. Digital signatures using the issuer's
-key pair prevent this.
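-
-A minimal sketch of the first phase, using HMAC-SHA256 as the MAC (an assumption of the sketch; the protocol only requires a secure MAC):
-
-\begin{verbatim}
-import hashlib
-import hmac
-
-def holder_reply(holder_pub: bytes, token: bytes, code: bytes):
-    # The holder authenticates her public key (or DID) with a MAC keyed by
-    # the random value taken from the QR code.
-    tag = hmac.new(code, holder_pub, hashlib.sha256).digest()
-    return token, holder_pub, tag
-
-def issuer_accepts(stored_code: bytes, holder_pub: bytes, tag: bytes) -> bool:
-    # The issuer locates the session via the token (not shown) and verifies
-    # the MAC before using the received public key for hybrid encryption.
-    expected = hmac.new(stored_code, holder_pub, hashlib.sha256).digest()
-    return hmac.compare_digest(expected, tag)
-\end{verbatim}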
-
-Note that we are combining signature and encryption (plus, the MAC-based
-authentication natively provided by ECIES). This places us in the context put
-forward in \cite{davi01}, that discusses some good practices when combining such
-primitives. In a nutshell, we follow the sign-then-encrypt approach%
-\footnote{What we call here ``sign-then-encrypt'' is called ``Sign \& Encrypt''
- in \cite{davi01}. However, we find the naming convention of \cite{kraw01}
- more explicit, as it is more consistent
- when referring to the order in which the primitives are used -- an order that, in
- these cases, is crucial.}, combined with including the public
-keys of both holder and issuer into the signed (and then encrypted) data.
-This intertwines the inner layer (the signature) with the outer layer (the
-encryption), ensuring that any alteration on any of them will be detected.
-We note that there are other options that, a priori, are equally secure -- e.g.,
-encrypt-then-sign, including also the sender's public key in the encrypted data
-(but that would require separately signing a (hash of) the data, to ensure
-non-repudiation). Finally, it is worth noting that ECIES follows the
-encrypt-then-authenticate approach, which is proven to realize secure channels
-in \cite{kraw01}, as long as the underlying encryption scheme is IND-CPA secure,
-and the underlying MAC function is secure -- but this is independent of the
-sign-then-encrypt approach.
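-
-The following sketch illustrates the sign-then-encrypt layering with both public keys bound into the signed data. The JSON encoding and the \texttt{sign} and \texttt{ecies\_encrypt} callbacks are placeholders for the real primitives:
-
-\begin{verbatim}
-import json
-
-def sign_then_encrypt(credential: bytes, issuer_pub: bytes,
-                      holder_pub: bytes, sign, ecies_encrypt) -> bytes:
-    # Inner layer: the signature covers the credential *and* both public
-    # keys, intertwining it with the outer encryption layer.
-    body = json.dumps({
-        "issuer_pk": issuer_pub.hex(),
-        "holder_pk": holder_pub.hex(),
-        "credential": credential.hex(),
-    }, sort_keys=True).encode()
-    envelope = {"body": body.hex(), "signature": sign(body).hex()}
-    # Outer layer: ECIES-style hybrid encryption under the holder's public
-    # key (ECIES itself applies encrypt-then-MAC to this plaintext).
-    return ecies_encrypt(holder_pub, json.dumps(envelope).encode())
-\end{verbatim}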
-
-While the protocol is simple and based on common cryptographic constructions,
-it is easy to get such protocols wrong, as has been pointed out on numerous occasions
-\cite{davi01, kraw01}. Hence, we have used Tamarin%
-\footnote{https://tamarin-prover.github.io/ (Last access, October 5th, 2021.)}
-to model our protocol% \footnote{Source code for the model available here
- % \url{https://github.com/input-output-hk/atala-prism/blob/research-tamarin-connector/prism-backend/research/formalizations/connector.spthy}
- % \todo{Update to final URL.}}
-, which is schematized in \figref{fig:e2econnector}.
-
-\begin{figure}[ht!]
- \centering
- \includegraphics[scale=0.4]{figures/connector.png}
- \caption{Sequence diagram for the E2EE protocol through the connector.}
- \label{fig:e2econnector}%
-\end{figure}
-
-The security properties that we prove are as follows (we refer to the actual
-model for the formal definition):
-
-\begin{description}
-\item[Code secrecy.] Codes exchanged via a QR code are always confidential.
- Note that this is trivial, as the code is exchanged via a secure channel and
- the holder never sends it back in the clear.
-\item[Code injectivity.] Every QR code that is received by a holder, allegedly
- sent by an issuer, was sent before by that issuer. Again, this is trivial due
- to the secure channel used to exchange the QR code.
-\item[Holder's public key authenticity.] Every message received by an issuer
- in step 3, containing a public key for key-exchange usage, that was
- allegedly sent by a holder, was indeed previously sent by that holder.
- This is ensured through the message authentication code (MAC) that
- leverages the random QR code exchanged via the secure channel, which is
- only known to the issuer and the holder.
-\item[Holder's public key injectivity.] For every session in which an
- issuer receives in step 3 a message, allegedly sent by a holder in
- step 2, containing the holder's public key, it was that holder, in
- the same session, who sent the public key in step 2.
-\item[Credential secrecy.] This property has two variants: it ensures
- secrecy for credentials sent by an honest issuer to a holder that was honest
- at the moment of sending the credential, and is not compromised afterwards;
- and it also ensures secrecy for credentials received by an honest holder,
- allegedly from an honest issuer, if both holder and issuer were honest when
- the credential is received, and they are not corrupted afterwards. Together
- with the credential authenticity property shown next, it ensures that only
- (honest) holder and issuer learn the contents of a credential.
-\item[Credential authenticity.] All messages containing a credential, received
- by a holder at step 5, were indeed sent by an issuer and addressed to that
- same holder, as long as both issuer and holder keys are not compromised.
-\item[Credential non-injectivity.] All messages containing a credential, received
- by a holder at step 5, were sent by an issuer and addressed to that holder
- at step 4. Note, however, that both messages in steps 4 and 5 don't necessarily
- pertain to the same ``session'' (in instantiations where parallel sessions
- may occur). However, our current understanding is that this does not pose a
- risk, as in any case the message is sent by the issuer and received by the
- intended holder. For the sake of future reference, we give an illustrative
- flow of this ``non-injective'' execution, in \appref{app:non-inj-e2ee}.
-\end{description}
-
-%----------------------------------------------------------------------------------------
-% PROPERTIES
-%----------------------------------------------------------------------------------------
-
-\section{Properties we believe we have}
-
-\paragraph{Unforgeability.} A user cannot forge issuer-generated credentials.
-\paragraph{Privacy against issuers.} An issuer does not learn whether a user (who acquired a credential from the issuer) has interacted with a given verifier (assuming no cooperation between issuer and verifiers).
-\paragraph{Revocability.} An issuer can revoke a previously-issued credential without users’ cooperation.
-\paragraph{Protection against credential reuse.} A verifier can ensure no credential is used by different DIDs.
-\paragraph{Deferred and offline verification checks.} A verifier can verify the revocation status of a credential asynchronously without users’ participation. A verifier can also verify a credential without an internet connection, in which case the verification result is with respect to the last time the verifier synchronized with blockchain events.
-\paragraph{Resistance to key rotation.} If an issuer's issuing key is revoked, credentials already issued by that key remain valid. This means that the issuer does not need to re-issue the credentials signed by a compromised issuing key. This is possible due to the fact that key revocation and credential issuances are timestamped. Hence, if an issuing key is revoked, credentials already issued remain valid unless they are explicitly revoked.
-\paragraph{Independence of issuers and verifiers.} Verifiers only need to know which issuer DIDs they trust. They do not need to contact issuers to query issuance nor revocation lists.
-\paragraph{Past preservation.} Any party that verifies a credential has non-repudiable information about when the credential was valid, even if it has since been revoked.
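-
-To make the timestamp argument behind resistance to key rotation and past preservation concrete, the following sketch shows the comparison a verifier performs (timestamps are abstracted as integers; the names are illustrative only):
-
-\begin{verbatim}
-def issued_while_key_valid(key_added_at: int, key_revoked_at,
-                           batch_timestamp: int) -> bool:
-    # A credential stays valid if its IssueBatch was timestamped while the
-    # issuing key was valid, even if the key is revoked afterwards.
-    if batch_timestamp < key_added_at:
-        return False
-    return key_revoked_at is None or batch_timestamp < key_revoked_at
-\end{verbatim}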
-
-\section{Properties we are not offering}
-
-\paragraph{Unlinkability.} A coalition of verifiers cannot link activities of users across verifiers.
-\paragraph{Minimal disclosure.} A user only reveals what is necessary to establish a relationship with a verifier.
-
-%----------------------------------------------------------------------------------------
-% PROPOSED IDEAS
-%----------------------------------------------------------------------------------------
-
-% \section{Ideas to add missing properties}
-\section{Further thoughts}
-
-\subsection{Scaling}
-
-In the previous sections we described a protocol that allows transparent management of decentralized identifiers and credentials.
-The protocol's use of a blockchain addresses three points:
-\begin{itemize}
-\item Events ordering: All participants can agree on the order in which any pair of events occurred
-\item Fork protection (safety): There is no point in time where a valid sequence of processed protocol events could become invalid, nor replaced by
- another sequence
-\item Data transmission: All participants receive the same sequence of protocol events as they are transmitted through the blockchain
-\end{itemize}
-
-The second point is particularly relevant to guarantee that the history of events can be trusted. For instance, imagine Alice controls a DID (i.e., its
-associated master keys) that represents a real-world object, like a car. If Alice wants to sell the car and transfer the corresponding DID, she
-would perform a DID update that revokes all the master keys controlled by her and adds a new master key controlled by the new car owner. If Alice
-could later "undo" this update, the DID ownership transfer could not be trusted.
-
-The properties brought by the blockchain come at a cost. Namely, the protocol throughput (the number of events that participants can attach to
-transactions per unit of time) is bounded by the blockchain throughput. In order to scale the throughput, we have reviewed Sidetree's approach. The
-idea behind Sidetree is to not publish individual events attached to blockchain transactions, but to publish a hash-link to an off-chain content
-addressable storage (CAS), like IPFS. The CAS will contain the file referenced by the hash-link, which contains a batch of many protocol events
-(allowing the batch to transcend the size limits imposed by blockchain transactions).
-
-We identify a drawback of this approach, namely a loss of the safety property. In the setting defined by Sidetree, participants can
-control changes to the past of their identifiers. That is, if Alice desires, she could create a DID by posting a file $F_{1}$ on the CAS and its
-hash on-chain; $F_{1}$ would contain Alice's $CreateDID$ event. Later, Alice can create a file $F_{2}$ containing an $UpdateDID$ event, post the hash
-of $F_{2}$ on-chain, but intentionally not post $F_{2}$ on the CAS. Then, Alice can create a third file $F_{3}$ which contains a new $UpdateDID$
-event that would be invalid if $F_{2}$ were revealed in the CAS (e.g., the $F_{2}$ event could revoke the key that signs the update in $F_{3}$). However,
-Alice can post both $F_{3}$ in the CAS and the hash of $F_{3}$ on-chain. In Sidetree, this sequence of actions would lead all protocol participants
-to believe that Alice's DID is in a certain state, produced by the $CreateDID$ of $F_{1}$ and the $UpdateDID$ from $F_{3}$.
-
-Alice has the power to reveal $F_{2}$ at any point, making the update from $F_{3}$ invalid, and forcing all participants to update Alice's
-DID to the state reflected by $F_{2}$. This is known as a \textbf{late publish attack}, and it makes it technically impossible to trust the past
-history of events in Sidetree implementations. As an example consequence, it is not possible to transfer ownership of a DID when using Sidetree.
-
-In order to avoid introducing this issue, we considered two options.
-
-The first one is to add a permissioned actor (or a federation of them) that is allowed to batch protocol events and publish both the on-chain hash-links
-and the files in the corresponding CAS. This special actor introduces a trust model that assumes it will always reveal files. If the actor fails to
-reveal a file, the protocol participants stop processing further batches until the missing file is revealed. Such a halting rule is not possible in Sidetree because
-any participant would be able to freeze the system by not revealing a file. The protocol would still allow users to publish events on-chain directly,
-leaving space for some decentralization for those participants that do not want to depend on the centralized batcher.
-
-In order to use the batcher to publish the $UpdateDID$ events associated to a DID $D$, the owner of $D$ will have to declare on-chain that future
-updates for $D$ will be found in the off-chain batches. This public declaration is needed to avoid race conditions when a hash is on-chain but
-the corresponding file is not revealed.
-
-A second option consists of a more decentralized variation of the previous approach. Namely, allow any participant to propose itself as an event
-batcher. The proposal is performed by submitting an on-chain event declaring the batcher DID. Every user that would like to batch their events
-off-chain should publicly declare the batcher they will use. Only one batcher can be assigned to each DID; that is, the $UpdateDID$ events associated
-to a DID will be published by at most one batcher. The objective is, once again, to be able to identify missing files in a sequence, allowing
-participants to stop processing files out of order.
-
-In either approach, if the associated batcher stops publishing files, the DIDs registered to that batcher are "frozen" until the missing file is revealed.
-We could re-introduce some liveness protection for DID owners by requiring batches from the same batcher to be separated by at least $N$ underlying
-blockchain blocks, in order to give users that time to send a $Contention$ event that invalidates the events associated to a DID published in
-the previous batch. During the processing of batches, participants will wait out the contention period (the $N$ blocks) before applying events from a
-batch, so that updates to contended DIDs are not applied.
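-
-A sketch of the batch-processing rule with the contention window (the data layout and the $N$-block bookkeeping are assumptions of the example):
-
-\begin{verbatim}
-def apply_batch(batch, current_block, n_blocks, contended_dids, process):
-    # Only apply a batch once N blocks have passed since it was anchored,
-    # and skip events for DIDs that were contended during that window.
-    if current_block < batch["anchor_block"] + n_blocks:
-        return False  # contention window still open
-    for event in batch["events"]:
-        if event["did"] not in contended_dids:
-            process(event)
-    return True
-\end{verbatim}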
-
-Notes:
-\begin{itemize}
-\item we could change the $CreateDID$ event to incorporate an optional batcher DID from the start; the batcher DID would be part of the initial state,
- binding the batcher to the DID suffix.
-\item we could support events to de-register from a batcher, and to switch batcher too.
-\item a user is free to batch his own DIDs' related events. This could be useful for a case like a car manufacturer that would like to batch all DIDs
- associated to their cars, allowing car owners to de-register from a batcher at will.
-\item a priori, we do not see a need to batch credential issuance/revocation events
-\end{itemize}
-
-\appendix
-
-\section{Example of Non-Injective Execution of Connector E2EE Protocol}
-\label{app:non-inj-e2ee}
-
-The diagram in \figref{fig:e2econnector-noninj} depicts a scenario in which a
-non-injective execution between the issuer and the holder takes place.
-
-First, we have to take into account that such an execution requires a somewhat
-more extended setting than the one we considered in \secref{sssec:e2eeconnector}. Namely,
-here we assume a (certainly realistic) scenario in which any prospective holder
-has several computing devices -- e.g., a laptop and a smartphone. Similarly,
-the issuer instantiates several concurrent services -- depicted as
-\emph{Instance 1} and \emph{Instance 2} in \figref{fig:e2econnector-noninj}.
-Still, the same user operates both computing devices, which share the same
-private values; and all issuer instances have access to the same database with
-holders' information, and the issuer's private keys.
-
-\begin{figure}[ht!]
- \centering
- \includegraphics[scale=0.32]{figures/connector-noninjective.png}
- \caption{Sequence diagram of a sample non-injective execution of the E2EE
- connector protocol}
- \label{fig:e2econnector-noninj}%
-\end{figure}
-
-In the diagram, the holder requests a QR code once with each device -- for this,
-we envision an extended setting in which the holder can request QR codes
-securely (e.g., through signed email). The smartphone communicates with Instance
-2 of the issuer, while the laptop communicates with Instance 1. Eventually, the
-holder decides to complete the process from her laptop. The connector forwards
-the request to Instance 1, which stores it in the database. Then, it might
-happen that Instance 2 is the one that takes care of finalizing the request:
-it fetches the necessary data from the shared database and sends the result to
-the holder's phone (which is the computing device that communicated with
-Instance 2). This lack of correspondence between who initiates a request and
-who responds to it is what is formally referred to as non-injectivity.
-Theoretically, this can perfectly well happen in our proposed protocol, as there is
-nothing preventing such behaviour in a cryptographic manner, as our model shows.
-However, two observations are relevant:
-
-\begin{itemize}
-\item The authenticity and secrecy properties still ensure that only the holder
- and issuer have access to the data that is sent by the issuer, and that it
- indeed originates from the issuer.
-\item Such behaviour, if deemed unwanted, could easily be prevented through
- proper session handling at the implementation level.
-\end{itemize}
-
-\bibliographystyle{plain}
-\bibliography{article}
-
-\end{document}
diff --git a/docs/research/figures/connector-noninjective.png b/docs/research/figures/connector-noninjective.png
deleted file mode 100644
index 9c6a7a978f..0000000000
Binary files a/docs/research/figures/connector-noninjective.png and /dev/null differ
diff --git a/docs/research/figures/connector.png b/docs/research/figures/connector.png
deleted file mode 100644
index 8963c2652d..0000000000
Binary files a/docs/research/figures/connector.png and /dev/null differ
diff --git a/docs/research/formalizations/README.md b/docs/research/formalizations/README.md
deleted file mode 100644
index ce75ff0b7c..0000000000
--- a/docs/research/formalizations/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Quick Intro
-
-To install Tamarin, follow the installation section in [Tamarin's tutorial](https://tamarin-prover.github.io/manual/tex/tamarin-manual.pdf).
-
-Then, to run the tool on the model, execute:
-
-```
-$ tamarin-prover connector.spthy --prove
-```
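-
-If you want to inspect the proofs step by step instead of running them in batch mode, Tamarin's interactive mode (which opens a local web GUI) should also work on the model:
-
-```
-$ tamarin-prover interactive connector.spthy
-```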
diff --git a/docs/research/formalizations/connector-ets.spthy b/docs/research/formalizations/connector-ets.spthy
deleted file mode 100644
index e59b9ca16b..0000000000
--- a/docs/research/formalizations/connector-ets.spthy
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * Description: Model for e2e based on QR code sharing and ECIES
- * Encrypt-then-Sign approach.
- * Date: 20210922
- * Authors: Jesus Diaz Vico, IOHK Atala and Research teams.
- */
-
-theory Connector
-begin
-
-/*
- The protocol we want to model is, in Alice and Bob notation, as follows:
-
- (I stands for Issuer, H for Holder, C for connector)
- ( => is secure channel; -> is normal channel )
-
- 2. H -> C: <tid, pk_hol>, mac(pk_hol, code)
- 3. C -> I: <tid, pk_hol>, mac(pk_hol, code)
- 3. C -> I: mac(pk_hol, code)
- 4. I: Fetch code used for session tid
- Verify mac(pk_hol, code)
- r = Fresh(Z)
- k = (pk_hol)^r
- k_1 || k_2 = kdf(k)
- c = senc((pk_iss, cred), k_1)
- s = sig((c, cred), sk_iss)
- m = mac(c, k_2)
- 5. I -> C: <g^r, c, s, m>
- 6. C -> H: <g^r, c, s, m>
- 7. H: k = (g^r)^u
- k_1 || k_2 = kdf(k)
- m' = mac(c, k_2)
- Assert m = m'
- (pk_iss, cred) = sdec(c, k_1)
- verify(sig, (c, cred), pk_iss)?
-
- Steps 1-3 are the QRdid protocol, whereby the Issuer gets assured that
- the holder's public key is pkH. It needs a secure channel.
-
- Steps 4-7 correspond to an execution of an ECIES protocol, whereby the
- Issuer leverages the holder's public key obtained in the previous steps.
-
- After this, the holder is assured that the credential s/he received comes
- from the legitimate issuer, whereas the issuer is assured that only the
- intended holder received it. Of course, unless any of them is compromised.
-
- Note: The connector is not modelled here because it is treated essentially
- as an untrusted party -- hence, it *is* the adversary, which is natively
- modelled by Tamarin as a Dolev-Yao adversary.
-
-*/
-
-builtins: hashing, symmetric-encryption, asymmetric-encryption, signing,
- diffie-hellman
-
-/* Note: splitL, splitR and merge functions are aimed to be used for
- splitting bitstrings to be used as keys. However, they do not model
- that leaking part of a key *does* leak much information about that
- key. Hence, for this modelling to be valid, we should check that all
- the larger bitstring and its splits are kept secret. */
-functions: kdf/1, splitL/1, splitR/1, merge/2, mac/2
-
-equations: merge(splitL(v),splitR(v)) = v
-
-/* Rule for sending a message through a secure (confidential & authenticated)
- channel. Used here for QR code sharing. */
-rule ChanOut_S:
- [ Out_S($A,$B,x) ]
- --[ ChanOut_S($A,$B,x) ]->
- [ Sec($A,$B,x) ]
-
-/* Rule for sending a message from a secure (confidential & authenticated)
- channel. Used here for QR code sharing. */
-rule ChanIn_S:
- [ Sec($A,$B,x) ]
- --[ ChanIn_S($A,$B,x) ]->
- [ In_S($A,$B,x) ]
-
-rule Register_pke:
- [ Fr(~x) ]
- --[ RegisteredE($A) ]->
- [ !Pke($A, 'g'^~x),
- !Ltke($A, ~x) ]
-
-rule Register_pks:
- [ Fr(~x) ]
- --[ RegisteredS($A) ]->
- [ !Pks($A, pk(~x)),
- !Ltks($A, ~x) ]
-
-rule Reveal_ltke:
- [ !Ltke(A, ltke) ]
- --[ LtkeReveal(A) ]->
- [ Out(ltke) ]
-
-rule Reveal_ltks:
- [ !Ltks(A, ltks) ]
- --[ LtksReveal(A) ]->
- [ Out(ltks) ]
-
-/* Rule for fetching an existing DH public key. */
-rule Get_pke:
- [ !Pke(A, pk) ]
- -->
- [ Out(pk) ]
-
-/* Rule for fetching an existing public key for signature verification. */
-rule Get_pks:
- [ !Pks(A, pk) ]
- -->
- [ Out(pk) ]
-
-/* Rule for initializing the issuer */
-rule Issuer_Init:
- [ Fr(~id), !Ltks($I, ~lkIs) ]
- --[ Create($I, ~id), Role('I') ]->
- [ St_Issuer_0($I, ~id, ~lkIs, pk(~lkIs)) ]
-
-/* Rule for initializing the holder */
-rule Holder_Init:
- [ Fr(~id), !Ltke($H, ~lkHe) ]
- --[ Create($H, ~id), Role('H') ]->
- [ St_Holder_0($H, ~id, ~lkHe, 'g'^~lkHe) ]
-
-/* The issuer shares the QR code via the secure channel. */
-rule Issuer_1:
- [ Fr(~code),
- Fr(~tid),
- St_Issuer_0($I, ~id, ~lkIs, pkIs) ]
- --[ Running_I_1($I, $H, <'H', 'I', <~tid, ~code, pk(~lkIs)>>) ]->
- [ Out_S($I, $H, <~tid, ~code, pkIs>),
- St_Issuer_1($I, ~id, ~lkIs, $H, ~tid, ~code) ]
-
-/* The holder receives the QR code through the secure channel, and responds. */
-rule Holder_2:
- let mc = mac(pkHe, ~code) in
- [ In_S($I, $H, <~tid, ~code, pkIs>),
- St_Holder_0($H, ~id, ~lkHe, pkHe) ]
- --[ SecretCode(~code), Honest($H), Honest($I),
- Commit_H_1($H, $I, <'H', 'I', <~tid, ~code, pkIs>>),
- Running_H_1($H, $I, <'H', 'I', <~tid, pkHe, mc>>)]->
- [ Out(<<~tid, pkHe>, mc>),
- St_Holder_1($H, ~id, ~lkHe, pkHe, $I, pkIs) ]
-
-/* The issuer receives back the MAC'ed QR code, via an insecure channel,
- plus the Holder's public key. */
-rule Issuer_3:
- let mc_pkh = mac(pkHe, ~code)
- k = kdf(pkHe^~r)
- k1 = splitL(k)
- k2 = splitR(k)
- ccred = senc(<pk(~lkIs), ~cred>, k1)
- mc_cred = mac(ccred, k2)
- sig = sign(h(<~cred, ccred>), ~lkIs)
- in
- [ St_Issuer_1($I, ~id, ~lkIs, $H, ~tid, ~code),
- In(<<~tid, pkHe>, mc_pkh>),
- Fr(~r), Fr(~cred) ]
- --[ SecretCode(~code), SecretCred(~cred), Role('I'),
- Honest($H), Honest($I),
- AuthenticPK($H, <'H', 'I', ~tid, pkHe, mc_pkh>),
- Commit_I_1($I, $H, <'H', 'I', <~tid, pkHe, mc_pkh>>),
- Running_I_2($I, $H, <'H', 'I', >) ]->
- [ Out() ]
-
-/* The holder receives the ECIES-encrypted message by the issuer, and
- decrypts it. */
-rule Holder_4:
- let k = kdf(R^~lkHe)
- k1 = splitL(k)
- k2 = splitR(k)
- cred = snd(sdec(ccred, k1))
- mc_cred = mac(ccred, k2)
- in
- [ St_Holder_1($H, ~id, ~lkHe, pkHe, $I, pkIs),
- In() ]
- --[ Eq(pkIs, fst(sdec(ccred, k1))),
- Eq(verify(sig, h(<cred, ccred>), pkIs), true),
- SecretCred(cred),
- AuthenticCred($H, <'H', 'I', >),
- Honest($H), Honest($I), Role('H'),
- Commit_H_2($H, $I, <'H', 'I', >)]->
- []
-
-restriction Equality:
- "All x y #i. Eq(x,y) @i ==> x = y"
-
-/* Safety check */
-lemma executable:
- exists-trace
- "
- Ex I H idi idh #i1 #j1.
- Create(I, idi) @i1 & Create(H, idh) @j1
- & ( Ex m #i2 #j2. Running_I_1(I, H, m) @i2 & i1 < i2
- & Commit_H_1(H, I, m) @j2 & j1 < j2
- & ( Ex m2 #i3 #j3. Running_H_1(H, I, m2) @i3 & i2 < i3
- & Commit_I_1(I, H, m2) @j3 & j2 < j3
- & ( Ex m3 #i4 #j4. Running_I_2(I, H, m3) @i4 & i3 < i4
- & Commit_H_2(H, I, m3) @j4 & j3 < j4
- )
- )
- )
- "
-
-/* Secrecy of the code sent by the Issuer and back by the Holder */
-lemma code_secrecy:
- "All c #i. SecretCode(c) @ #i ==> not (Ex #j. K(c) @ #j)"
-
-/* Authenticity of the public key received by the Issuer from the Holder */
-lemma pkH_authenticity:
- "All H m #i. AuthenticPK(H, m) @ #i
- ==>
- (Ex I #j. Running_H_1(H, I, m) @ #j & #j < #i)"
-
-/* Injective agreement for secret code message exchange */
-lemma code_injective_agreement:
- "All H I t #i.
- Commit_H_1(H, I, t) @ i
- ==>
- (Ex #j. Running_I_1(I, H, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_H_1(H2, I2, t) @i2
- & not (#i2 = #i)))"
-
-/* Injective agreement for Holder's pk message exchange */
-lemma pkH_injective_agreement:
- "All H I t #i.
- Commit_I_1(I, H, t) @ i
- ==>
- (Ex #j. Running_H_1(H, I, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_I_1(I2, H2, t) @i2
- & not (#i2 = #i)))"
-
-/* Credential secrecy from the point of view of an honest issuer */
-lemma cred_secrecy_issuer:
- all-traces
- "(All c #i. SecretCred(c) @ #i & Role('I') @ #i
- ==>
- (not (Ex #j. K(c) @ #j)) |
- (Ex I #j. LtksReveal(I) @ #j & Honest(I) @ #i) |
- (Ex H #j. LtkeReveal(H) @ #j & Honest(H) @ #i)
- )"
-
-/* Credential secrecy from the point of view of an honest holder
- -- Requires also that the signing key of the issuer is not leaked! */
-lemma cred_secrecy_holder:
- all-traces
- "(All c #i. SecretCred(c) @ #i & Role('H') @ #i
- ==>
- (not (Ex #j. K(c) @ #j)) |
- (Ex H #j. LtkeReveal(H) @ #j & Honest(H) @ #i) |
- (Ex I #j. LtksReveal(I) @ #j & Honest(I) @ #i)
- )"
-
-/* Credential authenticity: it comes from the Issuer
- -- unless issuer and receiving holder are compromised. */
-lemma cred_auth:
- "All H m #i. AuthenticCred(H, m) @i
- ==>
- ((Ex I #j. Running_I_2(I, H, m) @j & j < i) |
- (Ex I #j. LtksReveal(I) @j) |
- (Ex #j. LtkeReveal(H) @j))"
-
-/* Injective agreement for credential message exchange */
-lemma cred_injective_agreement:
- "All H I t #i.
- Commit_H_2(H, I, t) @ i
- ==>
- (Ex #j. Running_I_2(I, H, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_H_2(H2, I2, t) @i2
- & not (#i2 = #i)))"
-
-/* Non-Injective agreement for credential message exchange */
-lemma cred_noninjective_agreement:
- "All H I t #i.
- Commit_H_2(H, I, t) @ i
- ==>
- (Ex #j. Running_I_2(I, H, t) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-/* Weak agreement for credential message exchange */
-lemma cred_weak_agreement:
- "All H I t1 #i.
- Commit_H_2(H, I, t1) @i
- ==> (Ex t2 #j. Running_I_2(I, H, t2) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-/* Aliveness for credential message exchange */
-lemma cred_aliveness:
- "All H I t #i.
- Commit_H_2(H, I, t) @i
- ==> (Ex id #j. Create(I, id) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-end
diff --git a/docs/research/formalizations/connector-flaw.spthy b/docs/research/formalizations/connector-flaw.spthy
deleted file mode 100644
index 2bc0b7fbfa..0000000000
--- a/docs/research/formalizations/connector-flaw.spthy
+++ /dev/null
@@ -1,299 +0,0 @@
-/**
- * Description: Model for e2e based on QR code sharing and ECIES
- * Illustrative flawed version based on naive Sign-then-Encrypt.
- * Date: 20210922
- * Authors: Jesus Diaz Vico, IOHK Atala and Research teams.
- */
-
-theory Connector
-begin
-
-/*
- The protocol we want to model is, in Alice and Bob notation, as follows:
-
- (I stands for Issuer, H for Holder, C for connector)
- ( => is secure channel; -> is normal channel )
-
- 1. I => H: tid, pk_iss, code // Shared via QR, i.e., secure channel
- 2. H -> C: <tid, pk_hol>, mac(pk_hol, code)
- 3. C -> I: <tid, pk_hol>, mac(pk_hol, code)
- 4. I: Fetch code used for session tid
- Verify mac(pk_hol, code)
- r = Fresh(Z)
- k = (pk_hol)^r
- k_1 || k_2 = kdf(k)
- c = senc(cred, k_1)
- s = sig(h(c), sk_iss)
- m = mac(c, k_2)
- 5. I -> C: <g^r, c, s, m>
- 6. C -> H: <g^r, c, s, m>
- 7. H: k = (g^r)^u
- k_1 || k_2 = kdf(k)
- m' = mac(c, k_2)
- Assert m = m'
- verify(sig, h(c), pk_iss)?
- cred = sdec(c, k_1)
-
- Steps 1-3 are the QRdid protocol, whereby the Issuer gets assured that
- the holder's public key is pkH. It needs a secure channel.
-
- Steps 4-7 correspond to an execution of an ECIES protocol, whereby the
- Issuer leverages the holder's public key obtained in the previous steps.
-
- After this, the holder is assured that the credential s/he received comes
- from the legitimate issuer, whereas the issuer is assured that only the
- intended holder received it. Of course, unless any of them is compromised.
-
- Note: The connector is not modelled here because it is treated essentially
- as an untrusted party -- hence, it *is* the adversary, which is natively
- modelled by Tamarin as a Dolev-Yao adversary.
-
-*/
-
-builtins: hashing, symmetric-encryption, asymmetric-encryption, signing,
- diffie-hellman
-
-/* Note: splitL, splitR and merge functions are aimed to be used for
- splitting bitstrings to be used as keys. However, they do not model
- that leaking part of a key *does* leak much information about that
- key. Hence, for this modelling to be valid, we should check that all
- the larger bitstring and its splits are kept secret. */
-functions: kdf/1, splitL/1, splitR/1, merge/2, mac/2
-
-equations: merge(splitL(v),splitR(v)) = v
-
-/* Rule for sending a message through a secure (confidential & authenticated)
- channel. Used here for QR code sharing. */
-rule ChanOut_S:
- [ Out_S($A,$B,x) ]
- --[ ChanOut_S($A,$B,x) ]->
- [ Sec($A,$B,x) ]
-
-/* Rule for sending a message from a secure (confidential & authenticated)
- channel. Used here for QR code sharing. */
-rule ChanIn_S:
- [ Sec($A,$B,x) ]
- --[ ChanIn_S($A,$B,x) ]->
- [ In_S($A,$B,x) ]
-
-rule Register_pke:
- [ Fr(~x) ]
- --[ RegisteredE($A) ]->
- [ !Pke($A, 'g'^~x),
- !Ltke($A, ~x) ]
-
-rule Register_pks:
- [ Fr(~x) ]
- --[ RegisteredS($A) ]->
- [ !Pks($A, pk(~x)),
- !Ltks($A, ~x) ]
-
-rule Reveal_ltke:
- [ !Ltke(A, ltke) ]
- --[ LtkeReveal(A) ]->
- [ Out(ltke) ]
-
-rule Reveal_ltks:
- [ !Ltks(A, ltks) ]
- --[ LtksReveal(A) ]->
- [ Out(ltks) ]
-
-/* Rule for fetching an existing DH public key. */
-rule Get_pke:
- [ !Pke(A, pk) ]
- -->
- [ Out(pk) ]
-
-/* Rule for fetching an existing public key for signature verification. */
-rule Get_pks:
- [ !Pks(A, pk) ]
- -->
- [ Out(pk) ]
-
-/* Rule for initializing the issuer */
-rule Issuer_Init:
- [ Fr(~id), !Ltks($I, ~lkIs) ]
- --[ Create($I, ~id), Role('I') ]->
- [ St_Issuer_0($I, ~id, ~lkIs, pk(~lkIs)) ]
-
-/* Rule for initializing the holder */
-rule Holder_Init:
- [ Fr(~id), !Ltke($H, ~lkHe) ]
- --[ Create($H, ~id), Role('H') ]->
- [ St_Holder_0($H, ~id, ~lkHe, 'g'^~lkHe) ]
-
-/* The issuer shares the QR code via the secure channel. */
-rule Issuer_1:
- [ Fr(~code),
- Fr(~tid),
- St_Issuer_0($I, ~id, ~lkIs, pkIs) ]
- --[ Running_I_1($I, $H, <'H', 'I', <~tid, ~code, pk(~lkIs)>>) ]->
- [ Out_S($I, $H, <~tid, ~code, pkIs>),
- St_Issuer_1($I, ~id, ~lkIs, $H, ~tid, ~code) ]
-
-/* The holder receives the QR code through the secure channel, and responds. */
-rule Holder_2:
- let mc = mac(pkHe, ~code) in
- [ In_S($I, $H, <~tid, ~code, pkIs>),
- St_Holder_0($H, ~id, ~lkHe, pkHe) ]
- --[ SecretCode(~code), Honest($H), Honest($I),
- Commit_H_1($H, $I, <'H', 'I', <~tid, ~code, pkIs>>),
- Running_H_1($H, $I, <'H', 'I', <~tid, pkHe, mc>>)]->
- [ Out(<<~tid, pkHe>, mc>),
- St_Holder_1($H, ~id, ~lkHe, pkHe, $I, pkIs) ]
-
-/* The issuer receives back the MAC'ed QR code, via an insecure channel,
- plus the Holder's public key. */
-rule Issuer_3:
- let mc_pkh = mac(pkHe, ~code)
- k = kdf(pkHe^~r)
- k1 = splitL(k)
- k2 = splitR(k)
- ccred = senc(~cred, k1)
- mc_cred = mac(ccred, k2)
- sig = sign(h(~cred), ~lkIs)
- in
- [ St_Issuer_1($I, ~id, ~lkIs, $H, ~tid, ~code),
- In(<<~tid, pkHe>, mc_pkh>),
- Fr(~r), Fr(~cred) ]
- --[ SecretCode(~code), SecretCred(~cred), Role('I'),
- Honest($H), Honest($I),
- AuthenticPK($H, <'H', 'I', ~tid, pkHe, mc_pkh>),
- Commit_I_1($I, $H, <'H', 'I', <~tid, pkHe, mc_pkh>>),
- Running_I_2($I, $H, <'H', 'I', >) ]->
- [ Out() ]
-
-/* The holder receives the ECIES-encrypted message by the issuer, and
- decrypts it. */
-rule Holder_4:
- let k = kdf(R^~lkHe)
- k1 = splitL(k)
- k2 = splitR(k)
- cred = sdec(ccred, k1)
- mc_cred = mac(ccred, k2)
- in
- [ St_Holder_1($H, ~id, ~lkHe, pkHe, $I, pkIs),
- In() ]
- --[ Eq(verify(sig, h(cred), pkIs), true),
- SecretCred(cred),
- AuthenticCred($H, <'H', 'I', >),
- Honest($H), Honest($I), Role('H'),
- Commit_H_2($H, $I, <'H', 'I', >)]->
- []
-
-restriction Equality:
- "All x y #i. Eq(x,y) @i ==> x = y"
-
-/* Safety check */
-lemma executable:
- exists-trace
- "
- Ex I H idi idh #i1 #j1.
- Create(I, idi) @i1 & Create(H, idh) @j1
- & ( Ex m #i2 #j2. Running_I_1(I, H, m) @i2 & i1 < i2
- & Commit_H_1(H, I, m) @j2 & j1 < j2
- & ( Ex m2 #i3 #j3. Running_H_1(H, I, m2) @i3 & i2 < i3
- & Commit_I_1(I, H, m2) @j3 & j2 < j3
- & ( Ex m3 #i4 #j4. Running_I_2(I, H, m3) @i4 & i3 < i4
- & Commit_H_2(H, I, m3) @j4 & j3 < j4
- )
- )
- )
- "
-
-/* Secrecy of the code sent by the Issuer and back by the Holder */
-lemma code_secrecy:
- "All c #i. SecretCode(c) @ #i ==> not (Ex #j. K(c) @ #j)"
-
-/* Authenticity of the public key received by the Issuer from the Holder */
-lemma pkH_authenticity:
- "All H m #i. AuthenticPK(H, m) @ #i
- ==>
- (Ex I #j. Running_H_1(H, I, m) @ #j & #j < #i)"
-
-/* Injective agreement for secret code message exchange */
-lemma code_injective_agreement:
- "All H I t #i.
- Commit_H_1(H, I, t) @ i
- ==>
- (Ex #j. Running_I_1(I, H, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_H_1(H2, I2, t) @i2
- & not (#i2 = #i)))"
-
-/* Injective agreement for Holder's pk message exchange */
-lemma pkH_injective_agreement:
- "All H I t #i.
- Commit_I_1(I, H, t) @ i
- ==>
- (Ex #j. Running_H_1(H, I, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_I_1(I2, H2, t) @i2
- & not (#i2 = #i)))"
-
-/* Credential secrecy from the point of view of an honest issuer */
-lemma cred_secrecy_issuer:
- all-traces
- "(All c #i. SecretCred(c) @ #i & Role('I') @ #i
- ==>
- (not (Ex #j. K(c) @ #j)) |
- (Ex I #j. LtksReveal(I) @ #j & Honest(I) @ #i) |
- (Ex H #j. LtkeReveal(H) @ #j & Honest(H) @ #i)
- )"
-
-/* Credential secrecy from the point of view of an honest holder
- -- Requires also that the signing key of the issuer is not leaked! */
-lemma cred_secrecy_holder:
- all-traces
- "(All c #i. SecretCred(c) @ #i & Role('H') @ #i
- ==>
- (not (Ex #j. K(c) @ #j)) |
- (Ex H #j. LtkeReveal(H) @ #j & Honest(H) @ #i) |
- (Ex I #j. LtksReveal(I) @ #j & Honest(I) @ #i)
- )"
-
-/* Credential authenticity: it comes from the Issuer
- -- unless issuer and receiving holder are compromised. */
-lemma cred_auth:
- "All H m #i. AuthenticCred(H, m) @i
- ==>
- ((Ex I #j. Running_I_2(I, H, m) @j & j < i) |
- (Ex I #j. LtksReveal(I) @j) |
- (Ex #j. LtkeReveal(H) @j))"
-
-/* Injective agreement for credential message exchange */
-lemma cred_injective_agreement:
- "All H I t #i.
- Commit_H_2(H, I, t) @ i
- ==>
- (Ex #j. Running_I_2(I, H, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_H_2(H2, I2, t) @i2
- & not (#i2 = #i)))"
-
-/* Non-Injective agreement for credential message exchange */
-lemma cred_noninjective_agreement:
- "All H I t #i.
- Commit_H_2(H, I, t) @ i
- ==>
- (Ex #j. Running_I_2(I, H, t) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-/* Weak agreement for credential message exchange */
-lemma cred_weak_agreement:
- "All H I t1 #i.
- Commit_H_2(H, I, t1) @i
- ==> (Ex t2 #j. Running_I_2(I, H, t2) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-/* Aliveness for credential message exchange */
-lemma cred_aliveness:
- "All H I t #i.
- Commit_H_2(H, I, t) @i
- ==> (Ex id #j. Create(I, id) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-end
diff --git a/docs/research/formalizations/connector-sae.spthy b/docs/research/formalizations/connector-sae.spthy
deleted file mode 100644
index 3403b7b52b..0000000000
--- a/docs/research/formalizations/connector-sae.spthy
+++ /dev/null
@@ -1,299 +0,0 @@
-/**
- * Description: Model for e2e based on QR code sharing and ECIES.
- * Sign & Encrypt approach.
- * Date: 20210922
- * Authors: Jesus Diaz Vico, IOHK Atala and Research teams.
- */
-
-theory Connector
-begin
-
-/*
- The protocol we want to model is, in Alice and Bob notation, as follows:
-
- (I stands for Issuer, H for Holder, C for connector)
- ( => is secure channel; -> is normal channel )
-
- 1. I => H: tid, pk_iss, code // Shared via QR, i.e., secure channel
- 2. H -> C: <tid, pk_hol>, mac(pk_hol, code)
- 3. C -> I: <tid, pk_hol>, mac(pk_hol, code)
- 4. I: Fetch code used for session tid
- Verify mac(pk_hol, code)
- r = Fresh(Z)
- k = (pk_hol)^r
- k_1 || k_2 = kdf(k)
- c = senc(cred, k_1)
- s = sig(h(cred, pkH), sk_iss)
- m = mac(c, k_2)
- 5. I -> C: <g^r, c, s, m>
- 6. C -> H: <g^r, c, s, m>
- 7. H: k = (g^r)^u
- k_1 || k_2 = kdf(k)
- m' = mac(c, k_2)
- Assert m' = m
- cred = sdec(c, k_1)
- verify(sig, h(cred, pkH), pk_iss)?
-
- Steps 1-3 are the QRdid protocol, whereby the Issuer gets assured that
- the holder's public key is pkH. It needs a secure channel.
-
- Steps 4-7 correspond to an execution of an ECIES protocol, whereby the
- Issuer leverages the holder's public key obtained in the previous steps.
-
- After this, the holder is assured that the credential s/he received comes
- from the legitimate issuer, whereas the issuer is assured that only the
- intended holder received it. Of course, unless any of them is compromised.
-
- Note: The connector is not modelled here because it is treated essentially
- as an untrusted party -- hence, it *is* the adversary, which is natively
- modelled by Tamarin as a Dolev-Yao adversary.
-
-*/
-
-builtins: hashing, symmetric-encryption, asymmetric-encryption, signing,
- diffie-hellman
-
-/* Note: splitL, splitR and merge functions are aimed to be used for
- splitting bitstrings to be used as keys. However, they do not model
- that leaking part of a key *does* leak much information about that
- key. Hence, for this modelling to be valid, we should check that all
- the larger bitstring and its splits are kept secret. */
-functions: kdf/1, splitL/1, splitR/1, merge/2, mac/2
-
-equations: merge(splitL(v),splitR(v)) = v
-
-/* Rule for sending a message through a secure (confidential & authenticated)
- channel. Used here for QR code sharing. */
-rule ChanOut_S:
- [ Out_S($A,$B,x) ]
- --[ ChanOut_S($A,$B,x) ]->
- [ Sec($A,$B,x) ]
-
-/* Rule for sending a message from a secure (confidential & authenticated)
- channel. Used here for QR code sharing. */
-rule ChanIn_S:
- [ Sec($A,$B,x) ]
- --[ ChanIn_S($A,$B,x) ]->
- [ In_S($A,$B,x) ]
-
-rule Register_pke:
- [ Fr(~x) ]
- --[ RegisteredE($A) ]->
- [ !Pke($A, 'g'^~x),
- !Ltke($A, ~x) ]
-
-rule Register_pks:
- [ Fr(~x) ]
- --[ RegisteredS($A) ]->
- [ !Pks($A, pk(~x)),
- !Ltks($A, ~x) ]
-
-rule Reveal_ltke:
- [ !Ltke(A, ltke) ]
- --[ LtkeReveal(A) ]->
- [ Out(ltke) ]
-
-rule Reveal_ltks:
- [ !Ltks(A, ltks) ]
- --[ LtksReveal(A) ]->
- [ Out(ltks) ]
-
-/* Rule for fetching an existing DH public key. */
-rule Get_pke:
- [ !Pke(A, pk) ]
- -->
- [ Out(pk) ]
-
-/* Rule for fetching an existing public key for signature verification. */
-rule Get_pks:
- [ !Pks(A, pk) ]
- -->
- [ Out(pk) ]
-
-/* Rule for initializing the issuer */
-rule Issuer_Init:
- [ Fr(~id), !Ltks($I, ~lkIs) ]
- --[ Create($I, ~id), Role('I') ]->
- [ St_Issuer_0($I, ~id, ~lkIs, pk(~lkIs)) ]
-
-/* Rule for initializing the holder */
-rule Holder_Init:
- [ Fr(~id), !Ltke($H, ~lkHe) ]
- --[ Create($H, ~id), Role('H') ]->
- [ St_Holder_0($H, ~id, ~lkHe, 'g'^~lkHe) ]
-
-/* The issuer shares the QR code via the secure channel. */
-rule Issuer_1:
- [ Fr(~code),
- Fr(~tid),
- St_Issuer_0($I, ~id, ~lkIs, pkIs) ]
- --[ Running_I_1($I, $H, <'H', 'I', <~tid, ~code, pk(~lkIs)>>) ]->
- [ Out_S($I, $H, <~tid, ~code, pkIs>),
- St_Issuer_1($I, ~id, ~lkIs, $H, ~tid, ~code) ]
-
-/* The holder receives the QR code through the secure channel, and responds. */
-rule Holder_2:
- let mc = mac(pkHe, ~code) in
- [ In_S($I, $H, <~tid, ~code, pkIs>),
- St_Holder_0($H, ~id, ~lkHe, pkHe) ]
- --[ SecretCode(~code), Honest($H), Honest($I),
- Commit_H_1($H, $I, <'H', 'I', <~tid, ~code, pkIs>>),
- Running_H_1($H, $I, <'H', 'I', <~tid, pkHe, mc>>)]->
- [ Out(<<~tid, pkHe>, mc>),
- St_Holder_1($H, ~id, ~lkHe, pkHe, $I, pkIs) ]
-
-/* The issuer receives back the MAC'ed QR code, via an insecure channel,
- plus the Holder's public key. */
-rule Issuer_3:
- let mc_pkh = mac(pkHe, ~code)
- k = kdf(pkHe^~r)
- k1 = splitL(k)
- k2 = splitR(k)
- ccred = senc(~cred, k1)
- mc_cred = mac(ccred, k2)
- sig = sign(h(<~cred, pkHe>), ~lkIs)
- in
- [ St_Issuer_1($I, ~id, ~lkIs, $H, ~tid, ~code),
- In(<<~tid, pkHe>, mc_pkh>),
- Fr(~r), Fr(~cred) ]
- --[ SecretCode(~code), SecretCred(~cred), Role('I'),
- Honest($H), Honest($I),
- AuthenticPK($H, <'H', 'I', ~tid, pkHe, mc_pkh>),
- Commit_I_1($I, $H, <'H', 'I', <~tid, pkHe, mc_pkh>>),
- Running_I_2($I, $H, <'H', 'I', >) ]->
- [ Out() ]
-
-/* The holder receives the ECIES-encrypted message by the issuer, and
- decrypts it. */
-rule Holder_4:
- let k = kdf(R^~lkHe)
- k1 = splitL(k)
- k2 = splitR(k)
- cred = sdec(ccred, k1)
- mc_cred = mac(ccred, k2)
- in
- [ St_Holder_1($H, ~id, ~lkHe, pkHe, $I, pkIs),
- In() ]
- --[ Eq(verify(sig, h(<cred, pkHe>), pkIs), true),
- SecretCred(cred),
- AuthenticCred($H, <'H', 'I', >),
- Honest($H), Honest($I), Role('H'),
- Commit_H_2($H, $I, <'H', 'I', >)]->
- []
-
-restriction Equality:
- "All x y #i. Eq(x,y) @i ==> x = y"
-
-/* Safety check */
-lemma executable:
- exists-trace
- "
- Ex I H idi idh #i1 #j1.
- Create(I, idi) @i1 & Create(H, idh) @j1
- & ( Ex m #i2 #j2. Running_I_1(I, H, m) @i2 & i1 < i2
- & Commit_H_1(H, I, m) @j2 & j1 < j2
- & ( Ex m2 #i3 #j3. Running_H_1(H, I, m2) @i3 & i2 < i3
- & Commit_I_1(I, H, m2) @j3 & j2 < j3
- & ( Ex m3 #i4 #j4. Running_I_2(I, H, m3) @i4 & i3 < i4
- & Commit_H_2(H, I, m3) @j4 & j3 < j4
- )
- )
- )
- "
-
-/* Secrecy of the code sent by the Issuer and back by the Holder */
-lemma code_secrecy:
- "All c #i. SecretCode(c) @ #i ==> not (Ex #j. K(c) @ #j)"
-
-/* Authenticity of the public key received by the Issuer from the Holder */
-lemma pkH_authenticity:
- "All H m #i. AuthenticPK(H, m) @ #i
- ==>
- (Ex I #j. Running_H_1(H, I, m) @ #j & #j < #i)"
-
-/* Injective agreement for secret code message exchange */
-lemma code_injective_agreement:
- "All H I t #i.
- Commit_H_1(H, I, t) @ i
- ==>
- (Ex #j. Running_I_1(I, H, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_H_1(H2, I2, t) @i2
- & not (#i2 = #i)))"
-
-/* Injective agreement for Holder's pk message exchange */
-lemma pkH_injective_agreement:
- "All H I t #i.
- Commit_I_1(I, H, t) @ i
- ==>
- (Ex #j. Running_H_1(H, I, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_I_1(I2, H2, t) @i2
- & not (#i2 = #i)))"
-
-/* Credential secrecy from the point of view of an honest issuer */
-lemma cred_secrecy_issuer:
- all-traces
- "(All c #i. SecretCred(c) @ #i & Role('I') @ #i
- ==>
- (not (Ex #j. K(c) @ #j)) |
- (Ex I #j. LtksReveal(I) @ #j & Honest(I) @ #i) |
- (Ex H #j. LtkeReveal(H) @ #j & Honest(H) @ #i)
- )"
-
-/* Credential secrecy from the point of view of an honest holder
- -- Requires also that the signing key of the issuer is not leaked! */
-lemma cred_secrecy_holder:
- all-traces
- "(All c #i. SecretCred(c) @ #i & Role('H') @ #i
- ==>
- (not (Ex #j. K(c) @ #j)) |
- (Ex H #j. LtkeReveal(H) @ #j & Honest(H) @ #i) |
- (Ex I #j. LtksReveal(I) @ #j & Honest(I) @ #i)
- )"
-
-/* Credential authenticity: it comes from the Issuer
- -- unless issuer and receiving holder are compromised. */
-lemma cred_auth:
- "All H m #i. AuthenticCred(H, m) @i
- ==>
- ((Ex I #j. Running_I_2(I, H, m) @j & j < i) |
- (Ex I #j. LtksReveal(I) @j) |
- (Ex #j. LtkeReveal(H) @j))"
-
-/* Injective agreement for credential message exchange */
-lemma cred_injective_agreement:
- "All H I t #i.
- Commit_H_2(H, I, t) @ i
- ==>
- (Ex #j. Running_I_2(I, H, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_H_2(H2, I2, t) @i2
- & not (#i2 = #i)))"
-
-/* Non-Injective agreement for credential message exchange */
-lemma cred_noninjective_agreement:
- "All H I t #i.
- Commit_H_2(H, I, t) @ i
- ==>
- (Ex #j. Running_I_2(I, H, t) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-/* Weak agreement for credential message exchange */
-lemma cred_weak_agreement:
- "All H I t1 #i.
- Commit_H_2(H, I, t1) @i
- ==> (Ex t2 #j. Running_I_2(I, H, t2) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-/* Aliveness for credential message exchange */
-lemma cred_aliveness:
- "All H I t #i.
- Commit_H_2(H, I, t) @i
- ==> (Ex id #j. Create(I, id) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-end
diff --git a/docs/research/formalizations/connector.spthy b/docs/research/formalizations/connector.spthy
deleted file mode 100644
index 2b08cf757d..0000000000
--- a/docs/research/formalizations/connector.spthy
+++ /dev/null
@@ -1,308 +0,0 @@
-/**
- * Description: Model for e2e based on QR code sharing and ECIES
- * Sign-then-Encrypt approach.
- * Date: 20210922
- * Authors: Jesus Diaz Vico, IOHK Atala and Research teams.
- */
-
-theory Connector
-begin
-
-/*
- The protocol we want to model is, in Alice and Bob notation, as follows:
-
- (I stands for Issuer, H for Holder, C for connector)
- ( => is secure channel; -> is normal channel )
-
- 1. I => H: tid, pk_iss, code // Shared via QR, i.e., secure channel
- 2. H -> C: <tid, pk_hol>, mac(pk_hol, code)
- 3. C -> I: <tid, pk_hol>, mac(pk_hol, code)
- 4. I: Fetch code used for session tid
- Verify mac(pk_hol, code)
- r = Fresh(Z)
- k = (pk_hol)^r
- k_1 || k_2 = kdf(k)
- data = (pk_iss, pk_hol, cred)
- s = sign(data, sk_iss)
- c = senc(data, s, k_1)
- m = mac(c, k_2)
- 5. I -> C: <g^r, c, m>
- 6. C -> H: <g^r, c, m>
- 7. H: k = (g^r)^u
- k_1 || k_2 = kdf(k)
- m' = mac(c, k_2)
- Assert m = m'
- data = sdec(c, k_1)
- Parse data as (pk_iss', pk_hol', cred)
- pk_iss' = pk_iss ?
- verify(sig, data, pk_iss)
- pk_hol' = pk_hol ?
-
- Steps 1-3 are the QRdid protocol, whereby the Issuer gets assured that
- the holder's public key is pkH. It needs a secure channel.
-
- Steps 4-7 correspond to an execution of an ECIES protocol, whereby the
- Issuer leverages the holder's public key obtained in the previous steps.
-
- After this, the holder is assured that the credential s/he received comes
- from the legitimate issuer, whereas the issuer is assured that only the
- intended holder received it. Of course, unless any of them is compromised.
-
- Note: The connector is not modelled here because it is treated essentially
- as an untrusted party -- hence, it *is* the adversary, which is natively
- modelled by Tamarin as a Dolev-Yao adversary.
-
-*/
-
-builtins: hashing, symmetric-encryption, asymmetric-encryption, signing,
- diffie-hellman
-
-/* Note: splitL, splitR and merge functions are aimed to be used for
- splitting bitstrings to be used as keys. However, they do not model
- that leaking part of a key *does* leak much information about that
- key. Hence, for this modelling to be valid, we should check that all
- the larger bitstring and its splits are kept secret. */
-functions: kdf/1, splitL/1, splitR/1, merge/2, mac/2
-
-equations: merge(splitL(v),splitR(v)) = v
-
-/* Rule for sending a message through a secure (confidential & authenticated)
- channel. Used here for QR code sharing. */
-rule ChanOut_S:
- [ Out_S($A,$B,x) ]
- --[ ChanOut_S($A,$B,x) ]->
- [ Sec($A,$B,x) ]
-
-/* Rule for sending a message from a secure (confidential & authenticated)
- channel. Used here for QR code sharing. */
-rule ChanIn_S:
- [ Sec($A,$B,x) ]
- --[ ChanIn_S($A,$B,x) ]->
- [ In_S($A,$B,x) ]
-
-rule Register_pke:
- [ Fr(~x) ]
- --[ RegisteredE($A) ]->
- [ !Pke($A, 'g'^~x),
- !Ltke($A, ~x) ]
-
-rule Register_pks:
- [ Fr(~x) ]
- --[ RegisteredS($A) ]->
- [ !Pks($A, pk(~x)),
- !Ltks($A, ~x) ]
-
-rule Reveal_ltke:
- [ !Ltke(A, ltke) ]
- --[ LtkeReveal(A) ]->
- [ Out(ltke) ]
-
-rule Reveal_ltks:
- [ !Ltks(A, ltks) ]
- --[ LtksReveal(A) ]->
- [ Out(ltks) ]
-
-/* Rule for fetching an existing DH public key. */
-rule Get_pke:
- [ !Pke(A, pk) ]
- -->
- [ Out(pk) ]
-
-/* Rule for fetching an existing public key for signature verification. */
-rule Get_pks:
- [ !Pks(A, pk) ]
- -->
- [ Out(pk) ]
-
-/* Rule for initializing the issuer */
-rule Issuer_Init:
- [ Fr(~id), !Ltks($I, ~lkIs) ]
- --[ Create($I, ~id), Role('I') ]->
- [ St_Issuer_0($I, ~id, ~lkIs, pk(~lkIs)) ]
-
-/* Rule for initializing the holder */
-rule Holder_Init:
- [ Fr(~id), !Ltke($H, ~lkHe) ]
- --[ Create($H, ~id), Role('H') ]->
- [ St_Holder_0($H, ~id, ~lkHe, 'g'^~lkHe) ]
-
-/* The issuer shares the QR code via the secure channel. */
-rule Issuer_1:
- [ Fr(~code),
- Fr(~tid),
- St_Issuer_0($I, ~id, ~lkIs, pkIs) ]
- --[ Running_I_1($I, $H, <'H', 'I', <~tid, ~code, pk(~lkIs)>>) ]->
- [ Out_S($I, $H, <~tid, ~code, pkIs>),
- St_Issuer_1($I, ~id, ~lkIs, $H, ~tid, ~code) ]
-
-/* The holder receives the QR code through the secure channel, and responds. */
-rule Holder_2:
- let mc = mac(pkHe, ~code) in
- [ In_S($I, $H, <~tid, ~code, pkIs>),
- St_Holder_0($H, ~id, ~lkHe, pkHe) ]
- --[ SecretCode(~code), Honest($H), Honest($I),
- Commit_H_1($H, $I, <'H', 'I', <~tid, ~code, pkIs>>),
- Running_H_1($H, $I, <'H', 'I', <~tid, pkHe, mc>>)]->
- [ Out(<<~tid, pkHe>, mc>),
- St_Holder_1($H, ~id, ~lkHe, pkHe, $I, pkIs) ]
-
-/* The issuer receives back the MAC'ed QR code, via an insecure channel,
- plus the Holder's public key. */
-rule Issuer_3:
- let mc_pkh = mac(pkHe, ~code)
- k = kdf(pkHe^~r)
- k1 = splitL(k)
- k2 = splitR(k)
- cred_data = <pk(~lkIs), pkHe, ~cred>
- sig = sign(cred_data, ~lkIs)
- ccred = senc(<cred_data, sig>, k1)
- mc_cred = mac(ccred, k2)
- in
- [ St_Issuer_1($I, ~id, ~lkIs, $H, ~tid, ~code),
- In(<<~tid, pkHe>, mc_pkh>),
- Fr(~r), Fr(~cred) ]
- --[ SecretCode(~code), SecretCred(~cred), Role('I'),
- Honest($H), Honest($I),
- AuthenticPK($H, <'H', 'I', ~tid, pkHe, mc_pkh>),
- Commit_I_1($I, $H, <'H', 'I', <~tid, pkHe, mc_pkh>>),
- Running_I_2($I, $H, <'H', 'I', <'g'^~r, ccred, mc_cred>>) ]->
- [ Out(<'g'^~r, ccred, mc_cred>) ]
-
-/* The holder receives the ECIES-encrypted message by the issuer, and
- decrypts it. */
-rule Holder_4:
- let k = kdf(R^~lkHe)
- k1 = splitL(k)
- k2 = splitR(k)
- cred_data = fst(sdec(ccred, k1))
- cred = snd(snd(cred_data))
- sig = snd(sdec(ccred, k1))
- mc_cred = mac(ccred, k2)
- in
- [ St_Holder_1($H, ~id, ~lkHe, pkHe, $I, pkIs),
- In(<R, ccred, mc_cred>) ]
- --[ Eq(verify(sig, cred_data, pkIs), true),
- Eq(fst(cred_data), pkIs),
- Eq(fst(snd(cred_data)), pkHe),
- SecretCred(cred),
- AuthenticCred($H, <'H', 'I', <R, ccred, mc_cred>>),
- Honest($H), Honest($I), Role('H'),
- Commit_H_2($H, $I, <'H', 'I', <R, ccred, mc_cred>>)]->
- []
-
-restriction Equality:
- "All x y #i. Eq(x,y) @i ==> x = y"
-
-/* Safety check */
-lemma executable:
- exists-trace
- "
- Ex I H idi idh #i1 #j1.
- Create(I, idi) @i1 & Create(H, idh) @j1
- & ( Ex m #i2 #j2. Running_I_1(I, H, m) @i2 & i1 < i2
- & Commit_H_1(H, I, m) @j2 & j1 < j2
- & ( Ex m2 #i3 #j3. Running_H_1(H, I, m2) @i3 & i2 < i3
- & Commit_I_1(I, H, m2) @j3 & j2 < j3
- & ( Ex m3 #i4 #j4. Running_I_2(I, H, m3) @i4 & i3 < i4
- & Commit_H_2(H, I, m3) @j4 & j3 < j4
- )
- )
- )
- "
-
-/* Secrecy of the code sent by the Issuer and back by the Holder. */
-lemma code_secrecy:
- "All c #i. SecretCode(c) @ #i ==> not (Ex #j. K(c) @ #j)"
-
-/* Authenticity of the public key received by the Issuer from the Holder */
-lemma pkH_authenticity:
- "All H m #i. AuthenticPK(H, m) @ #i
- ==>
- (Ex I #j. Running_H_1(H, I, m) @ #j & #j < #i)"
-
-/* Injective agreement for secret code message exchange */
-lemma code_injective_agreement:
- "All H I t #i.
- Commit_H_1(H, I, t) @ i
- ==>
- (Ex #j. Running_I_1(I, H, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_H_1(H2, I2, t) @i2
- & not (#i2 = #i)))"
-
-/* Injective agreement for Holder's pk message exchange */
-lemma pkH_injective_agreement:
- "All H I t #i.
- Commit_I_1(I, H, t) @ i
- ==>
- (Ex #j. Running_H_1(H, I, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_I_1(I2, H2, t) @i2
- & not (#i2 = #i)))"
-
-/* Credential secrecy from the point of view of an honest issuer */
-lemma cred_secrecy_issuer:
- all-traces
- "(All c #i. SecretCred(c) @ #i & Role('I') @ #i
- ==>
- (not (Ex #j. K(c) @ #j)) |
- (Ex I #j. LtksReveal(I) @ #j & Honest(I) @ #i) |
- (Ex H #j. LtkeReveal(H) @ #j & Honest(H) @ #i)
- )"
-
-/* Credential secrecy from the point of view of an honest holder
- -- Requires also that the signing key of the issuer is not leaked! */
-lemma cred_secrecy_holder:
- all-traces
- "(All c #i. SecretCred(c) @ #i & Role('H') @ #i
- ==>
- (not (Ex #j. K(c) @ #j)) |
- (Ex H #j. LtkeReveal(H) @ #j & Honest(H) @ #i) |
- (Ex I #j. LtksReveal(I) @ #j & Honest(I) @ #i)
- )"
-
-/* Credential authenticity: it comes from the Issuer
- -- unless issuer and receiving holder are compromised. */
-lemma cred_auth:
- "All H m #i. AuthenticCred(H, m) @i
- ==>
- ((Ex I #j. Running_I_2(I, H, m) @j & j < i) |
- (Ex I #j. LtksReveal(I) @j) |
- (Ex #j. LtkeReveal(H) @j))"
-
-/* Injective agreement for credential message exchange */
-lemma cred_injective_agreement:
- "All H I t #i.
- Commit_H_2(H, I, t) @ i
- ==>
- (Ex #j. Running_I_2(I, H, t) @j
- & j < i
- & not (Ex H2 I2 #i2. Commit_H_2(H2, I2, t) @i2
- & not (#i2 = #i)))"
-
-/* Non-Injective agreement for credential message exchange */
-lemma cred_noninjective_agreement:
- "All H I t #i.
- Commit_H_2(H, I, t) @ i
- ==>
- (Ex #j. Running_I_2(I, H, t) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-/* Weak agreement for credential message exchange */
-lemma cred_weak_agreement:
- "All H I t1 #i.
- Commit_H_2(H, I, t1) @i
- ==> (Ex t2 #j. Running_I_2(I, H, t2) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-/* Aliveness for credential message exchange */
-lemma cred_aliveness:
- "All H I t #i.
- Commit_H_2(H, I, t) @i
- ==> (Ex id #j. Create(I, id) @j)
- | (Ex I #r. LtksReveal(I) @r & Honest(I) @i)
- | (Ex H #r. LtkeReveal(H) @r & Honest(H) @i)"
-
-end
diff --git a/docs/sdk/README.md b/docs/sdk/README.md
deleted file mode 100644
index 56d8db71c4..0000000000
--- a/docs/sdk/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# SDK
-This small doc aims to specify the approach we plan to take to build a decent SDK.
-
-
-## Goals
-- Expose the necessary functionality we already use on our different platforms.
-- Use a modular design, so that only the necessary pieces get integrated while building a project.
-- Support mainly Scala/JavaScript/Android/Java.
-- Include anything added to Mirror that gets abstract enough if we have the time.
-- Compile to a single Scala version (the one being used at the time in our projects).
-- Make sure to support anything required by the Georgia deployment.
-- Type checked docs (https://scalameta.org/mdoc/).
-- Docs to cover the main end to end flows (issue/verify/revoke a credential).
-- Build it as if the SDK was already open source.
-
-## Non-goals
-- Add new features that aren't properly justified.
-- Support iOS: even though most of the pieces are there, no core member is familiar enough with Swift to invest in it now.
-- Idiomatic JavaScript/Java docs.
-- Prism Wallet SDK: it's not mature enough, there isn't much we can do with it, and we'd also need more JavaScript-related tooling (which needs research).
-
-## Misc stuff to consider before a proper release:
-- Update overall documentation, architecture, diagrams, etc
-- Analyze publishing js libraries to npm/github, if so, publish automatically on each release.
-- Publish the libraries automatically to maven central (https://github.com/olafurpg/sbt-ci-release).
-- Document how to use the APIs to run end to end flows in every supported language.
-- Document how to generate, and recover a wallet.
-- Document how to recover data from the encrypted data vault (to be defined)
-
-
-## Modules
-The suggested approach, chosen for its simplicity, is to create a root-level directory named `sdk`, containing a separate directory per SDK module. Every module would be a cross-compiled sbt project using git-based versioning.
-
-Note that while we have discussed the prism-node library, it doesn't provide much value besides the auto-generated client, hence it's excluded for now.
-
-Also, the SDK requires Android/Java bindings for all modules.
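-
-As a rough illustration of the modular design, a consumer could pull in only the pieces it needs. The Gradle Kotlin DSL snippet below is only a sketch; the artifact coordinates and versions are hypothetical:
-
-```kotlin
-// build.gradle.kts -- hypothetical coordinates, for illustration only
-dependencies {
-    // Low-level building blocks
-    implementation("io.iohk.atala:prism-crypto:1.0.0")
-    implementation("io.iohk.atala:prism-protos:1.0.0")
-    // Higher-level modules, added only when actually needed
-    implementation("io.iohk.atala:prism-identity:1.0.0")
-    implementation("io.iohk.atala:prism-credentials:1.0.0")
-}
-```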
-
-
-### prism-crypto
-This is the lowest-level module; its scope covers only the crypto primitives that power the system (signatures, encryption, hashing, etc.). This module is used heavily from Android, hence Java/Android bindings are mandatory.
-
-
-### prism-protos
-Another low-level module, grouping all the protobuf models and providing their compiled versions.
-
-A potential risk is the compatibility between scalapb-generated code and the Java libraries, which we will discover as we go.
-
-
-### prism-identity
-This module fits on top of prism-crypto/prism-protos, providing anything related to DIDs, like generation/recovery/validation/etc.
-
-This is the place to hook in long-form unpublished DIDs, but it doesn't involve any network-related calls.
-
-
-### prism-credentials
-This module fits on top of prism-crypto/prism-identity, providing the necessary credential-related functionality, like issuance/verification/revocation/etc. It's supposed to work without network calls.
-
-When we integrate dynamic credential templates/schema, this is going to be the place for those.
-
-
-### prism-connector
-The plan for this module is to fit on top of prism-crypto/prism-identity, providing what is necessary for invoking the node, like handling the custom DID authentication scheme so that users don't need to worry about it.
-
-Note that network calls aren't expected because it would be simpler to use the auto-generated clients in any language.
diff --git a/docs/sdk/diagrams/verify-credential.puml b/docs/sdk/diagrams/verify-credential.puml
deleted file mode 100644
index 5a27043c47..0000000000
--- a/docs/sdk/diagrams/verify-credential.puml
+++ /dev/null
@@ -1,67 +0,0 @@
-@startuml
-(*) --> "SDK Verify Credential"
-"SDK Verify Credential" --> "MerkleInclusionProof" #LightSkyBlue
-"SDK Verify Credential" --> "signedCredentialStringRepresentation" #LightSkyBlue
-if "Parse Credential Payload" then
- signedCredentialStringRepresentation -->[UnableToParseGivenSignedCredential] "ValidationResult"
-else
- -->[success] "Credential" #Orange
- "Credential" --> if "Extract IssuanceKeyId from Credential Payload" then
- -->[IssuerKeyIdNotFoundInCredential] "ValidationResult"
- else
- -->[success] if "Node.getDidDocument" then
- -->[IssuerDIDNotFoundOnChain] "ValidationResult"
- else
- -->[success] if "Extract IssuanceKeyId detail from Issuer DidDocument" then
- -->[IssuerKeyIdNotFoundOnChain] "ValidationResult"
- else
- -->[success] === q ===
- === q === --> if "Parse IssuingKey from IssuanceKeyId detail" then
- -->[OnChainIssuingKeyUnparsable] "ValidationResult"
- else
- -->[success] "IssuingKey" #Orange
- === q === --> if "Extract addedOn time from IssuanceKeyId detail" then
- -->[IssuanceKeyPublicationTimestampNotFoundOnChain] "ValidationResult"
- else
- -->[success] "IssuingKeyAddedOnTimestamp" #Orange
- === q === --> "MaybeIssuingKeyRevokedOnTimestamp" #Orange
- "Credential" --> if "Extract IssuerDID from Credential Payload" then
- -->[IssuerDIDNotFoundInCredential] "ValidationResult"
- else
- -->[success] === d ===
- === d === --> "Generate BatchId"
- if "nodeServiceApi.getBatchState" then
- -->[BatchNotFoundOnChain] "ValidationResult"
- else
- -->[success] === f ===
- === f === --> if "Extract published time from BatchState" then
- -->[BatchPublicationTimestampNotFoundOnChain] "ValidationResult"
- else
- -->[success] "BatchIssuedOnTimestamp" #Orange
- === f === --> "MaybeBatchRevokedOnTimestamp" #Orange
-
-
- === d === --> "nodeServiceApi.getCredentialRevocationTime"
- --> "Extract revokedOn time if present from CredentialRevocationTime if present"
- --> "MaybeCredentialRevocationTime"
-
- "MaybeBatchRevokedOnTimestamp"-->"credentialBatchNotRevoked"
- "IssuingKeyAddedOnTimestamp" --> "keyAddedBeforeIssuance"
- "BatchIssuedOnTimestamp" --> "keyAddedBeforeIssuance"
- "MaybeIssuingKeyRevokedOnTimestamp" --> "keyNotRevoked"
- "BatchIssuedOnTimestamp" --> "keyNotRevoked"
- "IssuingKey" --> "signatureIsValid"
- "MaybeCredentialRevocationTime" --> "individualCredentialNotRevoked"
- "Credential" --> "merkleProofIsValid"
- "MerkleInclusionProof" --> "merkleProofIsValid"
-
- "credentialBatchNotRevoked" #LightGreen --> [BatchWasRevoked(revokedOn: TimestampInfo)] "ValidationResult"
- "keyAddedBeforeIssuance" #LightGreen --> [KeyWasNotValid(keyAddedOn: TimestampInfo, credentialIssuedOn: TimestampInfo)] "ValidationResult"
- "keyNotRevoked" #LightGreen --> [KeyWasRevoked(credentialIssuedOn: TimestampInfo, keyRevokedOn: TimestampInfo)] "ValidationResult"
- "signatureIsValid" #LightGreen --> [InvalidSignature] "ValidationResult"
- "individualCredentialNotRevoked" #LightGreen -->[CredentialWasRevoked(revokedOn: TimestampInfo)] "ValidationResult"
- "merkleProofIsValid" #LightGreen -->[MerkleProofNotValid] "ValidationResult"
-
- "ValidationResult" #Orange --> (*)
-
-@enduml
\ No newline at end of file
diff --git a/docs/sdk/versioning-example.md b/docs/sdk/versioning-example.md
deleted file mode 100644
index 7396d64987..0000000000
--- a/docs/sdk/versioning-example.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-
-## [1.0.0] - Unreleased
-
-[1.0.0]: LINK TO THE GITHUB TAG
-
-### Added
-- **Maven and NPM artifacts are now available on Github Packages.**
-- A new proto class `AtalaErrorMessage` that represents a gRPC error. `AtalaMessage` can now contain `AtalaErrorMessage` as an underlying message.
-- Added a new snippet to Kotlin examples that was used for Africa Special video walkthrough.
-- A new method `EC.isSecp256k1` can be used to validate whether the point lies on the curve.
-- A new constant `DID.getMASTER_KEY_ID` is introduced.
-- A new convenience API `CreateContacts` for creating contacts in bulk from CSV has been introduced.
-- New ways to create DIDs from mnemonic phrase: `DID.deriveKeyFromFullPath` and `DID.createDIDFromMnemonic`.
-- [JS] Expose all the remaining pieces of SDK to JavaScript (including gRPC remoting).
-
-### Changed
-- **[BREAKING CHANGE]** All proto files have been converted to snake_case. This might affect you if you have been using the proto files directly (e.g., if you were auto-generating proto classes that are specific to your programming language). If you are using the supplied proto classes, nothing should change from your perspective.
-- **[BREAKING CHANGE]** `DID.stripPrismPrefix` was deleted as it was duplicating `DID.suffix` functionality.
-- **[BREAKING CHANGE]** `Credential.isSigned` and `Credentials.isUnverifiable` were replaced with `Credential.isVerifiable` that better suits most use cases.
-- **[BREAKING CHANGE]** `RegisterDIDRequest.createDidOperation` was replaced with `RegisterDIDRequest.registerWith`. You can still access the old behaviour by wrapping `SignedAtalaOperation` into `RegisterDIDRequest.RegisterWith.CreateDidOperation`.
-- **[BREAKING CHANGE]** All occurrences of `List` have been replaced with `ByteArray`.
-- **[BREAKING CHANGE]** [JS] JavaScript API has seen a major overhaul, it is much closer to the JVM one now. Most classes and methods have lost `JS` suffix at the end of their names and now accept proper classes instead of hex strings.
-- The integration tests module has been deleted. It was not exposed to users, so nothing should change from the user's perspective.
-- Kotlin version has been upgraded to 1.5.10. Users that are using 1.4.* should still be able to use the SDK without issues.
-- Documentation template for protobuf API has been improved.
-- README has been improved a lot:
- - Added a section on how to install all the prerequisites on Linux and macOS
- - Added a section on how to use it from JavaScript
-
-### Fixed
-- **[BREAKING CHANGE]** Due to the closure of Bintray, we have moved all custom artifacts to JFrog. Practically, this means users need to replace `https://dl.bintray.com/itegulov/maven` repository with `https://vlad107.jfrog.io/artifactory/default-maven-virtual/`.
-- Proper `equals` and `hashCode` for `DerivationAxis`.
-
-## [0.1.0] - 2021-04-27
-_Presumably already filled in_
diff --git a/docs/sdk/versioning.md b/docs/sdk/versioning.md
deleted file mode 100644
index 3371488201..0000000000
--- a/docs/sdk/versioning.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# SDK Versioning
-This document describes Atala's approach to SDK versioning and its further evolution. Although some of what is said in this document applies to all supported platforms, we are going to talk about Kotlin/JVM specifically.
-
-## Context
-Atix/PSG are going to start the development of MoE services very soon. Naturally, they will be using the PRISM SDK, and they need some guarantees of the SDK's stability. Specifically, backwards compatibility should be maintained for the length of the first stage of the project (~1.5 years).
-
-## Goal
-The goal of this document is threefold:
-1. Define the PRISM SDK's versioning scheme and what kind of backwards compatibility guarantees we provide between versions as they evolve.
-2. Provide some practical guidance on how to help us enforce these guarantees.
-3. Define where we are going to store the artifacts and how we are going to disseminate them.
-
-## Versioning Scheme
-We propose to use [semantic versioning](https://semver.org/). This means that each PRISM SDK release version will look like MAJOR.MINOR.PATCH, where incrementing the:
-- MAJOR version means that non-backward compatible changes were introduced
-- MINOR version means that new functionality was added in backward compatible manner
-- PATCH version means that backward compatible bug fixes or security patches were applied
-
-Note that we will be using a single MAJOR version of the PRISM SDK during the entire first stage of the MoE project.
-
-### Deprecation
-Deprecating existing functionality is a normal part of any development process. As such, MINOR releases can mark classes/methods as deprecated (without deleting them). Furthermore, we shall guarantee that there will be at least one MINOR release that deprecates a functionality before a MAJOR release that completely removes it.
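-
-As a sketch of what a MINOR-release deprecation could look like (the function name is borrowed from the changelog example, and the signature here is made up for illustration):
-
-```kotlin
-// Hypothetical example: deprecated in a MINOR release, removed no earlier than the next MAJOR one
-@Deprecated(
-    message = "Duplicates the suffix functionality; use suffix instead",
-    replaceWith = ReplaceWith("suffix"),
-    level = DeprecationLevel.WARNING
-)
-public fun stripPrismPrefix(did: String): String = did.removePrefix("did:prism:")
-```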
-
-### Release Notes
-We propose to include the following into each version's release notes:
-- MAJOR releases will come with (relative to the last MAJOR release):
- - General overview of the release and extensive human-readable changelog
- - A full migration guide on removed/changed functionality
-- MINOR releases will come with (relative to the last MINOR release in the given MAJOR release):
- - A list of all deprecated classes/methods along with our suggestions on how to do the supported flows without them
- - A list of newly introduced classes/methods along with their proposed usages
-- PATCH releases will come out with a small changelist outlining the fixed bugs
-
-Release notes will be kept in CHANGELOG.md, the format of which will be based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). Each new version's changelog will be added as a new entry to CHANGELOG.md, timestamped with the UTC release date. Additionally, each new version will be accompanied by a GitHub tag that will contain a link to the relevant part of CHANGELOG.md.
-
-As the release can affect multiple platforms at once, the release note can have tags dedicated to each particular platform. For example, common API changes can have no tag at all, JVM changes can have "[JVM]" and JS changes "[JS]".
-
-See [versioning example](versioning-example.md) for reference.
-
-### Documentation
-We propose to maintain one documentation instance for the latest MINOR.PATCH release in each MAJOR branch (similarly to how [http4s](https://http4s.org/) does it). This will mean adapting our documentation website generation engine to support a multi-version layout, the design of which is out of scope for this document.
-
-### Transitive dependencies
-We need to carefully review all of the SDK's dependencies and mark them as `implementation` or `api` accordingly. This will make sure that only relevant dependencies are exposed to end-users transitively. For example, it makes sense to expose `pbandk` to provide protobuf primitives to the user, but it would not make sense to expose `bitcoin-kmp` along with our own cryptographic primitives.
-
-Marking dependencies correctly is important, as it will affect our ability to upgrade some dependencies within a given MAJOR branch. It is especially important in the case of `pbandk`, which auto-generates our protobuf models and gRPC services.
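-
-A minimal Gradle Kotlin DSL sketch of the distinction, using the two dependencies mentioned above (coordinates and versions are illustrative, not prescriptive):
-
-```kotlin
-// build.gradle.kts
-dependencies {
-    // Exposed to SDK users transitively: protobuf primitives appear in our public API
-    api("pro.streem.pbandk:pbandk-runtime:0.13.0")
-    // Internal detail: the cryptographic backend stays hidden from users
-    implementation("fr.acinq.bitcoin:bitcoin-kmp:0.8.0")
-}
-```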
-
-## Practical Enforcements
-First, we need to make sure that nothing is exposed to users unintentionally. Modern languages (such as Kotlin) have moved away from Java's original visibility approach, where you have to specify visibility modifiers explicitly if you want to expose an entity to users. This makes everyday backend code easier to write, but actually makes life harder for library authors. Fortunately, Kotlin 1.4 introduced a new feature called [explicit API mode](https://kotlinlang.org/docs/whatsnew14.html#explicit-api-mode-for-library-authors). We propose to enable it in strict mode for all SDK modules.
-
-Second, after we have reviewed and frozen our public API, we need to make sure that the API does not change unintentionally. For this we propose to use the [Kotlin binary compatibility validator](https://github.com/Kotlin/binary-compatibility-validator): a Gradle plugin that can dump the binary API into a file and make sure that it is not affected by proposed changes.
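-
-A sketch of how both could be wired up in a module's build script (the plugin and Kotlin versions below are placeholders):
-
-```kotlin
-// build.gradle.kts
-plugins {
-    kotlin("jvm") version "1.5.10"
-    id("org.jetbrains.kotlinx.binary-compatibility-validator") version "0.8.0"
-}
-
-kotlin {
-    // Fail compilation when public declarations lack explicit visibility or return types
-    explicitApi()
-}
-```
-
-The validator adds an `apiDump` task (writes the current public API to a committed `api/*.api` file) and an `apiCheck` task (fails the build if the API changed without the dump being updated).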
-
-## Interoperability with Java
-As a part of our guarantee, we will also offer Java interoperability along with the Kotlin/JVM one. Practically, this means properly annotated static fields/methods (with `@JvmField`/`@JvmStatic`) and package-level functions.
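-
-For illustration only (the class and member names are hypothetical), this is the kind of annotation usage the guarantee implies:
-
-```kotlin
-public class MasterKeyId private constructor(public val value: String) {
-    public companion object {
-        // Visible from Java as a static field: MasterKeyId.DEFAULT
-        @JvmField
-        public val DEFAULT: MasterKeyId = MasterKeyId("master0")
-
-        // Callable from Java as a static method: MasterKeyId.of("issuing0")
-        @JvmStatic
-        public fun of(value: String): MasterKeyId = MasterKeyId(value)
-    }
-}
-```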
-
-## Experimental API
-In the future we might offer experimental features that would not fall under our backward compatibility guarantees. Such APIs will be marked with a special [opt-in annotation](https://kotlinlang.org/docs/opt-in-requirements.html) that gives users a warning that they are relying on an experimental feature.
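-
-A minimal sketch of such an annotation (the annotation and function names are hypothetical):
-
-```kotlin
-@RequiresOptIn(
-    message = "This API is experimental and may change without notice.",
-    level = RequiresOptIn.Level.WARNING
-)
-@Retention(AnnotationRetention.BINARY)
-@Target(AnnotationTarget.CLASS, AnnotationTarget.FUNCTION)
-public annotation class ExperimentalPrismApi
-
-@ExperimentalPrismApi
-public fun experimentalFeature(): Unit = TODO("not stable yet")
-```
-
-Callers then either propagate `@ExperimentalPrismApi` on their own declarations or add `@OptIn(ExperimentalPrismApi::class)` to acknowledge the warning.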
-
-## Artifactory
-We will be producing Apache Maven artifacts that need to be published to a Maven registry. [Github Packages](https://github.com/features/packages) offers [such a registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-apache-maven-registry), so it seems like a reasonable choice given that the artifacts are only meant to be consumed by internal IOHK developers and Github Packages provides organization-level privacy.
diff --git a/docs/signing.md b/docs/signing.md
deleted file mode 100644
index d5147b9ee9..0000000000
--- a/docs/signing.md
+++ /dev/null
@@ -1,39 +0,0 @@
-## Problems with signing structured data
-Signing data can be harder than one would imagine. The main problem is not the signing algorithm itself but ensuring that exactly the same data is used for signing and for signature verification. A single differing byte will cause verification to fail.
-
-There are two ways of providing a signature:
-1. Store it externally - for example, just append it to the document using some kind of separator;
-2. Add it as a part of the document itself - this way the signature is computed over the version of the document **without the signature field appended**; this method is often used for JSON, where we want to keep the document structure valid.
-
-The second distinction is how to obtain the exact byte sequence used for signing when the same value can be represented in many ways:
-1. Let the signer choose the representation and just use the one provided during signature verification. This approach is problematic if we want to store the signature as part of the signed document: the signature has to be removed from it during signing, and that process may be ambiguous as well. There are solutions to that problem, like signing the document with the signature field present but containing an artificial value, e.g. a string of zeros with the length of a proper signature[1](#1).
-2. Generate a canonical representation and use it for signing. Canonicalization can be hard to implement correctly, as edge cases can make rendering difficult[1](#1) (e.g. JSON object field ordering when unicode characters are present).
-3. Serialize data into bytes in a way completely unrelated to the format used for storing it, e.g. use JSON for storage but some more unambiguous binary format for signing. This can be easier than canonicalization, but has the downside of maintaining two serialization formats.
-
-## Common signature schema
-
-If we want to create a signature schema that can support many variants, the following design can be used:
-
-```
-type Enclosure
-
-enclose: T => Enclosure
-getSignedBytes: Enclosure => Array[Byte]
-compose: (Enclosure, Array[Byte]) => String
-decompose: String => (Enclosure, Array[Byte])
-disclose: Enclosure => T
-```
-
-`enclose` computes a representation that allows easy computation of the byte string used for signing, and also allows composing it with the signature. `disclose` is the reverse process.
-
-`getSignedBytes` uses the enclosed value to obtain the bytes that should be signed.
-
-`compose` returns the encoded representation with the signature added. `decompose` extracts the enclosed value and the signature from the encoded representation.
-
-Such a model is general enough to cover various encoding options (a sketch of the first variant follows the list):
-* When the signature is stored externally, `Enclosure` might just be `Array[Byte]`. `enclose` / `disclose` are serialization / deserialization operations, and `compose` can mean concatenating the serialized value and the signature.
-* When the signature is stored as a part of the document and we rely on the representation provided by the signer, `Enclosure` might be a byte sequence with a placeholder for inserting the signature.
-* When the signature is stored as a part of a canonicalized document, `Enclosure` might be an intermediate representation that allows easy rendering of the canonicalized version, e.g. an in-memory representation of JSON objects with sorted keys.
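-
-A minimal Kotlin sketch of the external-signature variant, assuming the signed value is a string serialized to UTF-8 bytes and the composed form is `base64(payload) + "." + base64(signature)` (all names and choices here are illustrative):
-
-```kotlin
-import java.util.Base64
-
-// Abstract scheme mirroring the operations above
-interface SignatureScheme<T, E> {
-    fun enclose(value: T): E
-    fun getSignedBytes(enclosure: E): ByteArray
-    fun compose(enclosure: E, signature: ByteArray): String
-    fun decompose(encoded: String): Pair<E, ByteArray>
-    fun disclose(enclosure: E): T
-}
-
-// External-signature variant: Enclosure = ByteArray, compose = concatenation with a separator
-object ExternalSignatureScheme : SignatureScheme<String, ByteArray> {
-    private val encoder = Base64.getUrlEncoder().withoutPadding()
-    private val decoder = Base64.getUrlDecoder()
-
-    override fun enclose(value: String): ByteArray = value.encodeToByteArray()
-    override fun getSignedBytes(enclosure: ByteArray): ByteArray = enclosure
-    override fun compose(enclosure: ByteArray, signature: ByteArray): String =
-        encoder.encodeToString(enclosure) + "." + encoder.encodeToString(signature)
-    override fun decompose(encoded: String): Pair<ByteArray, ByteArray> {
-        val (payload, signature) = encoded.split(".", limit = 2)
-        return decoder.decode(payload) to decoder.decode(signature)
-    }
-    override fun disclose(enclosure: ByteArray): String = enclosure.decodeToString()
-}
-```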
-
-## Footnotes
-1. How (not) to sign a JSON object https://latacora.micro.blog/2019/07/24/how-not-to.html
diff --git a/docs/wallet-backend/motivation.md b/docs/wallet-backend/motivation.md
deleted file mode 100644
index 29d6b3eb4f..0000000000
--- a/docs/wallet-backend/motivation.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Wallet Backend Motivation
-While designing gRPC-agnostic Node API in SDK we realized that a pure stateless SDK, despite being useful, does not cover the needs of most PRISM use cases. Every single application that wants to build on top of PRISM has to re-implement the same logic. Just to name a few: managing derived private keys along with their respective DIDs, keeping track of the last operations the user has done with one of their DIDs, subscribing to the relevant operations coming from the PRISM Node instance. This brings us to this document's topic of discussion: creating a standalone PRISM wallet backend that would provide a _stateful_ API where the user would not have to take care of many of the aforementioned minute details.
-
-This document will not provide any implementation insight, but rather discuss the problems in more detail so that a separate upcoming design document has something to build on. The following sections outline the problems that the wallet backend will have to tackle.
-
-## Managing derived private keys and DIDs that contain them
-The PRISM protocol prescribes a specific way to derive private keys depending on each key's usage and the index of the DID it belongs to. This topic is described in detail [here](../protocol/key-derivation.md#the-paths-used-in-our-protocol) and the reader is advised to familiarize themselves with that document before continuing with this section.
-
-Normally, when a user wants to govern their identity, they don't really care how the private keys are generated or what key index is assigned to them. What the user wants in the end is to issue or revoke credentials; how that is done is not very relevant to them. I propose to analyze the user flows and come up with a straightforward API for managing one's DIDs. For example, it is likely that we do not need to expose the notion of a key index to the user. Maybe it would make sense to come up with an API where a user simply asks to derive a DID that is capable of issuing and revoking credentials. They would then be able to perform very straightforward operations such as issuing credentials (without specifying which key to use), rotating keys (again, no need to specify which ones to rotate), disabling a DID, etc.
-
-Another important point in this section is the last used DID index. The wallet backend would have to keep track of it and save it to the Vault instance of the user's choosing (or maybe we want to support a wider range of storage facilities, such as a generic S3 instance?). It would also have to be able to restore all DIDs given the root mnemonic code and Vault credentials.
-
-## Keeping track of the relevant on-chain data
-It is very important to know which operations the user submitted to the Node. Otherwise, the user will not be able to make any more operations, as the PRISM protocol requires submitting the last known operation hash when making a new operation in order to prevent [replay attacks](../protocol/protocol-v0.1.md#replay-attack). This means that the wallet backend would need reasonably scalable local persistent storage where it would store all operations it sent to PRISM Node and the ones it received from PRISM Node (see section TBD).
-
-## Keeping track of the relevant off-chain data
-There is some data that is not supposed to be stored on-chain but is very important to the holder of the wallet, mainly credential contents. The wallet backend would need to be able to receive credentials, verify that the proof of their issuance was indeed posted on-chain, and then save them to local storage as well as to the connected Vault instance (if any). Similarly, an issuer might want to store all credentials it has ever issued.
-
-## Subscribing to new operations pulled from Cardano ledger
-Sometimes the user is not the only owner of a DID and its credentials. Hence, when someone else makes updates to the DID or issues new credential batches using one of the controlled DIDs, the user wants to stay in sync with the new changes. To do this, the wallet backend would have to [subscribe](../node/SubscriptionMechanism.md) to a PRISM Node instance and listen for relevant operations.
-
-## Integration with connector
-As of the time of writing, it is still unclear what role the connector is going to play in PRISM as a whole. Yet communication is one of the important cornerstones of self-sovereign identity, so the wallet backend would ideally be able to handle communication with other parties in the system: keeping track of current connections, receiving and storing messages, importing new connection tokens by external means, etc. Coming up with a higher-level API for conveniently sharing/receiving credentials might be beneficial as well.
-
-# Conclusion
-This proposal just scratches the surface of the features we can/should implement with a standalone stateful PRISM wallet. If the wallet ever sees the light of day, it can be enhanced with multiple useful features such as auto-retrying rejected operations (for example if the wallet backend was not aware of some operation that happened shortly before we sent our failed one), supporting a [full-blown offline mode](../moe/full-offline-mode.md), notifying the user whenever someone makes unsolicited operations in their name, and automatic key rotation (e.g. a mode that one can enable if they are sure that the DID will only ever be owned by one wallet).
diff --git a/prism-backend/node/infrastructure/charts/node/templates/stringsecret.yaml b/node/infrastructure/charts/node/templates/stringsecret.yaml
similarity index 100%
rename from prism-backend/node/infrastructure/charts/node/templates/stringsecret.yaml
rename to node/infrastructure/charts/node/templates/stringsecret.yaml
diff --git a/node/src/main/resources/application.conf b/node/src/main/resources/application.conf
index 53dc2db0e0..2df839162c 100644
--- a/node/src/main/resources/application.conf
+++ b/node/src/main/resources/application.conf
@@ -26,7 +26,7 @@ s3 {
keyPrefix = ${?NODE_S3_KEY_PREFIX}
}
-# Ledger, options: bitcoin, cardano, in-memory
+# Ledger, options: cardano, in-memory
ledger = "in-memory"
ledger = ${?NODE_LEDGER}
@@ -75,47 +75,6 @@ nodeExplorer {
whitelistDids += ${?NODE_EXPLORER_WHITELIST_DID}
}
-api {
- authTokens = [
- "ShVvJ11AlVhLYv7OBO9sY9AOz8D5FoWo",
- "TVjoJ4yXVFjrI1c6T6J1kSuQUxk9s7fI",
- "deNADa2y8Zxk34uyJQpmVG0ToWmweytV",
- "HV396mUEeLcOxwm2flUlmKFD3LJQS3oB",
- "k8zyNbGZyKeVeHaXxv3rW3AWWfFP60Yw",
- "fpGmZeRVswt2KHL4EWQPzuoYvWjALQH6",
- "Y82qhBN9oCNltqDQjqFD8xO3E7hrCpGh",
- "c3fMgQwjrB1QfmTJ3S2o4fdFH0g8xt1O",
- "YtC34xZ7XtvBpMDekLj24kLo845DRq45",
- "sIgaocwpaOMji6o9MvKWWR2rlX3Dw4r7",
- "hareZzVNChnkB4lnonxIM2Eq2qaV1EXy",
- "M0khg6mxgDX5wlsptPpwXUGny995k3St",
- "oPChvenEEjRA5jpEtfSb1VQ57lgAPacK",
- "V4kHtJaNovpGJ8txHlanwhZk2ObMLo3J",
- "gQiZMF7GkrxfHbAmDCLJvsbRWx7liCWc",
- "S9LIwGNsQgl9KIVPY2opf7pH7hSYk4dS",
- "ScfrRRewF53q7BKWbHVWepBbY6dy4nSC",
- "rFPeLaHZhZ2GW2sISG9Fu9VaGcQmTTCo",
- "1aJ7D71rcVpX2HCO6IAhhh9D3pNa4rhm",
- "pg3fBuFTzrkYg0N87oYai0wzLbqg81V1",
- "ZWQzkFoM0RrBlUtGLtdPGZcpDN6Ou9FF",
- "URMoomuOUgPdBvi289U2b6lkO0CuhS1I",
- "UHt2vVi0qVi0ZyLhhsjWMaY2pElMHJQJ",
- "DbbTyo0RlKyoF8qnDXjjMwsA1k2wFEER",
- "BZIsOLAvNL8hUKs7PketgY0cYiDf2Usl",
- "QiljU5ggQ0PyWwkrWDmLGsZabSG83pnS",
- "zup7PQEIIz4zqmRXBOUYnIptDpEyT1D2",
- "9iT8rGs5Qeb0QU3jMqKT25LQuu1zvgJj",
- "xCgPgqsq4mXUmmC6l7fl1Bj0XjxK5n2Q",
- "uH6k55UwTYZ8Yr3snqXtLQXiPdJHUwEK",
- "Nelprl0D0yKCwTs097VTbEOKtRsnrp4l",
- "dmsKefghC7XMb4rB7VtUQZj98uBfiSmV",
- "QhmYBNNjqwiEKWIkBV9P4utz4wTsD76z"
- ]
- authTokens += ${?AUTH_TOKENS}
- authEnabled = false
- authEnabled = ${?AUTH_ENABLED}
-}
-
# CardanoLedgerService
cardano {
# Network connecting to, options: testnet, mainnet
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/NodeApp.scala b/node/src/main/scala/io/iohk/atala/prism/node/NodeApp.scala
index 436f505162..573f6cb244 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/NodeApp.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/NodeApp.scala
@@ -6,19 +6,14 @@ import cats.implicits.toFunctorOps
import com.typesafe.config.{Config, ConfigFactory}
import doobie.hikari.HikariTransactor
import io.grpc.{Server, ServerBuilder}
-import io.iohk.atala.prism.auth.WhitelistedAuthenticatorF
-import io.iohk.atala.prism.auth.grpc.{
- AuthorizationInterceptor,
- GrpcAuthenticatorInterceptor,
- TraceExposeInterceptor,
- TraceReadInterceptor
-}
-import io.iohk.atala.prism.auth.utils.DidWhitelistLoader
+import io.iohk.atala.prism.node.auth.WhitelistedAuthenticatorF
+import io.iohk.atala.prism.node.auth.grpc.{GrpcAuthenticatorInterceptor, TraceExposeInterceptor, TraceReadInterceptor}
+import io.iohk.atala.prism.node.auth.utils.DidWhitelistLoader
import io.iohk.atala.prism.identity.PrismDid
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.metrics.UptimeReporter
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.metrics.UptimeReporter
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.cardano.CardanoClient
import io.iohk.atala.prism.node.metrics.NodeReporter
import io.iohk.atala.prism.node.operations.ApplyOperationConfig
@@ -27,8 +22,8 @@ import io.iohk.atala.prism.node.services.CardanoLedgerService.CardanoBlockHandle
import io.iohk.atala.prism.node.services._
import io.iohk.atala.prism.node.services.models.AtalaObjectNotification
import io.iohk.atala.prism.protos.node_api._
-import io.iohk.atala.prism.repositories.{SchemaMigrations, TransactorFactory}
-import io.iohk.atala.prism.utils.IOUtils._
+import io.iohk.atala.prism.node.repositories.{SchemaMigrations, TransactorFactory}
+import io.iohk.atala.prism.node.utils.IOUtils._
import kamon.Kamon
import kamon.module.Module
import org.slf4j.LoggerFactory
@@ -150,7 +145,7 @@ class NodeApp(executionContext: ExecutionContext) { self =>
nodeExplorerDids
)
nodeGrpcService = new NodeGrpcServiceImpl(nodeService)
- server <- startServer(nodeGrpcService, nodeExplorerGrpcService, globalConfig)
+ server <- startServer(nodeGrpcService, nodeExplorerGrpcService)
} yield (submissionSchedulingService, server)
}
@@ -256,8 +251,7 @@ class NodeApp(executionContext: ExecutionContext) { self =>
private def startServer(
nodeService: NodeGrpcServiceImpl,
- nodeExplorerService: NodeExplorerGrpcServiceImpl,
- globalConfig: Config
+ nodeExplorerService: NodeExplorerGrpcServiceImpl
): Resource[IO, Server] =
Resource.make[IO, Server](IO {
logger.info("Starting server")
@@ -266,7 +260,6 @@ class NodeApp(executionContext: ExecutionContext) { self =>
.forPort(NodeApp.port)
.intercept(new TraceExposeInterceptor)
.intercept(new TraceReadInterceptor)
- .intercept(new AuthorizationInterceptor(globalConfig))
.intercept(new GrpcAuthenticatorInterceptor)
.addService(NodeServiceGrpc.bindService(nodeService, executionContext))
.addService(NodeExplorerServiceGrpc.bindService(nodeExplorerService, executionContext))
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/NodeConfig.scala b/node/src/main/scala/io/iohk/atala/prism/node/NodeConfig.scala
index 8323acec8d..0c9dfab0c8 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/NodeConfig.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/NodeConfig.scala
@@ -7,7 +7,7 @@ import io.iohk.atala.prism.node.cardano.dbsync.CardanoDbSyncClient
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient
import io.iohk.atala.prism.node.services.CardanoLedgerService
import io.iohk.atala.prism.node.services.CardanoLedgerService.CardanoNetwork
-import io.iohk.atala.prism.repositories.TransactorFactory
+import io.iohk.atala.prism.node.repositories.TransactorFactory
import sttp.model.Header
import scala.util.Try
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/NodeExplorerAuthenticator.scala b/node/src/main/scala/io/iohk/atala/prism/node/NodeExplorerAuthenticator.scala
index d029a63970..7e77988a5c 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/NodeExplorerAuthenticator.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/NodeExplorerAuthenticator.scala
@@ -1,8 +1,8 @@
package io.iohk.atala.prism.node
-import io.iohk.atala.prism.auth.{WhitelistedAuthHelper, model}
+import io.iohk.atala.prism.node.auth.{WhitelistedAuthHelper, model}
import io.iohk.atala.prism.identity.PrismDid
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
import io.iohk.atala.prism.node.repositories.RequestNoncesRepository
class NodeExplorerAuthenticator(
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/NodeExplorerGrpcServiceImpl.scala b/node/src/main/scala/io/iohk/atala/prism/node/NodeExplorerGrpcServiceImpl.scala
index f617927fb0..4880ab21dd 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/NodeExplorerGrpcServiceImpl.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/NodeExplorerGrpcServiceImpl.scala
@@ -3,14 +3,14 @@ package io.iohk.atala.prism.node
import cats.effect.unsafe.IORuntime
import cats.syntax.traverse._
import com.google.protobuf.ByteString
-import io.iohk.atala.prism.auth.WhitelistedAuthenticatorF
-import io.iohk.atala.prism.auth.grpc.GrpcAuthenticationHeaderParser.grpcHeader
+import io.iohk.atala.prism.node.auth.WhitelistedAuthenticatorF
+import io.iohk.atala.prism.node.auth.grpc.GrpcAuthenticationHeaderParser.grpcHeader
import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.metrics.RequestMeasureUtil
-import io.iohk.atala.prism.metrics.RequestMeasureUtil.measureRequestFuture
-import io.iohk.atala.prism.models.TransactionId
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.metrics.RequestMeasureUtil
+import io.iohk.atala.prism.node.metrics.RequestMeasureUtil.measureRequestFuture
+import io.iohk.atala.prism.node.models.TransactionId
import io.iohk.atala.prism.node.NodeExplorerGrpcServiceImpl.{countAndThrowNodeError, serviceName}
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.metrics.StatisticsCounters
@@ -25,8 +25,8 @@ import io.iohk.atala.prism.protos.node_api.GetScheduledOperationsRequest.Operati
UpdateDidOperationOperationType
}
import io.iohk.atala.prism.protos.node_api._
-import io.iohk.atala.prism.tracing.Tracing.trace
-import io.iohk.atala.prism.utils.FutureEither.FutureEitherOps
+import io.iohk.atala.prism.node.tracing.Tracing.trace
+import io.iohk.atala.prism.node.utils.FutureEither.FutureEitherOps
import scalapb.GeneratedMessage
import scala.concurrent.{ExecutionContext, Future}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/NodeGrpcServiceImpl.scala b/node/src/main/scala/io/iohk/atala/prism/node/NodeGrpcServiceImpl.scala
index 3e5db21e89..9e86cb8f86 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/NodeGrpcServiceImpl.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/NodeGrpcServiceImpl.scala
@@ -4,10 +4,10 @@ import cats.effect.unsafe.IORuntime
import com.google.protobuf.ByteString
import io.grpc.Status
import io.iohk.atala.prism.BuildInfo
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.metrics.RequestMeasureUtil
-import io.iohk.atala.prism.metrics.RequestMeasureUtil.measureRequestFuture
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.metrics.RequestMeasureUtil
+import io.iohk.atala.prism.node.metrics.RequestMeasureUtil.measureRequestFuture
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.grpc.ProtoCodecs
import io.iohk.atala.prism.node.models.AtalaObjectTransactionSubmissionStatus.InLedger
@@ -21,8 +21,8 @@ import io.iohk.atala.prism.node.services._
import io.iohk.atala.prism.protos.common_models.{HealthCheckRequest, HealthCheckResponse}
import io.iohk.atala.prism.protos.node_api._
import io.iohk.atala.prism.protos.{common_models, node_api}
-import io.iohk.atala.prism.tracing.Tracing._
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.tracing.Tracing._
+import io.iohk.atala.prism.node.utils.syntax._
import org.slf4j.{Logger, LoggerFactory}
import scala.concurrent.{ExecutionContext, Future}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/UnderlyingLedger.scala b/node/src/main/scala/io/iohk/atala/prism/node/UnderlyingLedger.scala
index 1dace33285..6e34051d3a 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/UnderlyingLedger.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/UnderlyingLedger.scala
@@ -2,7 +2,7 @@ package io.iohk.atala.prism.node
import derevo.derive
import derevo.tagless.applyK
-import io.iohk.atala.prism.models.{Ledger, TransactionDetails, TransactionId, TransactionInfo, TransactionStatus}
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.cardano.models.CardanoWalletError
import io.iohk.atala.prism.node.models.Balance
import io.iohk.atala.prism.protos.node_internal
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/AuthHelper.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/AuthHelper.scala
similarity index 84%
rename from common/src/main/scala/io/iohk/atala/prism/auth/AuthHelper.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/AuthHelper.scala
index 92988892be..17b34fb4cf 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/AuthHelper.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/AuthHelper.scala
@@ -1,7 +1,7 @@
-package io.iohk.atala.prism.auth
+package io.iohk.atala.prism.node.auth
-import io.iohk.atala.prism.auth.errors.AuthError
-import io.iohk.atala.prism.auth.model.RequestNonce
+import io.iohk.atala.prism.node.auth.errors.AuthError
+import io.iohk.atala.prism.node.auth.model.RequestNonce
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.identity.{PrismDid => DID}
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/AuthenticatorF.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/AuthenticatorF.scala
similarity index 98%
rename from common/src/main/scala/io/iohk/atala/prism/auth/AuthenticatorF.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/AuthenticatorF.scala
index 0468092e4c..4d1fd105f5 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/AuthenticatorF.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/AuthenticatorF.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.auth
+package io.iohk.atala.prism.node.auth
import cats.data.EitherT
import cats.effect.Resource
@@ -8,13 +8,13 @@ import cats.syntax.traverse._
import cats.{Applicative, Comonad, Functor, Monad, MonadThrow}
import derevo.derive
import derevo.tagless.applyK
-import io.iohk.atala.prism.auth.errors._
-import io.iohk.atala.prism.auth.grpc.GrpcAuthenticationHeader
-import io.iohk.atala.prism.auth.utils.DIDUtils
+import io.iohk.atala.prism.node.auth.errors._
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.signature.ECSignature
import io.iohk.atala.prism.identity.{PrismDid => DID}
+import io.iohk.atala.prism.node.auth.grpc.GrpcAuthenticationHeader
+import io.iohk.atala.prism.node.auth.utils.DIDUtils
import io.iohk.atala.prism.protos.node_api
import scalapb.GeneratedMessage
import tofu.Execute
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/AuthenticatorFLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/AuthenticatorFLogs.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/auth/AuthenticatorFLogs.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/AuthenticatorFLogs.scala
index a8620b601d..e86f2e9f13 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/AuthenticatorFLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/AuthenticatorFLogs.scala
@@ -1,9 +1,9 @@
-package io.iohk.atala.prism.auth
+package io.iohk.atala.prism.node.auth
import cats.MonadThrow
import cats.syntax.applicativeError._
-import io.iohk.atala.prism.auth.grpc.GrpcAuthenticationHeader
-import io.iohk.atala.prism.logging.GeneralLoggableInstances._
+import io.iohk.atala.prism.node.auth.grpc.GrpcAuthenticationHeader
+import io.iohk.atala.prism.node.logging.GeneralLoggableInstances._
import io.iohk.atala.prism.identity.PrismDid
import scalapb.GeneratedMessage
import tofu.higherKind.Mid
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/auth/errors/package.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/errors/package.scala
new file mode 100644
index 0000000000..784448f76d
--- /dev/null
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/errors/package.scala
@@ -0,0 +1,36 @@
+package io.iohk.atala.prism.node.auth
+
+import derevo.derive
+import io.grpc.Status
+import io.iohk.atala.prism.node.errors.PrismError
+import tofu.logging.derivation.loggable
+
+package object errors {
+ @derive(loggable)
+ sealed trait AuthError extends PrismError
+
+ final case class SignatureVerificationError() extends AuthError {
+ override def toStatus: Status = {
+ Status.UNAUTHENTICATED.withDescription("Signature Invalid")
+ }
+ }
+
+ final case class UnknownPublicKeyId() extends AuthError {
+ override def toStatus: Status = {
+ Status.UNAUTHENTICATED.withDescription("Unknown public key id")
+ }
+ }
+
+ case object NoCreateDidOperationError extends AuthError {
+ override def toStatus: Status = {
+ Status.UNAUTHENTICATED.withDescription(
+ "Encoded operation does not create a fresh PrismDid"
+ )
+ }
+ }
+
+ final case class InvalidRequest(reason: String) extends AuthError {
+ def toStatus: Status = Status.INVALID_ARGUMENT.withDescription(reason)
+ }
+
+}
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationContext.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationContext.scala
similarity index 97%
rename from common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationContext.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationContext.scala
index 4c8d4b2229..4f22bea198 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationContext.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationContext.scala
@@ -1,14 +1,13 @@
-package io.iohk.atala.prism.auth.grpc
+package io.iohk.atala.prism.node.auth.grpc
import java.util.Base64
import io.grpc.{Context, Metadata}
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.signature.ECSignature
-import io.iohk.atala.prism.auth.model.{AuthToken, RequestNonce}
+import io.iohk.atala.prism.node.auth.model.{AuthToken, RequestNonce}
import io.iohk.atala.prism.identity.{CanonicalPrismDid, LongFormPrismDid, PrismDid}
-
import scala.util.{Failure, Success, Try}
-import io.iohk.atala.prism.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId
private[grpc] object GrpcAuthenticationContext {
// Extension methods to deal with gRPC Metadata in the Scala way
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationHeader.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationHeader.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationHeader.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationHeader.scala
index 69346f9559..a9fdba4f93 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationHeader.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationHeader.scala
@@ -1,10 +1,10 @@
-package io.iohk.atala.prism.auth.grpc
+package io.iohk.atala.prism.node.auth.grpc
import java.util.Base64
import io.grpc.Metadata
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.signature.ECSignature
-import io.iohk.atala.prism.auth.model.RequestNonce
+import io.iohk.atala.prism.node.auth.model.RequestNonce
import io.iohk.atala.prism.identity.{PrismDid => DID}
sealed trait GrpcAuthenticationHeader {
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationHeaderParser.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationHeaderParser.scala
similarity index 88%
rename from common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationHeaderParser.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationHeaderParser.scala
index cfaa6f9482..a7c7b3a235 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticationHeaderParser.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticationHeaderParser.scala
@@ -1,7 +1,7 @@
-package io.iohk.atala.prism.auth.grpc
+package io.iohk.atala.prism.node.auth.grpc
import io.grpc.Context
-import io.iohk.atala.prism.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId
trait GrpcAuthenticationHeaderParser {
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticatorInterceptor.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticatorInterceptor.scala
similarity index 93%
rename from common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticatorInterceptor.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticatorInterceptor.scala
index a43b5d12e7..32d4ea5be4 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcAuthenticatorInterceptor.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcAuthenticatorInterceptor.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.auth.grpc
+package io.iohk.atala.prism.node.auth.grpc
import io.grpc._
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcMetadataContextKeys.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcMetadataContextKeys.scala
similarity index 92%
rename from common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcMetadataContextKeys.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcMetadataContextKeys.scala
index a9166f2bea..79a8960839 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/GrpcMetadataContextKeys.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/GrpcMetadataContextKeys.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.auth.grpc
+package io.iohk.atala.prism.node.auth.grpc
import io.grpc.{Context, Metadata}
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/TraceExposeInterceptor.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/TraceExposeInterceptor.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/auth/grpc/TraceExposeInterceptor.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/TraceExposeInterceptor.scala
index 3c54691742..a00f6d9b78 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/TraceExposeInterceptor.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/TraceExposeInterceptor.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.auth.grpc
+package io.iohk.atala.prism.node.auth.grpc
import io.grpc.ForwardingServerCall.SimpleForwardingServerCall
import io.grpc.{Metadata, ServerCall, ServerCallHandler, ServerInterceptor}
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/TraceReadInterceptor.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/TraceReadInterceptor.scala
similarity index 91%
rename from common/src/main/scala/io/iohk/atala/prism/auth/grpc/TraceReadInterceptor.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/TraceReadInterceptor.scala
index 5324747ebc..0edae3464f 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/grpc/TraceReadInterceptor.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/grpc/TraceReadInterceptor.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.auth.grpc
+package io.iohk.atala.prism.node.auth.grpc
import io.grpc.{Contexts, Metadata, ServerCall, ServerCallHandler, ServerInterceptor}
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/model/package.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/model/package.scala
similarity index 91%
rename from common/src/main/scala/io/iohk/atala/prism/auth/model/package.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/model/package.scala
index f518779657..7c39ff7ece 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/model/package.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/model/package.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.auth
+package io.iohk.atala.prism.node.auth
import java.util.UUID
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/utils/DIDUtils.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/utils/DIDUtils.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/auth/utils/DIDUtils.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/utils/DIDUtils.scala
index 0e602ed73c..0735177a7f 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/utils/DIDUtils.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/utils/DIDUtils.scala
@@ -1,17 +1,17 @@
-package io.iohk.atala.prism.auth.utils
+package io.iohk.atala.prism.node.auth.utils
-import io.iohk.atala.prism.auth.errors._
+import io.iohk.atala.prism.node.auth.errors._
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.ECConfig.{INSTANCE => ECConfig}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.identity.{LongFormPrismDid, PrismDid => DID}
-import io.iohk.atala.prism.interop.toScalaProtos._
+import io.iohk.atala.prism.node.interop.toScalaProtos._
+import io.iohk.atala.prism.node.utils.FutureEither
import io.iohk.atala.prism.protos.AtalaOperation.Operation.CreateDid
import io.iohk.atala.prism.protos.node_models
import io.iohk.atala.prism.protos.node_models.DIDData
import io.iohk.atala.prism.protos.node_models.PublicKey.KeyData.{CompressedEcKeyData, EcKeyData, Empty}
-import io.iohk.atala.prism.utils.FutureEither
-import io.iohk.atala.prism.utils.FutureEither.FutureEitherOps
+import io.iohk.atala.prism.node.utils.FutureEither.FutureEitherOps
import scala.concurrent.{ExecutionContext, Future}
diff --git a/common/src/main/scala/io/iohk/atala/prism/auth/utils/DidWhitelistLoader.scala b/node/src/main/scala/io/iohk/atala/prism/node/auth/utils/DidWhitelistLoader.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/auth/utils/DidWhitelistLoader.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/auth/utils/DidWhitelistLoader.scala
index 26d56e3d48..abe133a1ae 100644
--- a/common/src/main/scala/io/iohk/atala/prism/auth/utils/DidWhitelistLoader.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/auth/utils/DidWhitelistLoader.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.auth.utils
+package io.iohk.atala.prism.node.auth.utils
import com.typesafe.config.Config
import io.iohk.atala.prism.identity.{PrismDid => DID}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/CardanoClient.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/CardanoClient.scala
index a00abc5095..4c6abc6746 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/CardanoClient.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/CardanoClient.scala
@@ -3,20 +3,18 @@ package io.iohk.atala.prism.node.cardano
import cats.{Comonad, Functor}
import cats.effect.Resource
import cats.syntax.comonad._
-import io.iohk.atala.prism.models.{TransactionDetails, TransactionId}
+import io.iohk.atala.prism.node.models.{TransactionDetails, TransactionId, WalletDetails}
import io.iohk.atala.prism.node.cardano.dbsync.CardanoDbSyncClient
import io.iohk.atala.prism.node.cardano.models._
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient
-import io.iohk.atala.prism.node.models.WalletDetails
import tofu.logging.{Logs, ServiceLogging}
import tofu.syntax.monadic._
import cats.syntax.either._
import derevo.derive
import derevo.tagless.applyK
import io.iohk.atala.prism.node.cardano.logs.CardanoClientLogs
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
import tofu.higherKind.Mid
-
import cats.MonadThrow
import cats.effect.kernel.Async
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/CardanoDbSyncClient.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/CardanoDbSyncClient.scala
index 6f118bae5e..557f59bf20 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/CardanoDbSyncClient.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/CardanoDbSyncClient.scala
@@ -2,8 +2,8 @@ package io.iohk.atala.prism.node.cardano.dbsync
import cats.Comonad
import cats.effect.{Async, Resource}
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
-import io.iohk.atala.prism.repositories.TransactorFactory
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.repositories.TransactorFactory
import io.iohk.atala.prism.node.cardano.dbsync.repositories.CardanoBlockRepository
import io.iohk.atala.prism.node.cardano.models.{Block, BlockError}
import tofu.logging.Logs
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/CardanoBlockRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/CardanoBlockRepository.scala
index d7deb9f6c9..b2a3b9f0d3 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/CardanoBlockRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/CardanoBlockRepository.scala
@@ -8,8 +8,8 @@ import derevo.derive
import derevo.tagless.applyK
import doobie.implicits._
import doobie.util.transactor.Transactor
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import io.iohk.atala.prism.node.cardano.dbsync.repositories.daos.{BlockDAO, TransactionDAO}
import io.iohk.atala.prism.node.cardano.dbsync.repositories.logs.CardanoBlockRepositoryLogs
import io.iohk.atala.prism.node.cardano.dbsync.repositories.metrics.CardanoBlockRepositoryMetrics
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/daos/package.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/daos/package.scala
index 1cb71caef9..4e1849fb49 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/daos/package.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/daos/package.scala
@@ -5,8 +5,8 @@ import java.time.Instant
import doobie.{Get, Read}
import doobie.implicits.legacy.instant._
import io.circe.Json
-import io.iohk.atala.prism.daos.BaseDAO
-import io.iohk.atala.prism.models.TransactionId
+import io.iohk.atala.prism.node.repositories.daos.BaseDAO
+import io.iohk.atala.prism.node.models.TransactionId
import io.iohk.atala.prism.node.cardano.models.{BlockHash, BlockHeader, Transaction, TransactionMetadata}
package object daos extends BaseDAO {
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/metrics/CardanoBlockRepositoryMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/metrics/CardanoBlockRepositoryMetrics.scala
index bd3695af55..b923472ae7 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/metrics/CardanoBlockRepositoryMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/metrics/CardanoBlockRepositoryMetrics.scala
@@ -1,8 +1,8 @@
package io.iohk.atala.prism.node.cardano.dbsync.repositories.metrics
import cats.effect.kernel.MonadCancel
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.{DomainTimer, MeasureOps}
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.{DomainTimer, MeasureOps}
import io.iohk.atala.prism.node.cardano.dbsync.repositories.CardanoBlockRepository
import io.iohk.atala.prism.node.cardano.models.{Block, BlockError}
import tofu.higherKind.Mid
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/logs/CardanoClientLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/logs/CardanoClientLogs.scala
index bebb0ce47f..c3eec2d7e8 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/logs/CardanoClientLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/logs/CardanoClientLogs.scala
@@ -3,7 +3,7 @@ package io.iohk.atala.prism.node.cardano.logs
import cats.syntax.apply._
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
-import io.iohk.atala.prism.models.{TransactionDetails, TransactionId}
+import io.iohk.atala.prism.node.models.{TransactionDetails, TransactionId}
import io.iohk.atala.prism.node.cardano.CardanoClient
import io.iohk.atala.prism.node.cardano.models._
import io.iohk.atala.prism.node.models.WalletDetails
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/AtalaObjectMetadata.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/AtalaObjectMetadata.scala
index 50256f7218..2dedd4ec0a 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/AtalaObjectMetadata.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/AtalaObjectMetadata.scala
@@ -2,7 +2,7 @@ package io.iohk.atala.prism.node.cardano.models
import io.circe.{ACursor, Json}
import io.iohk.atala.prism.protos.node_internal
-import io.iohk.atala.prism.utils.BytesOps
+import io.iohk.atala.prism.node.utils.BytesOps
import scala.util.Try
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/BlockHash.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/BlockHash.scala
index 29aac6b30b..5d07c7d338 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/BlockHash.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/BlockHash.scala
@@ -1,7 +1,7 @@
package io.iohk.atala.prism.node.cardano.models
import com.typesafe.config.ConfigMemorySize
-import io.iohk.atala.prism.models.{HashValue, HashValueConfig, HashValueFrom}
+import io.iohk.atala.prism.node.models.{HashValue, HashValueConfig, HashValueFrom}
import tofu.logging.{DictLoggable, LogRenderer}
import scala.collection.compat.immutable.ArraySeq
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/Transaction.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/Transaction.scala
index 539c464024..32f1f133e4 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/Transaction.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/Transaction.scala
@@ -1,6 +1,6 @@
package io.iohk.atala.prism.node.cardano.models
-import io.iohk.atala.prism.models.TransactionId
+import io.iohk.atala.prism.node.models.TransactionId
case class Transaction(
id: TransactionId,
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/WalletId.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/WalletId.scala
index 5e6c7fc39d..c010027782 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/WalletId.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/models/WalletId.scala
@@ -1,7 +1,7 @@
package io.iohk.atala.prism.node.cardano.models
import com.typesafe.config.ConfigMemorySize
-import io.iohk.atala.prism.models.{HashValue, HashValueConfig, HashValueFrom}
+import io.iohk.atala.prism.node.models.{HashValue, HashValueConfig, HashValueFrom}
import tofu.logging.{DictLoggable, LogRenderer}
import scala.collection.compat.immutable.ArraySeq
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/CardanoWalletApiClient.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/CardanoWalletApiClient.scala
index d4b22d157b..d7eee4f2f2 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/CardanoWalletApiClient.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/CardanoWalletApiClient.scala
@@ -7,8 +7,8 @@ import cats.syntax.functor._
import cats.{Applicative, Comonad, Functor}
import derevo.derive
import derevo.tagless.applyK
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
-import io.iohk.atala.prism.models.{TransactionDetails, TransactionId}
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.models.{TransactionDetails, TransactionId}
import io.iohk.atala.prism.node.cardano.models.{Lovelace, Payment, TransactionMetadata, WalletId}
import io.iohk.atala.prism.node.cardano.wallet.api.ApiClient
import io.iohk.atala.prism.node.cardano.wallet.logs.CardanoWalletApiClientLogs
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/ApiClient.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/ApiClient.scala
index 810ea66724..2e46368917 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/ApiClient.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/ApiClient.scala
@@ -5,7 +5,7 @@ import cats.effect.{Async, Resource}
import sttp.client3._
import io.circe.parser.parse
import io.circe.{Decoder, Json}
-import io.iohk.atala.prism.models.{TransactionDetails, TransactionId}
+import io.iohk.atala.prism.node.models.{TransactionDetails, TransactionId}
import io.iohk.atala.prism.node.cardano.models.{Payment, TransactionMetadata, WalletId}
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient.{
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/ApiRequest.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/ApiRequest.scala
index 21f40e76af..449baa93ba 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/ApiRequest.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/ApiRequest.scala
@@ -1,7 +1,7 @@
package io.iohk.atala.prism.node.cardano.wallet.api
import io.circe.syntax._
import io.circe.{Encoder, Json}
-import io.iohk.atala.prism.models.TransactionId
+import io.iohk.atala.prism.node.models.TransactionId
import io.iohk.atala.prism.node.cardano.models.{Payment, TransactionMetadata, WalletId}
import io.iohk.atala.prism.node.cardano.wallet.api.JsonCodecs._
import sttp.model.Method
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/JsonCodecs.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/JsonCodecs.scala
index a436f8777c..562eb421de 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/JsonCodecs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/api/JsonCodecs.scala
@@ -2,11 +2,10 @@ package io.iohk.atala.prism.node.cardano.wallet.api
import io.circe._
import io.circe.generic.semiauto._
-import io.iohk.atala.prism.models.{TransactionDetails, TransactionId, TransactionStatus}
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.cardano.models.{Address, Lovelace, _}
import io.iohk.atala.prism.node.cardano.modeltags
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient.{CardanoWalletError, EstimatedFee}
-import io.iohk.atala.prism.node.models.{Balance, WalletDetails, WalletState}
import shapeless.tag
import shapeless.tag.@@
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/logs/CardanoWalletApiClientLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/logs/CardanoWalletApiClientLogs.scala
index e0f811de25..55188c1e1f 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/logs/CardanoWalletApiClientLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/logs/CardanoWalletApiClientLogs.scala
@@ -3,7 +3,7 @@ package io.iohk.atala.prism.node.cardano.wallet.logs
import cats.syntax.apply._
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
-import io.iohk.atala.prism.models.{TransactionDetails, TransactionId}
+import io.iohk.atala.prism.node.models.{TransactionDetails, TransactionId}
import io.iohk.atala.prism.node.cardano.models.{Payment, TransactionMetadata, WalletId}
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient.Result
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/metrics/CardanoWalletApiClientMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/metrics/CardanoWalletApiClientMetrics.scala
index 00b0510e50..3c9e68b980 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/metrics/CardanoWalletApiClientMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/cardano/wallet/metrics/CardanoWalletApiClientMetrics.scala
@@ -1,8 +1,8 @@
package io.iohk.atala.prism.node.cardano.wallet.metrics
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.{DomainTimer, MeasureOps}
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
-import io.iohk.atala.prism.models.{TransactionDetails, TransactionId}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.{DomainTimer, MeasureOps}
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.models.{TransactionDetails, TransactionId}
import io.iohk.atala.prism.node.cardano.models.{Payment, TransactionMetadata, WalletId}
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient.Result
diff --git a/common/src/main/scala/io/iohk/atala/prism/config/NodeConfig.scala b/node/src/main/scala/io/iohk/atala/prism/node/config/NodeConfig.scala
similarity index 89%
rename from common/src/main/scala/io/iohk/atala/prism/config/NodeConfig.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/config/NodeConfig.scala
index 1dab780e4c..cc078d6ed6 100644
--- a/common/src/main/scala/io/iohk/atala/prism/config/NodeConfig.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/config/NodeConfig.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.config
+package io.iohk.atala.prism.node.config
import com.typesafe.config.Config
diff --git a/common/src/main/scala/io/iohk/atala/prism/db/DbNotificationStreamer.scala b/node/src/main/scala/io/iohk/atala/prism/node/db/DbNotificationStreamer.scala
similarity index 98%
rename from common/src/main/scala/io/iohk/atala/prism/db/DbNotificationStreamer.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/db/DbNotificationStreamer.scala
index 4a1f4fc2cc..815046220b 100644
--- a/common/src/main/scala/io/iohk/atala/prism/db/DbNotificationStreamer.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/db/DbNotificationStreamer.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.db
+package io.iohk.atala.prism.node.db
import cats.effect._
import cats.effect.unsafe.IORuntime
diff --git a/common/src/main/scala/io/iohk/atala/prism/db/TransactorForStreaming.scala b/node/src/main/scala/io/iohk/atala/prism/node/db/TransactorForStreaming.scala
similarity index 85%
rename from common/src/main/scala/io/iohk/atala/prism/db/TransactorForStreaming.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/db/TransactorForStreaming.scala
index 4ef1368905..ee11178034 100644
--- a/common/src/main/scala/io/iohk/atala/prism/db/TransactorForStreaming.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/db/TransactorForStreaming.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.db
+package io.iohk.atala.prism.node.db
import cats.effect.IO
import doobie.Transactor
diff --git a/common/src/main/scala/io/iohk/atala/prism/errors/PrismError.scala b/node/src/main/scala/io/iohk/atala/prism/node/errors/PrismError.scala
similarity index 69%
rename from common/src/main/scala/io/iohk/atala/prism/errors/PrismError.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/errors/PrismError.scala
index d5e5e53709..f8d6e29693 100644
--- a/common/src/main/scala/io/iohk/atala/prism/errors/PrismError.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/errors/PrismError.scala
@@ -1,9 +1,8 @@
-package io.iohk.atala.prism.errors
+package io.iohk.atala.prism.node.errors
-import io.grpc.Status
-
-import io.iohk.atala.prism.protos.credential_models.{AtalaMessage, AtalaErrorMessage}
import com.google.rpc.status.{Status => StatusProto}
+import io.grpc.Status
+import io.iohk.atala.prism.protos.credential_models.{AtalaErrorMessage, AtalaMessage}
trait PrismError {
def toStatus: Status
@@ -18,7 +17,3 @@ trait PrismError {
AtalaMessage().withAtalaErrorMessage(atalaErrorMessage)
}
}
-
-trait PrismServerError extends PrismError {
- def cause: Throwable
-}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/errors/package.scala b/node/src/main/scala/io/iohk/atala/prism/node/errors/package.scala
index 529a757f21..0e3118e8f9 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/errors/package.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/errors/package.scala
@@ -2,7 +2,7 @@ package io.iohk.atala.prism.node
import derevo.derive
import io.grpc.Status
-import io.iohk.atala.prism.models.AtalaOperationId
+import io.iohk.atala.prism.node.models.AtalaOperationId
import io.iohk.atala.prism.node.cardano.models.CardanoWalletError
import tofu.logging.derivation.loggable
import io.iohk.atala.prism.node.models.ProtocolVersion
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/grpc/ProtoCodecs.scala b/node/src/main/scala/io/iohk/atala/prism/node/grpc/ProtoCodecs.scala
index 5b0f279af3..ce280029ed 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/grpc/ProtoCodecs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/grpc/ProtoCodecs.scala
@@ -6,13 +6,13 @@ import io.iohk.atala.prism.protos.models.TimestampInfo
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.ECConfig.{INSTANCE => ECConfig}
-import io.iohk.atala.prism.models.{DidSuffix, Ledger}
+import io.iohk.atala.prism.node.models.{DidSuffix, Ledger}
import io.iohk.atala.prism.protos.common_models
import io.iohk.atala.prism.node.models
import io.iohk.atala.prism.node.models.KeyUsage._
import io.iohk.atala.prism.node.models.nodeState.LedgerData
import io.iohk.atala.prism.protos.node_models
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.utils.syntax._
import java.time.Instant
diff --git a/common/src/main/scala/io/iohk/atala/prism/interop/implicits.scala b/node/src/main/scala/io/iohk/atala/prism/node/interop/implicits.scala
similarity index 92%
rename from common/src/main/scala/io/iohk/atala/prism/interop/implicits.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/interop/implicits.scala
index 405a1956de..80731a0a2b 100644
--- a/common/src/main/scala/io/iohk/atala/prism/interop/implicits.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/interop/implicits.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.interop
+package io.iohk.atala.prism.node.interop
import cats.data.NonEmptyList
import doobie.{Get, Meta, Read, Write}
@@ -6,8 +6,8 @@ import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.{MerkleRoot, Sha256Digest}
import doobie.implicits.legacy.instant._
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.models.{DidSuffix, Ledger, TransactionId}
-import io.iohk.atala.prism.utils.DoobieImplicits.byteArraySeqMeta
+import io.iohk.atala.prism.node.models.{DidSuffix, Ledger, TransactionId}
+import io.iohk.atala.prism.node.utils.DoobieImplicits.byteArraySeqMeta
import java.time.Instant
import scala.collection.compat.immutable.ArraySeq
diff --git a/common/src/main/scala/io/iohk/atala/prism/interop/toScalaProtos.scala b/node/src/main/scala/io/iohk/atala/prism/node/interop/toScalaProtos.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/interop/toScalaProtos.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/interop/toScalaProtos.scala
index 0c57d7cfce..6d8feb0309 100644
--- a/common/src/main/scala/io/iohk/atala/prism/interop/toScalaProtos.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/interop/toScalaProtos.scala
@@ -1,4 +1,5 @@
-package io.iohk.atala.prism.interop
+package io.iohk.atala.prism.node.interop
+
import io.iohk.atala.prism.protos.node_models
import pbandk.MessageKt
diff --git a/common/src/main/scala/io/iohk/atala/prism/logging/GeneralLoggableInstances.scala b/node/src/main/scala/io/iohk/atala/prism/node/logging/GeneralLoggableInstances.scala
similarity index 96%
rename from common/src/main/scala/io/iohk/atala/prism/logging/GeneralLoggableInstances.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/logging/GeneralLoggableInstances.scala
index ae6507bd58..90bf70f666 100644
--- a/common/src/main/scala/io/iohk/atala/prism/logging/GeneralLoggableInstances.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/logging/GeneralLoggableInstances.scala
@@ -1,9 +1,9 @@
-package io.iohk.atala.prism.logging
+package io.iohk.atala.prism.node.logging
import io.grpc.Status
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import tofu.logging._
import tofu.syntax.monoid.TofuSemigroupOps
diff --git a/common/src/main/scala/io/iohk/atala/prism/logging/TraceId.scala b/node/src/main/scala/io/iohk/atala/prism/node/logging/TraceId.scala
similarity index 96%
rename from common/src/main/scala/io/iohk/atala/prism/logging/TraceId.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/logging/TraceId.scala
index 415b98919e..1cb2bcd442 100644
--- a/common/src/main/scala/io/iohk/atala/prism/logging/TraceId.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/logging/TraceId.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.logging
+package io.iohk.atala.prism.node.logging
import cats.data.ReaderT
import cats.effect.IO
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/metrics/NodeReporter.scala b/node/src/main/scala/io/iohk/atala/prism/node/metrics/NodeReporter.scala
index 3184cccd1d..fa00d7f3fd 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/metrics/NodeReporter.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/metrics/NodeReporter.scala
@@ -2,11 +2,11 @@ package io.iohk.atala.prism.node.metrics
import cats.effect.unsafe.IORuntime
import com.typesafe.config.Config
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
import io.iohk.atala.prism.node.cardano.models.WalletId
import io.iohk.atala.prism.node.cardano.{CardanoClient, LAST_SYNCED_BLOCK_NO}
import io.iohk.atala.prism.node.services.{CardanoLedgerService, KeyValueService}
-import io.iohk.atala.prism.tracing.Tracing._
+import io.iohk.atala.prism.node.tracing.Tracing._
import kamon.Kamon
import kamon.metric.{Gauge, PeriodSnapshot}
import kamon.module.MetricReporter
diff --git a/common/src/main/scala/io/iohk/atala/prism/metrics/RequestMeasureUtil.scala b/node/src/main/scala/io/iohk/atala/prism/node/metrics/RequestMeasureUtil.scala
similarity index 98%
rename from common/src/main/scala/io/iohk/atala/prism/metrics/RequestMeasureUtil.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/metrics/RequestMeasureUtil.scala
index 63af4c9195..d67777c82c 100644
--- a/common/src/main/scala/io/iohk/atala/prism/metrics/RequestMeasureUtil.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/metrics/RequestMeasureUtil.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.metrics
+package io.iohk.atala.prism.node.metrics
import cats.instances.future._
import cats.syntax.apply._
diff --git a/common/src/main/scala/io/iohk/atala/prism/metrics/TimeMeasureUtil.scala b/node/src/main/scala/io/iohk/atala/prism/node/metrics/TimeMeasureUtil.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/metrics/TimeMeasureUtil.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/metrics/TimeMeasureUtil.scala
index 3f9138d7e9..bd7ba52c4f 100644
--- a/common/src/main/scala/io/iohk/atala/prism/metrics/TimeMeasureUtil.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/metrics/TimeMeasureUtil.scala
@@ -1,18 +1,17 @@
-package io.iohk.atala.prism.metrics
+package io.iohk.atala.prism.node.metrics
import cats.data.ReaderT
-import cats.effect.{IO, MonadCancel}
import cats.effect.syntax.monadCancel._
-import cats.syntax.functor._
+import cats.effect.{IO, MonadCancel}
import cats.syntax.flatMap._
+import cats.syntax.functor._
import cats.syntax.traverse._
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.{DomainTimer, StartedDomainTimer}
import kamon.Kamon
import kamon.metric.Timer
import kamon.tag.TagSet
-
+import TimeMeasureUtil._
import scala.util.Try
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
trait TimeMeasureMetric[F[_]] {
def startTimer(timer: DomainTimer): F[Try[StartedDomainTimer]]
diff --git a/common/src/main/scala/io/iohk/atala/prism/metrics/UptimeReporter.scala b/node/src/main/scala/io/iohk/atala/prism/node/metrics/UptimeReporter.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/metrics/UptimeReporter.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/metrics/UptimeReporter.scala
index 1aef441ad3..64963f2019 100644
--- a/common/src/main/scala/io/iohk/atala/prism/metrics/UptimeReporter.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/metrics/UptimeReporter.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.metrics
+package io.iohk.atala.prism.node.metrics
import com.typesafe.config.Config
import kamon.Kamon
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectId.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectId.scala
index 3eee43315e..a651bf12a1 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectId.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectId.scala
@@ -2,7 +2,7 @@ package io.iohk.atala.prism.node.models
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
import io.iohk.atala.prism.protos.node_internal
-import io.iohk.atala.prism.utils.BytesOps
+import io.iohk.atala.prism.node.utils.BytesOps
import tofu.logging.{DictLoggable, LogRenderer}
case class AtalaObjectId(value: Vector[Byte]) {
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectInfo.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectInfo.scala
index e4d7c8e257..665f154d64 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectInfo.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectInfo.scala
@@ -1,6 +1,6 @@
package io.iohk.atala.prism.node.models
-import io.iohk.atala.prism.models.TransactionInfo
+import io.iohk.atala.prism.node.models.TransactionInfo
import io.iohk.atala.prism.node.cardano.TX_METADATA_MAX_SIZE
import io.iohk.atala.prism.node.cardano.models.AtalaObjectMetadata
import io.iohk.atala.prism.node.operations.{Operation, parseOperationsFromByteContent}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectTransactionSubmission.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectTransactionSubmission.scala
index 6febef3282..8902f0193c 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectTransactionSubmission.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaObjectTransactionSubmission.scala
@@ -5,7 +5,7 @@ import derevo.derive
import java.time.Instant
import enumeratum.EnumEntry.UpperSnakecase
import enumeratum.{Enum, EnumEntry}
-import io.iohk.atala.prism.models.{Ledger, TransactionId}
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId}
import tofu.logging.derivation.loggable
import scala.collection.immutable.IndexedSeq
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/AtalaOperationId.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaOperationId.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/models/AtalaOperationId.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/AtalaOperationId.scala
index 023afd9e60..681dcd6e14 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/AtalaOperationId.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/AtalaOperationId.scala
@@ -1,10 +1,10 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import com.google.protobuf.ByteString
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
import io.iohk.atala.prism.protos.node_models
-import io.iohk.atala.prism.utils.BytesOps
import tofu.logging.{DictLoggable, LogRenderer}
+import io.iohk.atala.prism.node.utils.BytesOps
import java.util.UUID
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/BlockInfo.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/BlockInfo.scala
similarity index 93%
rename from common/src/main/scala/io/iohk/atala/prism/models/BlockInfo.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/BlockInfo.scala
index a225cefe16..ee62ded345 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/BlockInfo.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/BlockInfo.scala
@@ -1,9 +1,9 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
+import com.google.protobuf.timestamp.Timestamp
import derevo.derive
-import tofu.logging.derivation.loggable
import io.iohk.atala.prism.protos.common_models
-import com.google.protobuf.timestamp.Timestamp
+import tofu.logging.derivation.loggable
import java.time.Instant
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/DidSuffix.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/DidSuffix.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/models/DidSuffix.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/DidSuffix.scala
index 57a6111679..68585ae0d3 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/DidSuffix.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/DidSuffix.scala
@@ -1,9 +1,9 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import io.iohk.atala.prism.crypto.Sha256Digest
-import scala.util.{Failure, Success, Try}
import scala.util.matching.Regex
+import scala.util.{Failure, Success, Try}
case class DidSuffix(value: String) extends AnyVal {
def getValue: String = value
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/HashValue.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/HashValue.scala
similarity index 92%
rename from common/src/main/scala/io/iohk/atala/prism/models/HashValue.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/HashValue.scala
index a6689cb7c5..63c72aa561 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/HashValue.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/HashValue.scala
@@ -1,12 +1,11 @@
-package io.iohk.atala.prism.models
-
-import java.util.Locale
+package io.iohk.atala.prism.node.models
import com.typesafe.config.ConfigMemorySize
-import io.iohk.atala.prism.utils.BytesOps
+import java.util.Locale
import scala.collection.compat.immutable.ArraySeq
import scala.util.matching.Regex
+import io.iohk.atala.prism.node.utils.BytesOps
trait HashValue extends Any {
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/IdType.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/IdType.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/models/IdType.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/IdType.scala
index abba882e06..a77727248e 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/IdType.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/IdType.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import io.iohk.atala.prism.crypto.Sha256
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/Ledger.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/Ledger.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/models/Ledger.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/Ledger.scala
index b69e0fc2b6..f71c597eb4 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/Ledger.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/Ledger.scala
@@ -1,9 +1,9 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import derevo.derive
import enumeratum.{Enum, EnumEntry}
-import tofu.logging.derivation.loggable
import io.iohk.atala.prism.protos.common_models
+import tofu.logging.derivation.loggable
@derive(loggable)
sealed trait Ledger extends EnumEntry {
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/TransactionDetails.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/TransactionDetails.scala
similarity index 79%
rename from common/src/main/scala/io/iohk/atala/prism/models/TransactionDetails.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/TransactionDetails.scala
index 0c71ac8841..342412656c 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/TransactionDetails.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/TransactionDetails.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import derevo.derive
import tofu.logging.derivation.loggable
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/TransactionId.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/TransactionId.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/models/TransactionId.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/TransactionId.scala
index 1ee00fb1b8..746b84f2be 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/TransactionId.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/TransactionId.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import com.typesafe.config.ConfigMemorySize
import tofu.logging.{DictLoggable, LogRenderer}
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/TransactionInfo.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/TransactionInfo.scala
similarity index 92%
rename from common/src/main/scala/io/iohk/atala/prism/models/TransactionInfo.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/TransactionInfo.scala
index 79f6ff54eb..fb213fae90 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/TransactionInfo.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/TransactionInfo.scala
@@ -1,8 +1,8 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import derevo.derive
-import tofu.logging.derivation.loggable
import io.iohk.atala.prism.protos.common_models
+import tofu.logging.derivation.loggable
@derive(loggable)
case class TransactionInfo(
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/TransactionStatus.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/TransactionStatus.scala
similarity index 92%
rename from common/src/main/scala/io/iohk/atala/prism/models/TransactionStatus.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/TransactionStatus.scala
index 46864cce29..31a4a71900 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/TransactionStatus.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/TransactionStatus.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import derevo.derive
import enumeratum.EnumEntry.Snakecase
diff --git a/common/src/main/scala/io/iohk/atala/prism/models/UUIDValue.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/UUIDValue.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/models/UUIDValue.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/models/UUIDValue.scala
index b88f830617..4b5e96b02a 100644
--- a/common/src/main/scala/io/iohk/atala/prism/models/UUIDValue.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/UUIDValue.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import java.util.UUID
import scala.util.Try
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/models/package.scala b/node/src/main/scala/io/iohk/atala/prism/node/models/package.scala
index 885ed14d76..f712af042f 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/models/package.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/models/package.scala
@@ -7,7 +7,6 @@ import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.{MerkleRoot, Sha256Digest}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.models.{AtalaOperationId, DidSuffix, IdType, Ledger, TransactionId}
import io.iohk.atala.prism.protos.node_models
import tofu.logging.derivation.loggable
diff --git a/common/src/main/scala/io/iohk/atala/prism/nonce/ClientHelper.scala b/node/src/main/scala/io/iohk/atala/prism/node/nonce/ClientHelper.scala
similarity index 82%
rename from common/src/main/scala/io/iohk/atala/prism/nonce/ClientHelper.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/nonce/ClientHelper.scala
index 1a1f74a926..d8f96c4848 100644
--- a/common/src/main/scala/io/iohk/atala/prism/nonce/ClientHelper.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/nonce/ClientHelper.scala
@@ -1,7 +1,7 @@
-package io.iohk.atala.prism.nonce
+package io.iohk.atala.prism.node.nonce
-import io.iohk.atala.prism.auth.grpc.GrpcAuthenticationHeader
-import io.iohk.atala.prism.auth.model.RequestNonce
+import io.iohk.atala.prism.node.auth.grpc.GrpcAuthenticationHeader
+import io.iohk.atala.prism.node.auth.model.RequestNonce
import io.iohk.atala.prism.crypto.keys.ECPrivateKey
import io.iohk.atala.prism.crypto.signature.ECSignature
import io.iohk.atala.prism.identity.{PrismDid => DID}
diff --git a/common/src/main/scala/io/iohk/atala/prism/nonce/RequestAuthenticator.scala b/node/src/main/scala/io/iohk/atala/prism/node/nonce/RequestAuthenticator.scala
similarity index 83%
rename from common/src/main/scala/io/iohk/atala/prism/nonce/RequestAuthenticator.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/nonce/RequestAuthenticator.scala
index 2c9c675250..4389d63053 100644
--- a/common/src/main/scala/io/iohk/atala/prism/nonce/RequestAuthenticator.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/nonce/RequestAuthenticator.scala
@@ -1,6 +1,6 @@
-package io.iohk.atala.prism.nonce
+package io.iohk.atala.prism.node.nonce
-import io.iohk.atala.prism.auth.model.RequestNonce
+import io.iohk.atala.prism.node.auth.model.RequestNonce
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.ECPrivateKey
@@ -8,8 +8,6 @@ import java.util.Base64
class RequestAuthenticator {
- /** Signs the connector request, returning the encoded signature and nonce.
- */
def signConnectorRequest(
request: Array[Byte],
privateKey: ECPrivateKey,
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/operations/CreateDIDOperation.scala b/node/src/main/scala/io/iohk/atala/prism/node/operations/CreateDIDOperation.scala
index b5ad387a67..513fb2df1c 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/operations/CreateDIDOperation.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/operations/CreateDIDOperation.scala
@@ -6,7 +6,7 @@ import doobie.free.connection.ConnectionIO
import doobie.implicits._
import doobie.postgres.sqlstate
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.KeyUsage.MasterKey
import io.iohk.atala.prism.node.models.nodeState.LedgerData
import io.iohk.atala.prism.node.models.{DIDPublicKey, DIDService, ProtocolConstants}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/operations/DeactivateDIDOperation.scala b/node/src/main/scala/io/iohk/atala/prism/node/operations/DeactivateDIDOperation.scala
index ddf53fc3d1..67197780a1 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/operations/DeactivateDIDOperation.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/operations/DeactivateDIDOperation.scala
@@ -3,7 +3,7 @@ package io.iohk.atala.prism.node.operations
import cats.data.EitherT
import doobie.free.connection.{ConnectionIO, unit}
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.nodeState.DIDPublicKeyState
import io.iohk.atala.prism.node.models.{KeyUsage, nodeState}
import io.iohk.atala.prism.node.operations.path.{Path, ValueAtPath}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/operations/IssueCredentialBatchOperation.scala b/node/src/main/scala/io/iohk/atala/prism/node/operations/IssueCredentialBatchOperation.scala
index ea41b811d7..a1e70e4ede 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/operations/IssueCredentialBatchOperation.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/operations/IssueCredentialBatchOperation.scala
@@ -7,7 +7,7 @@ import doobie.implicits._
import doobie.postgres.sqlstate
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.{MerkleRoot, Sha256, Sha256Digest}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.nodeState
import io.iohk.atala.prism.node.models.nodeState.{DIDPublicKeyState, LedgerData}
import io.iohk.atala.prism.node.operations.path.{Path, ValueAtPath}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/operations/ParsingUtils.scala b/node/src/main/scala/io/iohk/atala/prism/node/operations/ParsingUtils.scala
index ca39b38bf9..f560b25d5e 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/operations/ParsingUtils.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/operations/ParsingUtils.scala
@@ -6,12 +6,12 @@ import com.google.protobuf.ByteString
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.Sha256Digest
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.{DIDPublicKey, DIDService, KeyUsage, ProtocolConstants}
import io.iohk.atala.prism.node.operations.ValidationError.{InvalidValue, MissingValue}
import io.iohk.atala.prism.node.operations.path.ValueAtPath
import io.iohk.atala.prism.protos.{common_models, node_models}
-import io.iohk.atala.prism.utils.UriUtils
+import io.iohk.atala.prism.node.utils.UriUtils
import io.circe.parser.{parse => parseJson}
import io.circe.Json
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/operations/ProtocolVersionUpdateOperation.scala b/node/src/main/scala/io/iohk/atala/prism/node/operations/ProtocolVersionUpdateOperation.scala
index 6a20987645..e28788382d 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/operations/ProtocolVersionUpdateOperation.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/operations/ProtocolVersionUpdateOperation.scala
@@ -5,7 +5,7 @@ import doobie.free.connection.ConnectionIO
import doobie.implicits.toDoobieApplicativeErrorOps
import doobie.postgres.sqlstate
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.cardano.LAST_SYNCED_BLOCK_NO
import io.iohk.atala.prism.node.models.KeyUsage.MasterKey
import io.iohk.atala.prism.node.models.nodeState._
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/operations/RevokeCredentialsOperation.scala b/node/src/main/scala/io/iohk/atala/prism/node/operations/RevokeCredentialsOperation.scala
index 3e86364ce2..95960634d3 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/operations/RevokeCredentialsOperation.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/operations/RevokeCredentialsOperation.scala
@@ -8,7 +8,7 @@ import doobie.free.connection.ConnectionIO
import doobie.implicits._
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.nodeState
import io.iohk.atala.prism.node.models.nodeState.{DIDPublicKeyState, LedgerData}
import io.iohk.atala.prism.node.operations.path.{Path, ValueAtPath}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/operations/UpdateDIDOperation.scala b/node/src/main/scala/io/iohk/atala/prism/node/operations/UpdateDIDOperation.scala
index 04bc58dcf4..0c04f70735 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/operations/UpdateDIDOperation.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/operations/UpdateDIDOperation.scala
@@ -6,7 +6,7 @@ import doobie.free.connection.{ConnectionIO, unit}
import doobie.implicits._
import doobie.postgres.sqlstate
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.nodeState.{DIDPublicKeyState, LedgerData}
import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.operations.StateError
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/operations/package.scala b/node/src/main/scala/io/iohk/atala/prism/node/operations/package.scala
index 8637f601a9..243f1178f7 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/operations/package.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/operations/package.scala
@@ -8,7 +8,7 @@ import doobie.postgres.sqlstate
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.Sha256Digest
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.models.{DidSuffix, Ledger, TransactionId}
+import io.iohk.atala.prism.node.models.{DidSuffix, Ledger, TransactionId}
import io.iohk.atala.prism.node.models.ProtocolVersion
import io.iohk.atala.prism.node.models.nodeState.LedgerData
import io.iohk.atala.prism.node.operations.StateError.{CannotUpdateMetric, UnsupportedOperation}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/AtalaObjectsTransactionsRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/AtalaObjectsTransactionsRepository.scala
index 4182a0a24e..6965180503 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/AtalaObjectsTransactionsRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/AtalaObjectsTransactionsRepository.scala
@@ -8,8 +8,8 @@ import derevo.tagless.applyK
import doobie.free.connection
import doobie.implicits._
import doobie.util.transactor.Transactor
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
-import io.iohk.atala.prism.models.{Ledger, TransactionId, TransactionInfo, TransactionStatus}
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId, TransactionInfo, TransactionStatus}
import io.iohk.atala.prism.node.PublicationInfo
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models._
@@ -19,7 +19,7 @@ import io.iohk.atala.prism.node.repositories.logs.AtalaObjectsTransactionsReposi
import io.iohk.atala.prism.node.repositories.metrics.AtalaObjectsTransactionsRepositoryMetrics
import io.iohk.atala.prism.node.repositories.utils.connectionIOSafe
import io.iohk.atala.prism.node.services.models.AtalaObjectNotification
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import tofu.higherKind.Mid
import tofu.logging.{Logs, ServiceLogging}
import tofu.syntax.monoid.TofuSemigroupOps
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/AtalaOperationsRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/AtalaOperationsRepository.scala
index 0581d32771..dc5bbbb246 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/AtalaOperationsRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/AtalaOperationsRepository.scala
@@ -12,8 +12,8 @@ import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.repositories.daos.{AtalaObjectsDAO, AtalaOperationsDAO}
import io.iohk.atala.prism.node.repositories.daos.AtalaObjectsDAO.AtalaObjectCreateData
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
import io.iohk.atala.prism.node.repositories.logs.AtalaOperationsRepositoryLogs
import io.iohk.atala.prism.node.repositories.metrics.AtalaOperationsRepositoryMetrics
import io.iohk.atala.prism.node.repositories.utils.connectionIOSafe
@@ -22,7 +22,7 @@ import tofu.higherKind.Mid
import tofu.logging.{Logs, ServiceLogging}
import tofu.syntax.monoid.TofuSemigroupOps
import cats.effect.MonadCancelThrow
-import io.iohk.atala.prism.models.AtalaOperationId
+import io.iohk.atala.prism.node.models.AtalaOperationId
@derive(applyK)
trait AtalaOperationsRepository[F[_]] {
diff --git a/common/src/main/scala/io/iohk/atala/prism/repositories/ConnectionIOErrorHandlers.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/ConnectionIOErrorHandlers.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/repositories/ConnectionIOErrorHandlers.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/repositories/ConnectionIOErrorHandlers.scala
index 6381f38166..584b08ec24 100644
--- a/common/src/main/scala/io/iohk/atala/prism/repositories/ConnectionIOErrorHandlers.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/ConnectionIOErrorHandlers.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.repositories
+package io.iohk.atala.prism.node.repositories
import cats.syntax.applicativeError._
import doobie.ConnectionIO
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/CredentialBatchesRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/CredentialBatchesRepository.scala
index d0031a6439..3d830d2593 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/CredentialBatchesRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/CredentialBatchesRepository.scala
@@ -14,10 +14,10 @@ import io.iohk.atala.prism.crypto.Sha256Digest
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.nodeState.{CredentialBatchState, LedgerData}
import io.iohk.atala.prism.node.repositories.daos.CredentialBatchesDAO
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
import io.iohk.atala.prism.node.repositories.logs.CredentialBatchesRepositoryLogs
import io.iohk.atala.prism.node.repositories.metrics.CredentialBatchesRepositoryMetrics
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import tofu.higherKind.Mid
import tofu.logging.{Logs, ServiceLogging}
import tofu.syntax.monoid.TofuSemigroupOps
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/DIDDataRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/DIDDataRepository.scala
index 6ade7ef023..c395830f6e 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/DIDDataRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/DIDDataRepository.scala
@@ -12,14 +12,14 @@ import doobie.free.connection.ConnectionIO
import doobie.implicits._
import doobie.util.transactor.Transactor
import io.iohk.atala.prism.identity.{CanonicalPrismDid => DID}
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.nodeState.{DIDDataState, DIDPublicKeyState, DIDServiceState}
import io.iohk.atala.prism.node.repositories.daos.{DIDDataDAO, PublicKeysDAO, ServicesDAO, ContextDAO}
import io.iohk.atala.prism.node.repositories.logs.DIDDataRepositoryLogs
import io.iohk.atala.prism.node.repositories.metrics.DIDDataRepositoryMetrics
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import tofu.higherKind.Mid
import tofu.logging.{Logs, ServiceLogging}
import tofu.syntax.monoid.TofuSemigroupOps
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/KeyValuesRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/KeyValuesRepository.scala
index 2671d52d75..687877dcd9 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/KeyValuesRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/KeyValuesRepository.scala
@@ -7,8 +7,8 @@ import derevo.derive
import derevo.tagless.applyK
import doobie.implicits._
import doobie.util.transactor.Transactor
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import io.iohk.atala.prism.node.repositories.daos.KeyValuesDAO
import io.iohk.atala.prism.node.repositories.daos.KeyValuesDAO.KeyValue
import io.iohk.atala.prism.node.repositories.logs.KeyValuesRepositoryLogs
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/MetricsCountersRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/MetricsCountersRepository.scala
index c7e5a69a49..59c564ac54 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/MetricsCountersRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/MetricsCountersRepository.scala
@@ -7,11 +7,11 @@ import derevo.derive
import derevo.tagless.applyK
import doobie.implicits._
import doobie.util.transactor.Transactor
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
import io.iohk.atala.prism.node.repositories.daos.MetricsCountersDAO
import io.iohk.atala.prism.node.repositories.logs.MetricsCountersRepositoryLogs
import io.iohk.atala.prism.node.repositories.metrics.MetricsCountersRepositoryMetrics
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import tofu.higherKind.Mid
import tofu.logging.{Logs, ServiceLogging}
import tofu.syntax.monoid.TofuSemigroupOps
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/ProtocolVersionRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/ProtocolVersionRepository.scala
index f70d596a8a..33cfd479c9 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/ProtocolVersionRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/ProtocolVersionRepository.scala
@@ -7,13 +7,13 @@ import derevo.derive
import derevo.tagless.applyK
import doobie.implicits._
import doobie.util.transactor.Transactor
-import io.iohk.atala.prism.metrics.TimeMeasureMetric
+import io.iohk.atala.prism.node.metrics.TimeMeasureMetric
import io.iohk.atala.prism.node.models.{ProtocolVersion, ProtocolVersionInfo}
import io.iohk.atala.prism.node.operations.protocolVersion.ifNodeSupportsProtocolVersion
import io.iohk.atala.prism.node.repositories.daos.ProtocolVersionsDAO
import io.iohk.atala.prism.node.repositories.logs.ProtocolVersionRepositoryLogs
import io.iohk.atala.prism.node.repositories.metrics.ProtocolVersionRepositoryMetrics
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import tofu.higherKind.Mid
import tofu.logging.{Logs, ServiceLogging}
import tofu.syntax.monoid.TofuSemigroupOps
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/RequestNoncesRepository.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/RequestNoncesRepository.scala
index e3573ddea9..c18f757c9c 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/RequestNoncesRepository.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/RequestNoncesRepository.scala
@@ -8,11 +8,11 @@ import derevo.derive
import derevo.tagless.applyK
import doobie.implicits._
import doobie.util.transactor.Transactor
-import io.iohk.atala.prism.auth.model.RequestNonce
+import io.iohk.atala.prism.node.auth.model.RequestNonce
import io.iohk.atala.prism.identity.{PrismDid => DID}
import io.iohk.atala.prism.node.repositories.daos.RequestNoncesDAO
import io.iohk.atala.prism.node.repositories.logs.RequestNoncesRepositoryLogs
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import tofu.higherKind.Mid
import tofu.logging.{Logs, ServiceLogging}
diff --git a/common/src/main/scala/io/iohk/atala/prism/repositories/SchemaMigrations.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/SchemaMigrations.scala
similarity index 91%
rename from common/src/main/scala/io/iohk/atala/prism/repositories/SchemaMigrations.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/repositories/SchemaMigrations.scala
index 8217ae6f20..b51d7e55cd 100644
--- a/common/src/main/scala/io/iohk/atala/prism/repositories/SchemaMigrations.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/SchemaMigrations.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.repositories
+package io.iohk.atala.prism.node.repositories
import org.flywaydb.core.Flyway
diff --git a/common/src/main/scala/io/iohk/atala/prism/repositories/TransactorFactory.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/TransactorFactory.scala
similarity index 98%
rename from common/src/main/scala/io/iohk/atala/prism/repositories/TransactorFactory.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/repositories/TransactorFactory.scala
index fc90029e04..d82415feb0 100644
--- a/common/src/main/scala/io/iohk/atala/prism/repositories/TransactorFactory.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/TransactorFactory.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.repositories
+package io.iohk.atala.prism.node.repositories
import cats.effect.{Async, Resource, Sync}
import com.zaxxer.hikari.HikariConfig
@@ -57,7 +57,6 @@ object TransactorFactory {
hikariConfig.setLeakDetectionThreshold(300000) // 5 mins
hikariConfig.setMinimumIdle(poolSize)
hikariConfig.setMaximumPoolSize(poolSize) // Maximum pool size and minimum idle should be the same, as recommended
-
for {
// Resource yielding a transactor configured with a bounded connect EC and an unbounded
// transaction EC. Everything will be closed and shut down cleanly after use.
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectTransactionSubmissionsDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectTransactionSubmissionsDAO.scala
index be39f39654..b6c1cd38a1 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectTransactionSubmissionsDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectTransactionSubmissionsDAO.scala
@@ -5,9 +5,7 @@ import doobie.implicits._
import doobie.util.fragment.Fragment
import cats.syntax.functor._
import doobie.implicits.legacy.instant._
-import io.iohk.atala.prism.models.{Ledger, TransactionId, TransactionInfo}
-import io.iohk.atala.prism.node.models.{AtalaObjectTransactionSubmission, AtalaObjectTransactionSubmissionStatus}
-
+import io.iohk.atala.prism.node.models._
import java.time.Instant
object AtalaObjectTransactionSubmissionsDAO {
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectsDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectsDAO.scala
index 984fd9aa65..93b8cc85e5 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectsDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectsDAO.scala
@@ -8,8 +8,7 @@ import doobie.free.connection.{ConnectionIO, unit}
import doobie.implicits._
import doobie.implicits.legacy.instant._
import doobie.util.fragments.in
-import io.iohk.atala.prism.models.{TransactionId, TransactionInfo}
-import io.iohk.atala.prism.node.models.{AtalaObjectId, AtalaObjectInfo, AtalaObjectStatus}
+import io.iohk.atala.prism.node.models._
import java.time.Instant
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaOperationsDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaOperationsDAO.scala
index 5a2ab509a4..0ee58a740a 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaOperationsDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/AtalaOperationsDAO.scala
@@ -6,7 +6,7 @@ import doobie.Fragments.in
import doobie.free.connection.{ConnectionIO, unit}
import doobie.implicits._
import doobie.util.update.Update
-import io.iohk.atala.prism.models.AtalaOperationId
+import io.iohk.atala.prism.node.models.AtalaOperationId
import io.iohk.atala.prism.node.models.{AtalaObjectId, AtalaOperationInfo, AtalaOperationStatus}
object AtalaOperationsDAO {
diff --git a/common/src/main/scala/io/iohk/atala/prism/daos/BaseDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/BaseDAO.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/daos/BaseDAO.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/BaseDAO.scala
index f38355e91d..7973007798 100644
--- a/common/src/main/scala/io/iohk/atala/prism/daos/BaseDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/BaseDAO.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.daos
+package io.iohk.atala.prism.node.repositories.daos
import doobie.util.invariant.InvalidEnum
import doobie.{Get, Meta, Put}
@@ -8,7 +8,7 @@ import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.{Sha256Digest => SHA256Digest}
import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.models.{AtalaOperationId, Ledger, TransactionId, UUIDValue}
+import io.iohk.atala.prism.node.models.{AtalaOperationId, Ledger, TransactionId, UUIDValue}
import java.util.UUID
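BaseDAO is where the node keeps its doobie Get/Meta/Put instances for domain identifiers such as AtalaOperationId and TransactionId. The usual pattern is a timap over an existing Meta; a sketch with a hypothetical wrapper type (OperationRef is not a real type in the repository):

    import doobie.Meta
    import java.util.UUID

    // Hypothetical wrapper, shown only to illustrate the timap pattern the DAO relies on.
    final case class OperationRef(uuid: UUID)

    implicit val operationRefMeta: Meta[OperationRef] =
      Meta[String].timap(s => OperationRef(UUID.fromString(s)))(_.uuid.toString)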
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ContextDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ContextDAO.scala
index 10fb882f42..7f6b6ec871 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ContextDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ContextDAO.scala
@@ -4,9 +4,9 @@ import cats.syntax.functor._
import doobie.free.connection.ConnectionIO
import doobie.implicits._
import doobie.implicits.legacy.instant._
-import io.iohk.atala.prism.models.{DidSuffix, IdType}
+import io.iohk.atala.prism.node.models.{DidSuffix, IdType}
import io.iohk.atala.prism.node.models.nodeState.LedgerData
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.utils.syntax._
object ContextDAO {
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/CredentialBatchesDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/CredentialBatchesDAO.scala
index d42e28c764..45e3e15ddb 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/CredentialBatchesDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/CredentialBatchesDAO.scala
@@ -5,15 +5,15 @@ import cats.syntax.functor._
import doobie.Update
import doobie.free.connection.ConnectionIO
import doobie.implicits._
+import doobie.implicits.legacy.instant._
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.MerkleRoot
import io.iohk.atala.prism.crypto.Sha256Digest
-import io.iohk.atala.prism.models.{DidSuffix, Ledger, TransactionId}
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.models.nodeState.{CredentialBatchState, LedgerData}
import io.iohk.atala.prism.node.repositories.daos._
-import doobie.implicits.legacy.instant._
-import io.iohk.atala.prism.interop.implicits._
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.interop.implicits._
+import io.iohk.atala.prism.node.utils.syntax._
object CredentialBatchesDAO {
case class CreateCredentialBatchData(
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/DIDDataDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/DIDDataDAO.scala
index 771ffcff75..59f8c478ec 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/DIDDataDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/DIDDataDAO.scala
@@ -5,9 +5,9 @@ import doobie.implicits._
import doobie.implicits.legacy.instant._
import cats.syntax.functor._
import io.iohk.atala.prism.crypto.Sha256Digest
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.nodeState.LedgerData
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.utils.syntax._
object DIDDataDAO {
def insert(
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ProtocolVersionsDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ProtocolVersionsDAO.scala
index 2e9662fcc3..b189d81f00 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ProtocolVersionsDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ProtocolVersionsDAO.scala
@@ -3,7 +3,7 @@ package io.iohk.atala.prism.node.repositories.daos
import cats.syntax.functor._
import doobie.free.connection.ConnectionIO
import doobie.implicits._
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.ProtocolVersion.InitialProtocolVersion
import io.iohk.atala.prism.node.models.ProtocolVersionInfo.InitialProtocolVersionInfo
import io.iohk.atala.prism.node.models.nodeState.LedgerData
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/PublicKeysDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/PublicKeysDAO.scala
index 32a39e56cf..9bf4c53825 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/PublicKeysDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/PublicKeysDAO.scala
@@ -5,10 +5,10 @@ import doobie.free.connection.ConnectionIO
import doobie.implicits._
import doobie.implicits.legacy.instant._
import io.iohk.atala.prism.crypto.ECConfig.{INSTANCE => ECConfig}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.DIDPublicKey
import io.iohk.atala.prism.node.models.nodeState.{DIDPublicKeyState, LedgerData}
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.utils.syntax._
import java.time.Instant
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/RequestNoncesDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/RequestNoncesDAO.scala
index 908baad975..865749cbdb 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/RequestNoncesDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/RequestNoncesDAO.scala
@@ -1,7 +1,7 @@
package io.iohk.atala.prism.node.repositories.daos
import doobie.implicits._
-import io.iohk.atala.prism.auth.model.RequestNonce
+import io.iohk.atala.prism.node.auth.model.RequestNonce
import io.iohk.atala.prism.identity.{PrismDid => DID}
object RequestNoncesDAO {
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ServicesDAO.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ServicesDAO.scala
index 5c46d8630d..2668b0c9b6 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ServicesDAO.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/ServicesDAO.scala
@@ -4,10 +4,9 @@ import cats.syntax.functor._
import doobie.free.connection.ConnectionIO
import doobie.implicits._
import doobie.implicits.legacy.instant._
-import io.iohk.atala.prism.models.{DidSuffix, IdType}
-import io.iohk.atala.prism.node.models.DIDService
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.models.nodeState.{DIDServiceState, LedgerData}
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.utils.syntax._
object ServicesDAO {
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/package.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/package.scala
index 269360fa0f..2384ea413d 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/package.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/daos/package.scala
@@ -9,12 +9,10 @@ import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.ECConfig.{INSTANCE => ECConfig}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.{MerkleRoot, Sha256Digest}
-import io.iohk.atala.prism.daos.BaseDAO
-import io.iohk.atala.prism.models._
import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.models.nodeState.{CredentialBatchState, DIDPublicKeyState, DIDServiceState, LedgerData}
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.utils.syntax._
import java.time.Instant
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/AtalaObjectsTransactionsRepositoryLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/AtalaObjectsTransactionsRepositoryLogs.scala
index c3497e103a..df1ad3e4bf 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/AtalaObjectsTransactionsRepositoryLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/AtalaObjectsTransactionsRepositoryLogs.scala
@@ -4,21 +4,14 @@ import cats.syntax.apply._
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
import cats.syntax.traverse._
-import io.iohk.atala.prism.models.{Ledger, TransactionId, TransactionInfo}
import io.iohk.atala.prism.node.PublicationInfo
import io.iohk.atala.prism.node.errors.NodeError
-import io.iohk.atala.prism.node.models.{
- AtalaObjectInfo,
- AtalaObjectStatus,
- AtalaObjectTransactionSubmission,
- AtalaObjectTransactionSubmissionStatus
-}
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.repositories.AtalaObjectsTransactionsRepository
import io.iohk.atala.prism.node.services.models.AtalaObjectNotification
import tofu.higherKind.Mid
import tofu.logging.ServiceLogging
import tofu.syntax.logging._
-
import java.time.Duration
import cats.MonadThrow
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/AtalaOperationsRepositoryLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/AtalaOperationsRepositoryLogs.scala
index 858ca33d85..897f097e0c 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/AtalaOperationsRepositoryLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/AtalaOperationsRepositoryLogs.scala
@@ -11,7 +11,7 @@ import tofu.higherKind.Mid
import tofu.logging.ServiceLogging
import tofu.syntax.logging._
import cats.MonadThrow
-import io.iohk.atala.prism.models.AtalaOperationId
+import io.iohk.atala.prism.node.models.AtalaOperationId
import io.iohk.atala.prism.node.errors.NodeError
private[repositories] final class AtalaOperationsRepositoryLogs[F[
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/CredentialBatchesRepositoryLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/CredentialBatchesRepositoryLogs.scala
index e8339fdfad..514056e202 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/CredentialBatchesRepositoryLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/CredentialBatchesRepositoryLogs.scala
@@ -8,7 +8,7 @@ import io.iohk.atala.prism.crypto.Sha256Digest
import io.iohk.atala.prism.node.errors
import io.iohk.atala.prism.node.models.nodeState
import io.iohk.atala.prism.node.repositories.CredentialBatchesRepository
-import io.iohk.atala.prism.logging.GeneralLoggableInstances._
+import io.iohk.atala.prism.node.logging.GeneralLoggableInstances._
import tofu.higherKind.Mid
import tofu.logging.ServiceLogging
import tofu.syntax.logging._
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/RequestNoncesRepositoryLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/RequestNoncesRepositoryLogs.scala
index 36b41d0365..638e87923a 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/RequestNoncesRepositoryLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/logs/RequestNoncesRepositoryLogs.scala
@@ -4,7 +4,7 @@ import cats.MonadThrow
import cats.syntax.applicativeError._
import cats.syntax.apply._
import cats.syntax.flatMap._
-import io.iohk.atala.prism.auth.model
+import io.iohk.atala.prism.node.auth.model
import io.iohk.atala.prism.identity.{PrismDid => DID}
import io.iohk.atala.prism.node.repositories.RequestNoncesRepository
import tofu.higherKind.Mid
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/AtalaObjectsTransactionsRepositoryMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/AtalaObjectsTransactionsRepositoryMetrics.scala
index 8b97eb064c..00f42e64aa 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/AtalaObjectsTransactionsRepositoryMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/AtalaObjectsTransactionsRepositoryMetrics.scala
@@ -1,9 +1,9 @@
package io.iohk.atala.prism.node.repositories.metrics
import cats.effect.MonadCancelThrow
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.MeasureOps
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
-import io.iohk.atala.prism.models.{Ledger, TransactionId, TransactionInfo}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.MeasureOps
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId, TransactionInfo}
import io.iohk.atala.prism.node.PublicationInfo
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.{
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/AtalaOperationsRepositoryMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/AtalaOperationsRepositoryMetrics.scala
index 9f57ebd34f..6074d1644f 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/AtalaOperationsRepositoryMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/AtalaOperationsRepositoryMetrics.scala
@@ -1,14 +1,14 @@
package io.iohk.atala.prism.node.repositories.metrics
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.MeasureOps
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.MeasureOps
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.{AtalaObjectId, AtalaObjectInfo, AtalaOperationInfo, AtalaOperationStatus}
import io.iohk.atala.prism.node.repositories.AtalaOperationsRepository
import io.iohk.atala.prism.protos.node_models.SignedAtalaOperation
import tofu.higherKind.Mid
import cats.effect.MonadCancelThrow
-import io.iohk.atala.prism.models.AtalaOperationId
+import io.iohk.atala.prism.node.models.AtalaOperationId
private[repositories] final class AtalaOperationsRepositoryMetrics[F[
_
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/CredentialBatchesRepositoryMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/CredentialBatchesRepositoryMetrics.scala
index 4d7d1ef88d..cdf3f6f3e2 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/CredentialBatchesRepositoryMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/CredentialBatchesRepositoryMetrics.scala
@@ -2,8 +2,8 @@ package io.iohk.atala.prism.node.repositories.metrics
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.Sha256Digest
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.MeasureOps
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.MeasureOps
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.nodeState.{CredentialBatchState, LedgerData}
import io.iohk.atala.prism.node.repositories.CredentialBatchesRepository
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/DIDDataRepositoryMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/DIDDataRepositoryMetrics.scala
index 59c2b57557..d03a63cbe1 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/DIDDataRepositoryMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/DIDDataRepositoryMetrics.scala
@@ -1,8 +1,8 @@
package io.iohk.atala.prism.node.repositories.metrics
import io.iohk.atala.prism.identity.CanonicalPrismDid
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.MeasureOps
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.MeasureOps
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.nodeState.DIDDataState
import io.iohk.atala.prism.node.repositories.DIDDataRepository
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/KeyValuesRepositoryMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/KeyValuesRepositoryMetrics.scala
index 5449538ac7..c23e13bd9b 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/KeyValuesRepositoryMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/KeyValuesRepositoryMetrics.scala
@@ -1,7 +1,7 @@
package io.iohk.atala.prism.node.repositories.metrics
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.MeasureOps
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.MeasureOps
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
import io.iohk.atala.prism.node.repositories.KeyValuesRepository
import io.iohk.atala.prism.node.repositories.daos.KeyValuesDAO.KeyValue
import tofu.higherKind.Mid
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/MetricsCountersRepositoryMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/MetricsCountersRepositoryMetrics.scala
index d52ebc1cc5..63ed88ef40 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/MetricsCountersRepositoryMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/MetricsCountersRepositoryMetrics.scala
@@ -1,8 +1,8 @@
package io.iohk.atala.prism.node.repositories.metrics
import cats.effect.MonadCancelThrow
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.MeasureOps
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.MeasureOps
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
import io.iohk.atala.prism.node.repositories.MetricsCountersRepository
import tofu.higherKind.Mid
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/ProtocolVersionRepositoryMetrics.scala b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/ProtocolVersionRepositoryMetrics.scala
index 5487909410..a373034328 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/ProtocolVersionRepositoryMetrics.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/repositories/metrics/ProtocolVersionRepositoryMetrics.scala
@@ -1,7 +1,7 @@
package io.iohk.atala.prism.node.repositories.metrics
-import io.iohk.atala.prism.metrics.TimeMeasureUtil.MeasureOps
-import io.iohk.atala.prism.metrics.{TimeMeasureMetric, TimeMeasureUtil}
+import io.iohk.atala.prism.node.metrics.TimeMeasureUtil.MeasureOps
+import io.iohk.atala.prism.node.metrics.{TimeMeasureMetric, TimeMeasureUtil}
import io.iohk.atala.prism.node.models.{ProtocolVersion, ProtocolVersionInfo}
import io.iohk.atala.prism.node.repositories.ProtocolVersionRepository
import tofu.higherKind.Mid
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/BlockProcessingService.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/BlockProcessingService.scala
index d7f6f0aa79..2e594f8dde 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/BlockProcessingService.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/BlockProcessingService.scala
@@ -9,7 +9,7 @@ import io.iohk.atala.prism.protos.models.TimestampInfo
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.signature.ECSignature
-import io.iohk.atala.prism.models.{AtalaOperationId, Ledger, TransactionId}
+import io.iohk.atala.prism.node.models.{AtalaOperationId, Ledger, TransactionId}
import io.iohk.atala.prism.node.metrics.OperationsCounters
import io.iohk.atala.prism.node.models.AtalaOperationStatus
import io.iohk.atala.prism.node.models.nodeState.LedgerData
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/CardanoLedgerService.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/CardanoLedgerService.scala
index b5588c1f18..dc0f916962 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/CardanoLedgerService.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/CardanoLedgerService.scala
@@ -9,7 +9,7 @@ import cats.syntax.functor._
import cats.syntax.traverse._
import cats.{Applicative, Comonad, Functor, Monad}
import enumeratum.{Enum, EnumEntry}
-import io.iohk.atala.prism.models._
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.cardano.models.Block.Canonical
import io.iohk.atala.prism.node.cardano.models._
import io.iohk.atala.prism.node.cardano.{CardanoClient, LAST_SYNCED_BLOCK_NO, LAST_SYNCED_BLOCK_TIMESTAMP}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/InMemoryLedgerService.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/InMemoryLedgerService.scala
index dae1ad0554..4e9b02c3f6 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/InMemoryLedgerService.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/InMemoryLedgerService.scala
@@ -4,7 +4,7 @@ import cats.effect.Resource
import cats.implicits._
import cats.{Applicative, Comonad, Functor, MonadThrow}
import io.iohk.atala.prism.crypto.Sha256
-import io.iohk.atala.prism.models._
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.cardano.models.{CardanoWalletError, CardanoWalletErrorCode, Lovelace}
import io.iohk.atala.prism.node.models.Balance
import io.iohk.atala.prism.node.services.logs.UnderlyingLedgerLogs
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/NodeExplorerService.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/NodeExplorerService.scala
index faef4b66c1..39f5ac18aa 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/NodeExplorerService.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/NodeExplorerService.scala
@@ -5,7 +5,7 @@ import cats.implicits._
import cats.{Comonad, Functor, MonadThrow}
import derevo.derive
import derevo.tagless.applyK
-import io.iohk.atala.prism.models.{TransactionId, TransactionInfo}
+import io.iohk.atala.prism.node.models.{TransactionId, TransactionInfo}
import io.iohk.atala.prism.node.UnderlyingLedger
import io.iohk.atala.prism.node.cardano.models.CardanoWalletError
import io.iohk.atala.prism.node.errors.NodeError
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/NodeService.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/NodeService.scala
index 9d9f046271..90e972b0e3 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/NodeService.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/NodeService.scala
@@ -9,7 +9,7 @@ import derevo.tagless.applyK
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.Sha256Digest
import io.iohk.atala.prism.identity.{CanonicalPrismDid, PrismDid}
-import io.iohk.atala.prism.models.AtalaOperationId
+import io.iohk.atala.prism.node.models.AtalaOperationId
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.grpc.ProtoCodecs
import io.iohk.atala.prism.node.models.nodeState.{CredentialBatchState, DIDDataState, LedgerData}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/ObjectManagementService.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/ObjectManagementService.scala
index b94e43b81e..ade1bee0c2 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/ObjectManagementService.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/ObjectManagementService.scala
@@ -11,9 +11,7 @@ import derevo.tagless.applyK
import doobie.free.connection.ConnectionIO
import doobie.implicits._
import doobie.util.transactor.Transactor
-import enumeratum.EnumEntry.Snakecase
-import enumeratum.{Enum, EnumEntry}
-import io.iohk.atala.prism.models.{AtalaOperationId, TransactionId, TransactionInfo}
+import io.iohk.atala.prism.node.models.{AtalaOperationId, TransactionId, TransactionInfo}
import io.iohk.atala.prism.node.cardano.LAST_SYNCED_BLOCK_TIMESTAMP
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.errors.NodeError.{InvalidArgument, UnsupportedProtocolVersion}
@@ -35,7 +33,7 @@ import io.iohk.atala.prism.node.services.logs.ObjectManagementServiceLogs
import io.iohk.atala.prism.node.services.models.AtalaObjectNotification
import io.iohk.atala.prism.protos.node_models.SignedAtalaOperation
import io.iohk.atala.prism.protos.{node_internal, node_models}
-import io.iohk.atala.prism.utils.syntax.DBConnectionOps
+import io.iohk.atala.prism.node.utils.syntax.DBConnectionOps
import tofu.higherKind.Mid
import tofu.logging.derivation.loggable
import tofu.logging.{Logs, ServiceLogging}
@@ -44,9 +42,6 @@ import tofu.syntax.monadic._
import java.time.Instant
-private class DuplicateAtalaBlock extends Exception
-private class DuplicateAtalaOperation extends Exception
-
@derive(applyK)
trait ObjectManagementService[F[_]] {
def saveObject(
@@ -317,20 +312,6 @@ private final class ObjectManagementServiceImpl[F[_]: MonadCancelThrow](
}
object ObjectManagementService {
- sealed trait AtalaObjectTransactionStatus extends EnumEntry with Snakecase
- object AtalaObjectTransactionStatus extends Enum[AtalaObjectTransactionStatus] {
- val values: IndexedSeq[AtalaObjectTransactionStatus] = findValues
-
- case object Pending extends AtalaObjectTransactionStatus
- case object InLedger extends AtalaObjectTransactionStatus
- case object Confirmed extends AtalaObjectTransactionStatus
- }
-
- case class AtalaObjectTransactionInfo(
- transaction: TransactionInfo,
- status: AtalaObjectTransactionStatus
- )
-
@derive(loggable)
final case class SaveObjectError(msg: String)
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/SubmissionSchedulingService.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/SubmissionSchedulingService.scala
index 2ea560294a..da36fe03d6 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/SubmissionSchedulingService.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/SubmissionSchedulingService.scala
@@ -4,9 +4,9 @@ import cats.Id
import cats.effect.IO
import cats.effect.unsafe.IORuntime
import cats.syntax.functor._
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
import io.iohk.atala.prism.node.services.SubmissionSchedulingService.Config
-import io.iohk.atala.prism.tracing.Tracing._
+import io.iohk.atala.prism.node.tracing.Tracing._
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future}
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/SubmissionService.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/SubmissionService.scala
index 841be3f2b8..1205865a5f 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/SubmissionService.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/SubmissionService.scala
@@ -12,7 +12,7 @@ import cats.syntax.flatMap._
import cats.syntax.option._
import derevo.derive
import derevo.tagless.applyK
-import io.iohk.atala.prism.models.{TransactionInfo, TransactionStatus}
+import io.iohk.atala.prism.node.models.{TransactionInfo, TransactionStatus}
import io.iohk.atala.prism.node.UnderlyingLedger
import io.iohk.atala.prism.node.cardano.models.{CardanoWalletError, CardanoWalletErrorCode}
import io.iohk.atala.prism.node.errors.NodeError
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/logs/NodeExplorerServiceLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/logs/NodeExplorerServiceLogs.scala
index 07a4d85699..2b29e56f16 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/logs/NodeExplorerServiceLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/logs/NodeExplorerServiceLogs.scala
@@ -4,7 +4,7 @@ import cats.MonadThrow
import cats.syntax.applicativeError._
import cats.syntax.apply._
import cats.syntax.flatMap._
-import io.iohk.atala.prism.models.{TransactionId, TransactionInfo}
+import io.iohk.atala.prism.node.models.{TransactionId, TransactionInfo}
import io.iohk.atala.prism.node.cardano.models.CardanoWalletError
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.Balance
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/logs/NodeServiceLogging.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/logs/NodeServiceLogging.scala
index cf58a451e2..322a254aad 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/logs/NodeServiceLogging.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/logs/NodeServiceLogging.scala
@@ -6,7 +6,7 @@ import cats.syntax.applicativeError._
import cats.syntax.apply._
import cats.syntax.flatMap._
import com.google.protobuf.ByteString
-import io.iohk.atala.prism.models.AtalaOperationId
+import io.iohk.atala.prism.node.models.AtalaOperationId
import io.iohk.atala.prism.node.errors
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.ProtocolVersion
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/logs/ObjectManagementServiceLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/logs/ObjectManagementServiceLogs.scala
index f4d2cabdf5..1a502d0dae 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/logs/ObjectManagementServiceLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/logs/ObjectManagementServiceLogs.scala
@@ -3,7 +3,6 @@ package io.iohk.atala.prism.node.services.logs
import cats.MonadThrow
import cats.syntax.applicativeError._
import cats.syntax.traverse._
-import io.iohk.atala.prism.models.{AtalaOperationId, TransactionId, TransactionInfo}
import io.iohk.atala.prism.node.errors
import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.services.ObjectManagementService
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/logs/UnderlyingLedgerLogs.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/logs/UnderlyingLedgerLogs.scala
index 6e80b8d094..67267019ea 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/logs/UnderlyingLedgerLogs.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/logs/UnderlyingLedgerLogs.scala
@@ -3,7 +3,7 @@ package io.iohk.atala.prism.node.services.logs
import cats.syntax.applicativeError._
import cats.syntax.apply._
import cats.syntax.flatMap._
-import io.iohk.atala.prism.models.{AtalaOperationId, Ledger, TransactionDetails, TransactionId}
+import io.iohk.atala.prism.node.models.{AtalaOperationId, Ledger, TransactionDetails, TransactionId}
import io.iohk.atala.prism.node.cardano.models.CardanoWalletError
import io.iohk.atala.prism.node.{PublicationInfo, UnderlyingLedger}
import io.iohk.atala.prism.protos.node_internal.AtalaObject
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/services/models/package.scala b/node/src/main/scala/io/iohk/atala/prism/node/services/models/package.scala
index 9f06a41e17..c43621e5bd 100644
--- a/node/src/main/scala/io/iohk/atala/prism/node/services/models/package.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/services/models/package.scala
@@ -2,7 +2,7 @@ package io.iohk.atala.prism.node.services
import cats.implicits._
import derevo.derive
-import io.iohk.atala.prism.models.TransactionInfo
+import io.iohk.atala.prism.node.models.TransactionInfo
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.operations.{
CreateDIDOperation,
diff --git a/common/src/main/scala/io/iohk/atala/prism/tracing/Tracing.scala b/node/src/main/scala/io/iohk/atala/prism/node/tracing/Tracing.scala
similarity index 61%
rename from common/src/main/scala/io/iohk/atala/prism/tracing/Tracing.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/tracing/Tracing.scala
index c8c9db3478..9e29192547 100644
--- a/common/src/main/scala/io/iohk/atala/prism/tracing/Tracing.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/tracing/Tracing.scala
@@ -1,8 +1,8 @@
-package io.iohk.atala.prism.tracing
+package io.iohk.atala.prism.node.tracing
import io.grpc.Context
-import io.iohk.atala.prism.auth.grpc.GrpcAuthenticationHeaderParser
-import io.iohk.atala.prism.logging.TraceId
+import io.iohk.atala.prism.node.auth.grpc.GrpcAuthenticationHeaderParser
+import io.iohk.atala.prism.node.logging.TraceId
object Tracing {
def trace[F[_], A](
diff --git a/common/src/main/scala/io/iohk/atala/prism/utils/Base64.scala b/node/src/main/scala/io/iohk/atala/prism/node/utils/Base64.scala
similarity index 91%
rename from common/src/main/scala/io/iohk/atala/prism/utils/Base64.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/utils/Base64.scala
index e7e4bef39b..215ef8efc7 100644
--- a/common/src/main/scala/io/iohk/atala/prism/utils/Base64.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/utils/Base64.scala
@@ -1,4 +1,5 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
+
import java.nio.charset.StandardCharsets
import java.util.Base64
diff --git a/common/src/main/scala/io/iohk/atala/prism/utils/BytesOps.scala b/node/src/main/scala/io/iohk/atala/prism/node/utils/BytesOps.scala
similarity index 94%
rename from common/src/main/scala/io/iohk/atala/prism/utils/BytesOps.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/utils/BytesOps.scala
index 94b093d18e..4d18c19fc9 100644
--- a/common/src/main/scala/io/iohk/atala/prism/utils/BytesOps.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/utils/BytesOps.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import java.nio.charset.StandardCharsets
diff --git a/common/src/main/scala/io/iohk/atala/prism/utils/DoobieImplicits.scala b/node/src/main/scala/io/iohk/atala/prism/node/utils/DoobieImplicits.scala
similarity index 95%
rename from common/src/main/scala/io/iohk/atala/prism/utils/DoobieImplicits.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/utils/DoobieImplicits.scala
index 3ba770f436..34d0ec8d3f 100644
--- a/common/src/main/scala/io/iohk/atala/prism/utils/DoobieImplicits.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/utils/DoobieImplicits.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import doobie.Meta
import io.circe.{Decoder, Encoder, Json}
diff --git a/common/src/main/scala/io/iohk/atala/prism/utils/FutureEither.scala b/node/src/main/scala/io/iohk/atala/prism/node/utils/FutureEither.scala
similarity index 99%
rename from common/src/main/scala/io/iohk/atala/prism/utils/FutureEither.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/utils/FutureEither.scala
index a6cff84269..d24e337da9 100644
--- a/common/src/main/scala/io/iohk/atala/prism/utils/FutureEither.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/utils/FutureEither.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import cats.Functor
diff --git a/common/src/main/scala/io/iohk/atala/prism/utils/GrpcUtils.scala b/node/src/main/scala/io/iohk/atala/prism/node/utils/GrpcUtils.scala
similarity index 98%
rename from common/src/main/scala/io/iohk/atala/prism/utils/GrpcUtils.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/utils/GrpcUtils.scala
index c49bacf222..c6a157fa8a 100644
--- a/common/src/main/scala/io/iohk/atala/prism/utils/GrpcUtils.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/utils/GrpcUtils.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import java.util.concurrent.TimeUnit
import cats.effect.{Resource, Sync}
diff --git a/common/src/main/scala/io/iohk/atala/prism/utils/IOUtils.scala b/node/src/main/scala/io/iohk/atala/prism/node/utils/IOUtils.scala
similarity index 70%
rename from common/src/main/scala/io/iohk/atala/prism/utils/IOUtils.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/utils/IOUtils.scala
index 8b712f3124..6d749baf9e 100644
--- a/common/src/main/scala/io/iohk/atala/prism/utils/IOUtils.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/utils/IOUtils.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import cats.Comonad
import cats.effect.IO
@@ -6,7 +6,6 @@ import cats.effect.unsafe.implicits.global
object IOUtils {
- // Needed for better integration with tests, see io.iohk.atala.prism.management.console.ManagementConsoleRpcSpecBase at 57 line
implicit val ioComonad: Comonad[IO] = new Comonad[IO] {
override def extract[A](x: IO[A]): A = x.unsafeRunSync()
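The instance above makes Comonad[IO] available by running the effect synchronously, which is what lets tagless-final test code pull plain values out of IO. A minimal usage sketch:

    import cats.effect.IO
    import io.iohk.atala.prism.node.utils.IOUtils.ioComonad

    // extract runs the IO eagerly via unsafeRunSync, so a test can assert on the result directly.
    val answer: Int = ioComonad.extract(IO.pure(41).map(_ + 1)) // 42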
diff --git a/node/src/main/scala/io/iohk/atala/prism/node/utils/UriUtils.scala b/node/src/main/scala/io/iohk/atala/prism/node/utils/UriUtils.scala
new file mode 100644
index 0000000000..7bb93177b4
--- /dev/null
+++ b/node/src/main/scala/io/iohk/atala/prism/node/utils/UriUtils.scala
@@ -0,0 +1,42 @@
+package io.iohk.atala.prism.node.utils
+
+import io.lemonlabs.uri.config.UriConfig
+import io.lemonlabs.uri.encoding.PercentEncoder
+import io.lemonlabs.uri.{Uri, Url, Urn}
+
+object UriUtils {
+
+ private implicit val uriConfig: UriConfig = UriConfig.default.copy(queryEncoder = PercentEncoder())
+
+ def isValidUriString(str: String): Boolean = {
+
+ try {
+ Uri.parse(str) match {
+ case url: Url => url.schemeOption.nonEmpty
+ case Urn(_) => true
+ }
+ } catch {
+ case _: Exception => false
+ }
+ }
+
+ /** Checks if a string is a valid URI fragment according to RFC 3986 section-3.5
+ *
+    * @param str
+    *   the string to validate
+    * @return
+    *   true if str is a valid URI fragment, otherwise false
+ */
+ def isValidUriFragment(str: String): Boolean = {
+
+ /*
+ * Alphanumeric characters (A-Z, a-z, 0-9)
+ * Some special characters: -._~!$&'()*+,;=:@
+ * Percent-encoded characters, which are represented by the pattern %[0-9A-Fa-f]{2}
+ */
+ val uriFragmentRegex = "^([A-Za-z0-9\\-._~!$&'()*+,;=:@/?]|%[0-9A-Fa-f]{2})*$".r
+
+    // In general, an empty URI fragment is valid, but for our use case it would be pointless
+ str.nonEmpty && uriFragmentRegex.matches(str)
+ }
+}
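A few illustrative calls against the new helpers; the expected results follow from the code above rather than from any documented contract:

    import io.iohk.atala.prism.node.utils.UriUtils

    UriUtils.isValidUriString("https://example.com/path?q=1") // true: absolute URL with a scheme
    UriUtils.isValidUriString("relative/path")                // false: no scheme, parsed as a relative URL
    UriUtils.isValidUriFragment("key-1")                      // true: only characters the regex allows
    UriUtils.isValidUriFragment("")                           // false: empty fragments are rejected here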
diff --git a/common/src/main/scala/io/iohk/atala/prism/utils/syntax.scala b/node/src/main/scala/io/iohk/atala/prism/node/utils/syntax.scala
similarity index 84%
rename from common/src/main/scala/io/iohk/atala/prism/utils/syntax.scala
rename to node/src/main/scala/io/iohk/atala/prism/node/utils/syntax.scala
index d5aaf17e91..7340a09430 100644
--- a/common/src/main/scala/io/iohk/atala/prism/utils/syntax.scala
+++ b/node/src/main/scala/io/iohk/atala/prism/node/utils/syntax.scala
@@ -1,26 +1,18 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import cats.syntax.applicativeError._
import com.google.protobuf.timestamp.Timestamp
import doobie.implicits._
+import io.iohk.atala.prism.node.repositories.ConnectionIOErrorHandlers
import org.slf4j.{Logger, LoggerFactory}
import java.time.Instant
import scala.concurrent.Future
-import scala.util.Try
-import io.iohk.atala.prism.repositories.ConnectionIOErrorHandlers
object syntax {
lazy val logger: Logger = LoggerFactory.getLogger(this.getClass)
- implicit class SyntaxOps[A](exp: => A) {
-
- /** returns a Future containing the value without creating a new thread
- */
- def tryF: Future[A] = Future.fromTry(Try { exp })
- }
-
implicit class InstantToTimestampOps(val value: Instant) extends AnyVal {
/** converts instant to proto timestamp */
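The extension above converts java.time.Instant into a protobuf Timestamp; the hunk cuts off before the method body, so this is only a sketch of the conversion it presumably performs (the method name toProtoTimestamp is assumed, not taken from the file):

    import com.google.protobuf.timestamp.Timestamp
    import java.time.Instant

    // Seconds and nanoseconds map directly onto the protobuf Timestamp fields.
    def toProtoTimestamp(value: Instant): Timestamp =
      Timestamp(value.getEpochSecond, value.getNano)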
diff --git a/common/src/test/resources/application.conf b/node/src/test/resources/application.conf
similarity index 62%
rename from common/src/test/resources/application.conf
rename to node/src/test/resources/application.conf
index 17b28e0add..7148a4aed9 100644
--- a/common/src/test/resources/application.conf
+++ b/node/src/test/resources/application.conf
@@ -1,12 +1,5 @@
-managementConsole {
+nodeExplorer {
whitelistDids = [
"did:prism:5f0ffa312e8c6f260dbe6dbaa1e4e0d685aba03297c4e4f9ae80fa8d3fd7c0b0:Cj8KPRI7CgdtYXN0ZXIwEAFKLgoJc2VjcDI1NmsxEiEDhyJiYbQZs28bivj9PXsitEWca1MDg3yeW9ziiNcG-Cs"
]
}
-
-api {
- authTokens = [
- "ShVvJ11AlVhLYv7OBO9sY9AOz8D5FoWo"
- ]
- authEnabled = false
-}
\ No newline at end of file
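The renamed test config moves the DID whitelist under a nodeExplorer block and drops the console-only api section. Reading it with Typesafe Config would look roughly like this (a sketch; the node's actual config loader is not shown in this diff):

    import com.typesafe.config.ConfigFactory
    import scala.jdk.CollectionConverters._

    // Loads node/src/test/resources/application.conf from the classpath.
    val whitelistDids: List[String] =
      ConfigFactory.load().getStringList("nodeExplorer.whitelistDids").asScala.toList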
diff --git a/node/src/test/resources/bitcoin/blocks/2dc74e01317c32509dd530ad466f1ec11582d95d7dba44fe069ca66c9f330464 b/node/src/test/resources/bitcoin/blocks/2dc74e01317c32509dd530ad466f1ec11582d95d7dba44fe069ca66c9f330464
deleted file mode 100644
index e263ff3f69..0000000000
--- a/node/src/test/resources/bitcoin/blocks/2dc74e01317c32509dd530ad466f1ec11582d95d7dba44fe069ca66c9f330464
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "hash": "2dc74e01317c32509dd530ad466f1ec11582d95d7dba44fe069ca66c9f330464",
- "confirmations": 773438,
- "strippedsize": 852,
- "size": 852,
- "weight": 3408,
- "height": 40181,
- "version": 536870912,
- "versionHex": "20000000",
- "merkleroot": "9ea68a8a9648864b049079c9b151f411e002a5727b982285fd1f6319d58e93d5",
- "tx": [
- "f78011d83798ba5eb928bd6e79a35cfd8a03d3657044a32c24a1f8f99d5f0125",
- "3d488d9381b09954b5a9606b365ab0aaeca6aa750bdba79436e416ad6702226a",
- "356f3fc21f4f55a6aa0cac21ceab47d79dc8a95b08c234c3b6721dc769900056"
- ],
- "time": 1522836382,
- "mediantime": 1522836253,
- "nonce": 0,
- "bits": "1c036834",
- "difficulty": 75.13731391411834,
- "chainwork": "0000000000000000000000000000000000000000000000000012579e001aa440",
- "previousblockhash": "9d9816b341307d43aab6d1049ca51078b4656a3e31bd3abe7956b9df18f857e9",
- "nextblockhash": "7a72adbecd80c28eeda2718db41f1fbfb483678f559b4007cd9acd001aa79305"
-}
diff --git a/node/src/test/resources/bitcoin/full-blocks/2dc74e01317c32509dd530ad466f1ec11582d95d7dba44fe069ca66c9f330464 b/node/src/test/resources/bitcoin/full-blocks/2dc74e01317c32509dd530ad466f1ec11582d95d7dba44fe069ca66c9f330464
deleted file mode 100644
index f419ff7b42..0000000000
--- a/node/src/test/resources/bitcoin/full-blocks/2dc74e01317c32509dd530ad466f1ec11582d95d7dba44fe069ca66c9f330464
+++ /dev/null
@@ -1,164 +0,0 @@
-{
- "hash": "2dc74e01317c32509dd530ad466f1ec11582d95d7dba44fe069ca66c9f330464",
- "confirmations": 773438,
- "strippedsize": 852,
- "size": 852,
- "weight": 3408,
- "height": 40181,
- "version": 536870912,
- "versionHex": "20000000",
- "merkleroot": "9ea68a8a9648864b049079c9b151f411e002a5727b982285fd1f6319d58e93d5",
- "tx": [
- {
- "txid": "f78011d83798ba5eb928bd6e79a35cfd8a03d3657044a32c24a1f8f99d5f0125",
- "hash": "f78011d83798ba5eb928bd6e79a35cfd8a03d3657044a32c24a1f8f99d5f0125",
- "version": 1,
- "size": 66,
- "vsize": 66,
- "weight": 264,
- "locktime": 0,
- "vin": [
- {
- "coinbase": "03f59c000101",
- "sequence": 4294967295
- }
- ],
- "vout": [
- {
- "value": 0.00000000,
- "n": 0,
- "scriptPubKey": {
- "asm": "",
- "hex": "",
- "type": "nonstandard"
- }
- }
- ],
- "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0603f59c000101ffffffff0100000000000000000000000000"
- },
- {
- "txid": "3d488d9381b09954b5a9606b365ab0aaeca6aa750bdba79436e416ad6702226a",
- "hash": "3d488d9381b09954b5a9606b365ab0aaeca6aa750bdba79436e416ad6702226a",
- "version": 1,
- "size": 234,
- "vsize": 234,
- "weight": 936,
- "locktime": 0,
- "vin": [
- {
- "txid": "a3afec89801dd45f666339a96d2862e75392c68e804b35297239c5372ce408ae",
- "vout": 1,
- "scriptSig": {
- "asm": "3044022042777ded890d8ad684e807d33036b2477700d89837b2a230908b71f16f159657022054ce173bc13ffef44628b7ad52250c69db5d772183190d1e1968d1df8914c762[ALL] 03e85efa8cc86226833b2059c2af17d79ef8235359b14dd36c8000eb72bf09eba2",
- "hex": "473044022042777ded890d8ad684e807d33036b2477700d89837b2a230908b71f16f159657022054ce173bc13ffef44628b7ad52250c69db5d772183190d1e1968d1df8914c762012103e85efa8cc86226833b2059c2af17d79ef8235359b14dd36c8000eb72bf09eba2"
- },
- "sequence": 4294967295
- }
- ],
- "vout": [
- {
- "value": 0.00000000,
- "n": 0,
- "scriptPubKey": {
- "asm": "",
- "hex": "",
- "type": "nonstandard"
- }
- },
- {
- "value": 1340.00000000,
- "n": 1,
- "scriptPubKey": {
- "asm": "OP_DUP OP_HASH160 750e6be359f1a26f6b16b0b3957e8f8270eb46f2 OP_EQUALVERIFY OP_CHECKSIG",
- "hex": "76a914750e6be359f1a26f6b16b0b3957e8f8270eb46f288ac",
- "reqSigs": 1,
- "type": "pubkeyhash",
- "addresses": [
- "XmMnAiXCHcVhSTfdKHYNsaZZytSnKH9reb"
- ]
- }
- },
- {
- "value": 22.50000000,
- "n": 2,
- "scriptPubKey": {
- "asm": "OP_DUP OP_HASH160 a5ba7dc6c242607ef094c79ccfb8bdaddf831e97 OP_EQUALVERIFY OP_CHECKSIG",
- "hex": "76a914a5ba7dc6c242607ef094c79ccfb8bdaddf831e9788ac",
- "reqSigs": 1,
- "type": "pubkeyhash",
- "addresses": [
- "Xqo8jYoaMvfeE28Zux69Q3zgzT9aDbsqnG"
- ]
- }
- }
- ],
- "hex": "0100000001ae08e42c37c5397229354b808ec69253e762286da93963665fd41d8089ecafa3010000006a473044022042777ded890d8ad684e807d33036b2477700d89837b2a230908b71f16f159657022054ce173bc13ffef44628b7ad52250c69db5d772183190d1e1968d1df8914c762012103e85efa8cc86226833b2059c2af17d79ef8235359b14dd36c8000eb72bf09eba2ffffffff0300000000000000000000bc05331f0000001976a914750e6be359f1a26f6b16b0b3957e8f8270eb46f288ac80461c86000000001976a914a5ba7dc6c242607ef094c79ccfb8bdaddf831e9788ac00000000"
- },
- {
- "txid": "356f3fc21f4f55a6aa0cac21ceab47d79dc8a95b08c234c3b6721dc769900056",
- "hash": "356f3fc21f4f55a6aa0cac21ceab47d79dc8a95b08c234c3b6721dc769900056",
- "version": 1,
- "size": 373,
- "vsize": 373,
- "weight": 1492,
- "locktime": 40158,
- "vin": [
- {
- "txid": "f8b83a6ccf1068155f5a4abfb15f548cffd164d44479e6f5a3647b43a5b2876e",
- "vout": 2,
- "scriptSig": {
- "asm": "304402200f99576dd834945a9f2b9c61ad742d9a424f701db54d1e4aeb85ee207bf0180802200a1a85d2113e5875f657f1f29afca350327a384b7510a6b2a21f6e63298882ac[ALL] 03e85efa8cc86226833b2059c2af17d79ef8235359b14dd36c8000eb72bf09eba2",
- "hex": "47304402200f99576dd834945a9f2b9c61ad742d9a424f701db54d1e4aeb85ee207bf0180802200a1a85d2113e5875f657f1f29afca350327a384b7510a6b2a21f6e63298882ac012103e85efa8cc86226833b2059c2af17d79ef8235359b14dd36c8000eb72bf09eba2"
- },
- "sequence": 4294967294
- }
- ],
- "vout": [
- {
- "value": 0.00000000,
- "n": 0,
- "scriptPubKey": {
- "asm": "OP_RETURN 586a55587938507a55464d78534c37594135767866574a587365746b354d5638676f 58794a4338786e664672484e634d696e68366778755052595939484361593944416f 99 1f60a6a385a4e5163ffef65dd873f17452bb0d9f89da701ffcc5a0f72287273c0571485c29123fef880d2d8169cfdb884bf95a18a0b36461517acda390ce4cf441",
- "hex": "6a22586a55587938507a55464d78534c37594135767866574a587365746b354d5638676f2258794a4338786e664672484e634d696e68366778755052595939484361593944416f0163411f60a6a385a4e5163ffef65dd873f17452bb0d9f89da701ffcc5a0f72287273c0571485c29123fef880d2d8169cfdb884bf95a18a0b36461517acda390ce4cf441",
- "type": "nulldata"
- }
- },
- {
- "value": 1.00000000,
- "n": 1,
- "scriptPubKey": {
- "asm": "OP_DUP OP_HASH160 60653a6bcde4494bf67de03338d25ed9c576dd49 OP_EQUALVERIFY OP_CHECKSIG",
- "hex": "76a91460653a6bcde4494bf67de03338d25ed9c576dd4988ac",
- "reqSigs": 1,
- "type": "pubkeyhash",
- "addresses": [
- "XjUXy8PzUFMxSL7YA5vxfWJXsetk5MV8go"
- ]
- }
- },
- {
- "value": 1249.00000000,
- "n": 2,
- "scriptPubKey": {
- "asm": "OP_DUP OP_HASH160 e92011c71d996b43432eff60d98bea648ed07d8e OP_EQUALVERIFY OP_CHECKSIG",
- "hex": "76a914e92011c71d996b43432eff60d98bea648ed07d8e88ac",
- "reqSigs": 1,
- "type": "pubkeyhash",
- "addresses": [
- "XwwVhB3eUS7j8cB9frxQ6ed2KeFubRaebY"
- ]
- }
- }
- ],
- "hex": "01000000016e87b2a5437b64a3f5e67944d464d1ff8c545fb1bf4a5a5f156810cf6c3ab8f8020000006a47304402200f99576dd834945a9f2b9c61ad742d9a424f701db54d1e4aeb85ee207bf0180802200a1a85d2113e5875f657f1f29afca350327a384b7510a6b2a21f6e63298882ac012103e85efa8cc86226833b2059c2af17d79ef8235359b14dd36c8000eb72bf09eba2feffffff0300000000000000008b6a22586a55587938507a55464d78534c37594135767866574a587365746b354d5638676f2258794a4338786e664672484e634d696e68366778755052595939484361593944416f0163411f60a6a385a4e5163ffef65dd873f17452bb0d9f89da701ffcc5a0f72287273c0571485c29123fef880d2d8169cfdb884bf95a18a0b36461517acda390ce4cf44100e1f505000000001976a91460653a6bcde4494bf67de03338d25ed9c576dd4988ac00c19e141d0000001976a914e92011c71d996b43432eff60d98bea648ed07d8e88acde9c0000"
- }
- ],
- "time": 1522836382,
- "mediantime": 1522836253,
- "nonce": 0,
- "bits": "1c036834",
- "difficulty": 75.13731391411834,
- "chainwork": "0000000000000000000000000000000000000000000000000012579e001aa440",
- "previousblockhash": "9d9816b341307d43aab6d1049ca51078b4656a3e31bd3abe7956b9df18f857e9",
- "nextblockhash": "7a72adbecd80c28eeda2718db41f1fbfb483678f559b4007cd9acd001aa79305"
-}
diff --git a/node/src/test/resources/db/testmigration/V2__create_config.sql b/node/src/test/resources/db/testmigration/V2__create_config.sql
new file mode 100644
index 0000000000..ccf8cc9e37
--- /dev/null
+++ b/node/src/test/resources/db/testmigration/V2__create_config.sql
@@ -0,0 +1,5 @@
+CREATE TABLE config
+(
+ key TEXT NOT NULL PRIMARY KEY,
+ value TEXT NOT NULL
+);
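The test migration adds a plain key/value config table; a doobie lookup against it would look like this (illustrative only, not code from the repository):

    import doobie._
    import doobie.implicits._

    // Look up a single configuration value by key from the config table created above.
    def getConfigValue(key: String): ConnectionIO[Option[String]] =
      sql"SELECT value FROM config WHERE key = $key".query[String].option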
diff --git a/common/src/test/scala/io/iohk/atala/prism/AtalaWithPostgresSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/AtalaWithPostgresSpec.scala
similarity index 72%
rename from common/src/test/scala/io/iohk/atala/prism/AtalaWithPostgresSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/AtalaWithPostgresSpec.scala
index 5b5c79bf05..0aef3a36a4 100644
--- a/common/src/test/scala/io/iohk/atala/prism/AtalaWithPostgresSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/AtalaWithPostgresSpec.scala
@@ -1,10 +1,10 @@
-package io.iohk.atala.prism
+package io.iohk.atala.prism.node
import cats.effect.IO
import doobie.util.transactor.Transactor
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.repositories.PostgresRepositorySpec
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.repositories.PostgresRepositorySpec
import org.scalatest.concurrent.ScalaFutures
import scala.concurrent.ExecutionContext
@@ -16,4 +16,5 @@ class AtalaWithPostgresSpec extends PostgresRepositorySpec[IO] with ScalaFutures
implicit val pc: PatienceConfig = PatienceConfig(20.seconds, 5.millis)
val dbLiftedToTraceIdIO: Transactor[IOWithTraceIdContext] =
db.mapK(TraceId.liftToIOWithTraceId)
+
}
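dbLiftedToTraceIdIO relies on mapK with a natural transformation from IO into the trace-id context. The shape of that machinery, reconstructed here as an assumption (the real definitions live in io.iohk.atala.prism.node.logging.TraceId and may differ):

    import cats.~>
    import cats.data.ReaderT
    import cats.effect.IO

    // Assumed shape: the trace-id context is a ReaderT over IO, and lifting simply ignores the TraceId.
    final case class TraceId(value: String)
    type IOWithTraceIdContext[A] = ReaderT[IO, TraceId, A]

    val liftToIOWithTraceId: IO ~> IOWithTraceIdContext =
      new (IO ~> IOWithTraceIdContext) {
        def apply[A](fa: IO[A]): IOWithTraceIdContext[A] = ReaderT.liftF(fa)
      }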
diff --git a/common/src/test/scala/io/iohk/atala/prism/DIDUtil.scala b/node/src/test/scala/io/iohk/atala/prism/node/DIDUtil.scala
similarity index 97%
rename from common/src/test/scala/io/iohk/atala/prism/DIDUtil.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/DIDUtil.scala
index a41be1c305..79a025e4be 100644
--- a/common/src/test/scala/io/iohk/atala/prism/DIDUtil.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/DIDUtil.scala
@@ -1,7 +1,7 @@
-package io.iohk.atala.prism
+package io.iohk.atala.prism.node
import com.google.protobuf.ByteString
-import io.iohk.atala.prism.auth.SignedRpcRequest
+import io.iohk.atala.prism.node.auth.SignedRpcRequest
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.{ECKeyPair, ECPublicKey}
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/DataPreparation.scala b/node/src/test/scala/io/iohk/atala/prism/node/DataPreparation.scala
index a4cd23029b..ce34e740bf 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/DataPreparation.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/DataPreparation.scala
@@ -8,9 +8,8 @@ import doobie.implicits._
import doobie.util.transactor.Transactor
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.{MerkleRoot, Sha256, Sha256Digest}
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.models._
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
import io.iohk.atala.prism.node.cardano.{LAST_SYNCED_BLOCK_NO, LAST_SYNCED_BLOCK_TIMESTAMP}
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.grpc.ProtoCodecs
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/NodeExplorerServiceSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/NodeExplorerServiceSpec.scala
index fd5e72b50b..b2274523a5 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/NodeExplorerServiceSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/NodeExplorerServiceSpec.scala
@@ -7,13 +7,13 @@ import com.google.protobuf.ByteString
import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder}
import io.grpc.stub.MetadataUtils
import io.grpc.{ManagedChannel, Server, StatusRuntimeException}
-import io.iohk.atala.prism.AtalaWithPostgresSpec
-import io.iohk.atala.prism.auth.WhitelistedAuthenticatorF
-import io.iohk.atala.prism.auth.grpc.GrpcAuthenticatorInterceptor
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.auth.WhitelistedAuthenticatorF
+import io.iohk.atala.prism.node.auth.grpc.GrpcAuthenticatorInterceptor
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
import io.iohk.atala.prism.node.cardano.models.{CardanoWalletError, Lovelace}
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.models.AtalaObjectStatus.{Pending, Scheduled}
@@ -30,7 +30,7 @@ import io.iohk.atala.prism.node.services.{
ObjectManagementService,
StatisticsService
}
-import io.iohk.atala.prism.nonce.{ClientHelper, RequestAuthenticator}
+import io.iohk.atala.prism.node.nonce.{ClientHelper, RequestAuthenticator}
import io.iohk.atala.prism.protos.node_api.GetScheduledOperationsRequest.OperationType.{
AnyOperationType,
CreateDidOperationOperationType,
@@ -40,7 +40,7 @@ import io.iohk.atala.prism.protos.node_api.NodeExplorerServiceGrpc.NodeExplorerS
import io.iohk.atala.prism.protos.node_api._
import io.iohk.atala.prism.protos.node_models.SignedAtalaOperation
import io.iohk.atala.prism.protos.{node_api, node_internal, node_models}
-import io.iohk.atala.prism.utils.IOUtils.ioComonad
+import io.iohk.atala.prism.node.utils.IOUtils.ioComonad
import org.mockito.scalatest.{MockitoSugar, ResetMocksAfterEachTest}
import org.scalatest.BeforeAndAfterEach
import org.scalatest.OptionValues._
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/NodeServiceSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/NodeServiceSpec.scala
index f49f21b8fa..20d4702003 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/NodeServiceSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/NodeServiceSpec.scala
@@ -8,13 +8,13 @@ import com.google.protobuf.ByteString
import doobie.implicits._
import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder}
import io.grpc.{ManagedChannel, Server, Status, StatusRuntimeException}
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.{MerkleRoot, Sha256}
import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.models.{AtalaOperationId, DidSuffix, Ledger, TransactionId}
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.models.{AtalaOperationId, DidSuffix, Ledger, TransactionId}
import io.iohk.atala.prism.node.errors.NodeError
import io.iohk.atala.prism.node.grpc.ProtoCodecs
import io.iohk.atala.prism.node.models._
@@ -28,8 +28,8 @@ import io.iohk.atala.prism.protos.models.TimestampInfo
import io.iohk.atala.prism.protos.node_api._
import io.iohk.atala.prism.protos.node_models.OperationOutput
import io.iohk.atala.prism.protos.{common_models, node_api, node_models}
-import io.iohk.atala.prism.utils.IOUtils._
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.utils.IOUtils._
+import io.iohk.atala.prism.node.utils.syntax._
import org.mockito.scalatest.{MockitoSugar, ResetMocksAfterEachTest}
import org.scalatest.BeforeAndAfterEach
import org.scalatest.OptionValues._
diff --git a/common/src/test/scala/io/iohk/atala/prism/RpcSpecBase.scala b/node/src/test/scala/io/iohk/atala/prism/node/RpcSpecBase.scala
similarity index 94%
rename from common/src/test/scala/io/iohk/atala/prism/RpcSpecBase.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/RpcSpecBase.scala
index ac59675286..e482dd66b7 100644
--- a/common/src/test/scala/io/iohk/atala/prism/RpcSpecBase.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/RpcSpecBase.scala
@@ -1,20 +1,19 @@
-package io.iohk.atala.prism
+package io.iohk.atala.prism.node
import cats.effect.IO
import io.grpc._
import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder}
-import io.iohk.atala.prism.auth.SignedRpcRequest
-import io.iohk.atala.prism.auth.grpc.{GrpcAuthenticationHeader, GrpcAuthenticatorInterceptor}
+import io.iohk.atala.prism.node.auth.SignedRpcRequest
+import io.iohk.atala.prism.node.auth.grpc.{GrpcAuthenticationHeader, GrpcAuthenticatorInterceptor}
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.{ECKeyPair, ECPublicKey}
import io.iohk.atala.prism.crypto.signature.ECSignature
import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
import org.scalatest.BeforeAndAfterEach
import scalapb.GeneratedMessage
import tofu.logging.Logs
-
import _root_.java.util.concurrent.{Executor, TimeUnit}
trait ApiTestHelper[STUB] {
diff --git a/common/src/test/scala/io/iohk/atala/prism/TestConstants.scala b/node/src/test/scala/io/iohk/atala/prism/node/TestConstants.scala
similarity index 66%
rename from common/src/test/scala/io/iohk/atala/prism/TestConstants.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/TestConstants.scala
index 3621c19abb..899c8caf79 100644
--- a/common/src/test/scala/io/iohk/atala/prism/TestConstants.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/TestConstants.scala
@@ -1,6 +1,6 @@
-package io.iohk.atala.prism
+package io.iohk.atala.prism.node
-import io.iohk.atala.prism.models.TransactionId
+import io.iohk.atala.prism.node.models.TransactionId
object TestConstants {
diff --git a/common/src/test/scala/io/iohk/atala/prism/auth/SignedRpcRequest.scala b/node/src/test/scala/io/iohk/atala/prism/node/auth/SignedRpcRequest.scala
similarity index 93%
rename from common/src/test/scala/io/iohk/atala/prism/auth/SignedRpcRequest.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/auth/SignedRpcRequest.scala
index badecd5ca3..2ba2453f42 100644
--- a/common/src/test/scala/io/iohk/atala/prism/auth/SignedRpcRequest.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/auth/SignedRpcRequest.scala
@@ -1,6 +1,6 @@
-package io.iohk.atala.prism.auth
+package io.iohk.atala.prism.node.auth
-import io.iohk.atala.prism.auth
+import io.iohk.atala.prism.node.auth
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.ECKeyPair
import io.iohk.atala.prism.crypto.signature.ECSignature
diff --git a/common/src/test/scala/io/iohk/atala/prism/auth/utils/DIDUtilsSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/auth/utils/DIDUtilsSpec.scala
similarity index 96%
rename from common/src/test/scala/io/iohk/atala/prism/auth/utils/DIDUtilsSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/auth/utils/DIDUtilsSpec.scala
index 0a5eb107d8..9927bddae6 100644
--- a/common/src/test/scala/io/iohk/atala/prism/auth/utils/DIDUtilsSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/auth/utils/DIDUtilsSpec.scala
@@ -1,7 +1,7 @@
-package io.iohk.atala.prism.auth.utils
+package io.iohk.atala.prism.node.auth.utils
import com.google.protobuf.ByteString
-import io.iohk.atala.prism.auth.errors.UnknownPublicKeyId
+import io.iohk.atala.prism.node.auth.errors.UnknownPublicKeyId
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.{ECKeyPair, ECPublicKey}
import io.iohk.atala.prism.protos.node_models
diff --git a/common/src/test/scala/io/iohk/atala/prism/auth/utils/DidWhitelistLoaderSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/auth/utils/DidWhitelistLoaderSpec.scala
similarity index 84%
rename from common/src/test/scala/io/iohk/atala/prism/auth/utils/DidWhitelistLoaderSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/auth/utils/DidWhitelistLoaderSpec.scala
index f9718064f1..4dff458a1a 100644
--- a/common/src/test/scala/io/iohk/atala/prism/auth/utils/DidWhitelistLoaderSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/auth/utils/DidWhitelistLoaderSpec.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.auth.utils
+package io.iohk.atala.prism.node.auth.utils
import com.typesafe.config.ConfigFactory
import io.iohk.atala.prism.identity.{PrismDid => DID}
@@ -9,7 +9,7 @@ class DidWhitelistLoaderSpec extends AnyWordSpec with Matchers {
"DidWhitelistLoader" should {
"be able to load whitelist from file" in {
val globalConfig = ConfigFactory.load()
- DidWhitelistLoader.load(globalConfig, "managementConsole") must be(
+ DidWhitelistLoader.load(globalConfig, "nodeExplorer") must be(
Set(
DID.fromString(
"did:prism:5f0ffa312e8c6f260dbe6dbaa1e4e0d685aba03297c4e4f9ae80fa8d3fd7c0b0:Cj8KPRI7CgdtYXN0ZXIwEAFKLgoJc2VjcDI1NmsxEiEDhyJiYbQZs28bivj9PXsitEWca1MDg3yeW9ziiNcG-Cs"
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/CardanoBlockRepositorySpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/CardanoBlockRepositorySpec.scala
index 9e10c787bf..b56a25928e 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/CardanoBlockRepositorySpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/CardanoBlockRepositorySpec.scala
@@ -4,7 +4,7 @@ import cats.effect.IO
import cats.effect.unsafe.implicits.global
import cats.scalatest.EitherMatchers._
import io.circe.Json
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.node.cardano.dbsync.repositories.testing.TestCardanoBlockRepository
import io.iohk.atala.prism.node.cardano.models._
import tofu.logging.Logging.Make
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/testing/TestCardanoBlockRepository.scala b/node/src/test/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/testing/TestCardanoBlockRepository.scala
index a49746eaab..40006c5d06 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/testing/TestCardanoBlockRepository.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/cardano/dbsync/repositories/testing/TestCardanoBlockRepository.scala
@@ -7,9 +7,9 @@ import doobie.implicits._
import doobie.implicits.legacy.instant._
import doobie.postgres.circe.json.implicits.jsonPut
import doobie.util.transactor.Transactor
-import io.iohk.atala.prism.models._
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.cardano.models._
-import io.iohk.atala.prism.utils.DoobieImplicits._
+import io.iohk.atala.prism.node.utils.DoobieImplicits._
import scala.util.Random
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/cardano/wallet/CardanoWalletApiClientSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/cardano/wallet/CardanoWalletApiClientSpec.scala
index aaad84a958..3edb0e9058 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/cardano/wallet/CardanoWalletApiClientSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/cardano/wallet/CardanoWalletApiClientSpec.scala
@@ -4,7 +4,7 @@ import cats.effect.IO
import cats.effect.unsafe.implicits.global
import cats.scalatest.EitherMatchers._
import io.circe.Json
-import io.iohk.atala.prism.models.{TransactionDetails, TransactionId, TransactionStatus}
+import io.iohk.atala.prism.node.models.{TransactionDetails, TransactionId, TransactionStatus}
import io.iohk.atala.prism.node.cardano.models._
import io.iohk.atala.prism.node.cardano.wallet.CardanoWalletApiClient.{CardanoWalletError, ErrorResponse, EstimatedFee}
import io.iohk.atala.prism.node.cardano.wallet.testing.FakeCardanoWalletApiClient
diff --git a/common/src/test/scala/io/iohk/atala/prism/db/DbNotificationStreamerSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/db/DbNotificationStreamerSpec.scala
similarity index 95%
rename from common/src/test/scala/io/iohk/atala/prism/db/DbNotificationStreamerSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/db/DbNotificationStreamerSpec.scala
index 5616b58685..db9b195d3b 100644
--- a/common/src/test/scala/io/iohk/atala/prism/db/DbNotificationStreamerSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/db/DbNotificationStreamerSpec.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.db
+package io.iohk.atala.prism.node.db
import cats.effect.kernel.Outcome
import cats.effect.{IO, OutcomeIO}
@@ -7,8 +7,8 @@ import cats.effect.unsafe.implicits.global
import doobie.HC
import doobie.implicits._
import doobie.postgres._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
-import io.iohk.atala.prism.db.DbNotificationStreamer.DbNotification
+import io.iohk.atala.prism.node.db.DbNotificationStreamer.DbNotification
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import org.scalatest.{Assertion, Assertions}
import scala.concurrent.duration._
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/models/AtalaObjectInfoSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/models/AtalaObjectInfoSpec.scala
index 3a6e0b0dd7..2589b400d6 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/models/AtalaObjectInfoSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/models/AtalaObjectInfoSpec.scala
@@ -1,6 +1,6 @@
package io.iohk.atala.prism.node.models
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.node.operations.CreateDIDOperationSpec
import io.iohk.atala.prism.node.services.BlockProcessingServiceSpec
import io.iohk.atala.prism.protos.node_models.SignedAtalaOperation
diff --git a/common/src/test/scala/io/iohk/atala/prism/models/KeyData.scala b/node/src/test/scala/io/iohk/atala/prism/node/models/KeyData.scala
similarity index 84%
rename from common/src/test/scala/io/iohk/atala/prism/models/KeyData.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/models/KeyData.scala
index 545aeb3574..fc54629421 100644
--- a/common/src/test/scala/io/iohk/atala/prism/models/KeyData.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/models/KeyData.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.models
+package io.iohk.atala.prism.node.models
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.protos.models.TimestampInfo
diff --git a/common/src/test/scala/io/iohk/atala/prism/nonce/ClientHelperSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/nonce/ClientHelperSpec.scala
similarity index 90%
rename from common/src/test/scala/io/iohk/atala/prism/nonce/ClientHelperSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/nonce/ClientHelperSpec.scala
index ab05b4e933..b677ea54ef 100644
--- a/common/src/test/scala/io/iohk/atala/prism/nonce/ClientHelperSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/nonce/ClientHelperSpec.scala
@@ -1,7 +1,7 @@
-package io.iohk.atala.prism.nonce
+package io.iohk.atala.prism.node.nonce
-import io.iohk.atala.prism.DIDUtil
-import io.iohk.atala.prism.auth.utils.DIDUtils
+import io.iohk.atala.prism.node.DIDUtil
+import io.iohk.atala.prism.node.auth.utils.DIDUtils
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.protos.connector_api
import org.scalatest.OptionValues._
diff --git a/common/src/test/scala/io/iohk/atala/prism/nonce/RequestAuthenticatorSpecBase.scala b/node/src/test/scala/io/iohk/atala/prism/node/nonce/RequestAuthenticatorSpecBase.scala
similarity index 97%
rename from common/src/test/scala/io/iohk/atala/prism/nonce/RequestAuthenticatorSpecBase.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/nonce/RequestAuthenticatorSpecBase.scala
index 0bd40804eb..8c5379cea8 100644
--- a/common/src/test/scala/io/iohk/atala/prism/nonce/RequestAuthenticatorSpecBase.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/nonce/RequestAuthenticatorSpecBase.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.nonce
+package io.iohk.atala.prism.node.nonce
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.signature.ECSignature
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/operations/CreateDIDOperationSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/operations/CreateDIDOperationSpec.scala
index 30fe98275c..7d8782fe0f 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/operations/CreateDIDOperationSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/operations/CreateDIDOperationSpec.scala
@@ -3,7 +3,6 @@ package io.iohk.atala.prism.node.operations
import cats.effect.unsafe.implicits.global
import com.google.protobuf.ByteString
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.ECConfig.{INSTANCE => ECConfig}
import io.iohk.atala.prism.crypto.keys.{ECKeyPair, ECPublicKey}
@@ -14,7 +13,7 @@ import io.iohk.atala.prism.node.models.{DIDData, DIDPublicKey, ProtocolConstants
import io.iohk.atala.prism.node.operations.StateError.UnsupportedOperation
import io.iohk.atala.prism.node.operations.protocolVersion.SupportedOperations
import io.iohk.atala.prism.node.repositories.daos.ServicesDAO
-import io.iohk.atala.prism.node.{DataPreparation, models}
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation, models}
import io.iohk.atala.prism.protos.node_models
import io.iohk.atala.prism.protos.node_models.{AtalaOperation, CompressedECKeyData, ECKeyData}
import org.scalatest.EitherValues._
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/operations/DeactivateDIDOperationSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/operations/DeactivateDIDOperationSpec.scala
index 0e8310fc11..04a76d0016 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/operations/DeactivateDIDOperationSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/operations/DeactivateDIDOperationSpec.scala
@@ -3,10 +3,9 @@ package io.iohk.atala.prism.node.operations
import cats.effect.unsafe.implicits.global
import com.google.protobuf.ByteString
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.Sha256Digest
-import io.iohk.atala.prism.models.DidSuffix
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.models.DidSuffix
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.DataPreparation.{dummyApplyOperationConfig, dummyLedgerData}
import io.iohk.atala.prism.node.repositories.daos.PublicKeysDAO
import io.iohk.atala.prism.node.services.BlockProcessingServiceSpec
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/operations/IssueCredentialBatchOperationSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/operations/IssueCredentialBatchOperationSpec.scala
index 47eebec6a8..92d5318acc 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/operations/IssueCredentialBatchOperationSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/operations/IssueCredentialBatchOperationSpec.scala
@@ -3,9 +3,8 @@ package io.iohk.atala.prism.node.operations
import cats.effect.unsafe.implicits.global
import com.google.protobuf.ByteString
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.{MerkleRoot, Sha256}
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.DataPreparation.{dummyApplyOperationConfig, dummyLedgerData}
import io.iohk.atala.prism.node.models.{DIDData, DIDPublicKey, KeyUsage}
import io.iohk.atala.prism.node.repositories.daos.CredentialBatchesDAO
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/operations/ProtoParsingTestHelpers.scala b/node/src/test/scala/io/iohk/atala/prism/node/operations/ProtoParsingTestHelpers.scala
index 47c9eb4d29..83213cb837 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/operations/ProtoParsingTestHelpers.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/operations/ProtoParsingTestHelpers.scala
@@ -4,7 +4,7 @@ import cats.data.NonEmptyList
import java.time.Instant
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.models.{Ledger, TransactionId}
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId}
import io.iohk.atala.prism.node.models.nodeState.LedgerData
import io.iohk.atala.prism.node.services.BlockProcessingServiceSpec
import io.iohk.atala.prism.protos.node_models
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/operations/ProtocolVersionUpdateOperationSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/operations/ProtocolVersionUpdateOperationSpec.scala
index 3b30ed8989..085d59c659 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/operations/ProtocolVersionUpdateOperationSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/operations/ProtocolVersionUpdateOperationSpec.scala
@@ -2,11 +2,10 @@ package io.iohk.atala.prism.node.operations
import cats.effect.unsafe.implicits.global
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.models.DidSuffix
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.models.DidSuffix
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.DataPreparation.dummyLedgerData
import io.iohk.atala.prism.node.cardano.LAST_SYNCED_BLOCK_NO
import io.iohk.atala.prism.node.models.ProtocolVersion.InitialProtocolVersion
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/operations/RevokeCredentialsOperationSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/operations/RevokeCredentialsOperationSpec.scala
index e37c97fd2e..089505df30 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/operations/RevokeCredentialsOperationSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/operations/RevokeCredentialsOperationSpec.scala
@@ -3,11 +3,11 @@ package io.iohk.atala.prism.node.operations
import cats.effect.unsafe.implicits.global
import com.google.protobuf.ByteString
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.models.{Ledger, TransactionId}
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId}
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.node.DataPreparation.{dummyApplyOperationConfig, dummyLedgerData}
import io.iohk.atala.prism.node.models.nodeState.LedgerData
import io.iohk.atala.prism.node.repositories.daos.CredentialBatchesDAO
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/operations/UpdateDIDOperationSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/operations/UpdateDIDOperationSpec.scala
index fc10a16b69..06ca33f0af 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/operations/UpdateDIDOperationSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/operations/UpdateDIDOperationSpec.scala
@@ -4,10 +4,9 @@ import cats.data.NonEmptyList
import cats.effect.unsafe.implicits.global
import com.google.protobuf.ByteString
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.Sha256
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.DataPreparation.{dummyApplyOperationConfig, dummyLedgerData, dummyTimestampInfo}
import io.iohk.atala.prism.node.grpc.ProtoCodecs
import io.iohk.atala.prism.node.models.{DIDPublicKey, DIDService, KeyUsage, ProtocolConstants}
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/poc/CredVerification.scala b/node/src/test/scala/io/iohk/atala/prism/node/poc/CredVerification.scala
index d56e2ea773..32ff6fd285 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/poc/CredVerification.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/poc/CredVerification.scala
@@ -5,8 +5,8 @@ import cats.implicits.{catsSyntaxTuple6Semigroupal, catsSyntaxValidatedId}
import io.iohk.atala.prism.credentials.PrismCredential
import io.iohk.atala.prism.crypto.{MerkleInclusionProof, MerkleRoot}
import io.iohk.atala.prism.api.CredentialBatches
+import io.iohk.atala.prism.node.models.KeyData
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.models.KeyData
object CredVerification {
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/poc/EncodedSizes.scala b/node/src/test/scala/io/iohk/atala/prism/node/poc/EncodedSizes.scala
index 0b59c27f3d..928c707bb8 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/poc/EncodedSizes.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/poc/EncodedSizes.scala
@@ -6,7 +6,7 @@ import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.keys.ECPublicKey
import io.iohk.atala.prism.crypto.ECConfig.{INSTANCE => ECConfig}
import io.iohk.atala.prism.crypto.Sha256
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.protos.node_models
object EncodedSizes {
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/poc/GenericCredentialsSDK.scala b/node/src/test/scala/io/iohk/atala/prism/node/poc/GenericCredentialsSDK.scala
index a9494cfc2a..c564b53c0a 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/poc/GenericCredentialsSDK.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/poc/GenericCredentialsSDK.scala
@@ -2,7 +2,7 @@ package io.iohk.atala.prism.node.poc
import io.iohk.atala.prism.credentials.content.CredentialContent
import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import kotlinx.serialization.json.JsonElementKt.JsonPrimitive
import kotlinx.serialization.json.JsonObject
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/poc/Wallet.scala b/node/src/test/scala/io/iohk/atala/prism/node/poc/Wallet.scala
index d6c21761c8..72dcdfa94a 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/poc/Wallet.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/poc/Wallet.scala
@@ -12,7 +12,7 @@ import io.iohk.atala.prism.crypto.ECConfig.{INSTANCE => ECConfig}
import io.iohk.atala.prism.protos.{node_api, node_models}
import io.iohk.atala.prism.crypto.signature.ECSignature
import io.iohk.atala.prism.identity.PrismDid
-import io.iohk.atala.prism.models.{DidSuffix, KeyData}
+import io.iohk.atala.prism.node.models.{DidSuffix, KeyData}
import io.iohk.atala.prism.node.grpc.ProtoCodecs
import io.iohk.atala.prism.node.poc.CredVerification.{BatchData, VerificationError}
import org.scalatest.OptionValues.convertOptionToValuable
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/poc/batch/FlowPoC.scala b/node/src/test/scala/io/iohk/atala/prism/node/poc/batch/FlowPoC.scala
index 2bcb3d8e0e..8682ad6b1c 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/poc/batch/FlowPoC.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/poc/batch/FlowPoC.scala
@@ -7,15 +7,15 @@ import cats.syntax.functor._
import com.google.protobuf.ByteString
import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder}
import io.grpc.{ManagedChannel, Server}
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.api.CredentialBatches
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.credentials.json.JsonBasedCredential
import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
import io.iohk.atala.prism.identity.PrismDid.{getDEFAULT_MASTER_KEY_ID => masterKeyId}
import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.operations.ApplyOperationConfig
import io.iohk.atala.prism.node.poc.CredVerification.VerificationError._
import io.iohk.atala.prism.node.poc.{GenericCredentialsSDK, Wallet}
@@ -24,11 +24,10 @@ import io.iohk.atala.prism.node.services.models.AtalaObjectNotification
import io.iohk.atala.prism.node.services._
import io.iohk.atala.prism.node.{DataPreparation, NodeGrpcServiceImpl, UnderlyingLedger}
import io.iohk.atala.prism.protos.node_api
-import io.iohk.atala.prism.utils.IOUtils._
-import io.iohk.atala.prism.utils.NodeClientUtils._
+import io.iohk.atala.prism.node.utils.IOUtils._
+import io.iohk.atala.prism.node.utils.NodeClientUtils._
import org.scalatest.BeforeAndAfterEach
import tofu.logging.Logs
-
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.DurationInt
import scala.jdk.CollectionConverters._
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/poc/endorsements/EndorsementsFlowPoC.scala b/node/src/test/scala/io/iohk/atala/prism/node/poc/endorsements/EndorsementsFlowPoC.scala
deleted file mode 100644
index 88a1f4d88f..0000000000
--- a/node/src/test/scala/io/iohk/atala/prism/node/poc/endorsements/EndorsementsFlowPoC.scala
+++ /dev/null
@@ -1,402 +0,0 @@
-package io.iohk.atala.prism.node.poc.endorsements
-
-import cats.effect.IO
-import cats.effect.unsafe.implicits.global
-import cats.syntax.functor._
-import com.google.protobuf.ByteString
-import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder}
-import io.grpc.{ManagedChannel, Server}
-import io.iohk.atala.prism.AtalaWithPostgresSpec
-import io.iohk.atala.prism.api.CredentialBatches
-import io.iohk.atala.prism.credentials.CredentialBatchId
-import io.iohk.atala.prism.credentials.content.CredentialContent
-import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
-import io.iohk.atala.prism.crypto.keys.ECPublicKey
-import io.iohk.atala.prism.crypto.signature.ECSignature
-import io.iohk.atala.prism.crypto.{Sha256, Sha256Digest}
-import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.models.DidSuffix
-import io.iohk.atala.prism.node.grpc.ProtoCodecs
-import io.iohk.atala.prism.node.operations.ApplyOperationConfig
-import io.iohk.atala.prism.node.poc.Wallet
-import io.iohk.atala.prism.node.poc.endorsements.EndorsementsService.SignedKey
-import io.iohk.atala.prism.node.repositories._
-import io.iohk.atala.prism.node.services.models.AtalaObjectNotification
-import io.iohk.atala.prism.node.services._
-import io.iohk.atala.prism.node.{DataPreparation, NodeGrpcServiceImpl, UnderlyingLedger}
-import io.iohk.atala.prism.protos.endorsements_api.{
- EndorseInstitutionRequest,
- GetEndorsementsRequest,
- GetFreshMasterKeyRequest,
- RevokeEndorsementRequest
-}
-import io.iohk.atala.prism.protos.node_api.{GetDidDocumentRequest, ScheduleOperationsRequest}
-import io.iohk.atala.prism.protos.{node_api, node_models}
-import io.iohk.atala.prism.utils.IOUtils._
-import io.iohk.atala.prism.utils.NodeClientUtils.{issueBatchOperation, revokeCredentialsOperation}
-import org.scalatest.BeforeAndAfterEach
-import org.scalatest.OptionValues.convertOptionToValuable
-import tofu.logging.Logs
-
-import java.util.concurrent.TimeUnit
-import scala.concurrent.duration.DurationInt
-import scala.jdk.CollectionConverters._
-
-class EndorsementsFlowPoC extends AtalaWithPostgresSpec with BeforeAndAfterEach {
- import Utils._
-
- private val endorsementsFlowPoCLogs =
- Logs.withContext[IO, IOWithTraceIdContext]
- protected var serverName: String = _
- protected var serverHandle: Server = _
- protected var channelHandle: ManagedChannel = _
- protected var nodeServiceStub: node_api.NodeServiceGrpc.NodeServiceBlockingStub = _
- protected var didDataRepository: DIDDataRepository[IOWithTraceIdContext] = _
- protected var atalaOperationsRepository: AtalaOperationsRepository[IOWithTraceIdContext] = _
- protected var atalaObjectsTransactionsRepository: AtalaObjectsTransactionsRepository[IOWithTraceIdContext] = _
- protected var keyValuesRepository: KeyValuesRepository[IOWithTraceIdContext] =
- _
- protected var credentialBatchesRepository: CredentialBatchesRepository[IOWithTraceIdContext] = _
- protected var atalaReferenceLedger: UnderlyingLedger[IOWithTraceIdContext] = _
- protected var blockProcessingService: BlockProcessingServiceImpl = _
- protected var objectManagementService: ObjectManagementService[IOWithTraceIdContext] = _
- protected var metricsCountersRepository: MetricsCountersRepository[IOWithTraceIdContext] = _
- protected var submissionService: SubmissionService[IOWithTraceIdContext] = _
- protected var submissionSchedulingService: SubmissionSchedulingService = _
- protected var protocolVersionsRepository: ProtocolVersionRepository[IOWithTraceIdContext] = _
- private val publicKeysLimit = 10
- private val servicesLimit = 10
-
- override def beforeEach(): Unit = {
- super.beforeEach()
-
- didDataRepository = DIDDataRepository.unsafe(dbLiftedToTraceIdIO, endorsementsFlowPoCLogs)
- credentialBatchesRepository = CredentialBatchesRepository.unsafe(
- dbLiftedToTraceIdIO,
- endorsementsFlowPoCLogs
- )
- protocolVersionsRepository = ProtocolVersionRepository.unsafe(
- dbLiftedToTraceIdIO,
- endorsementsFlowPoCLogs
- )
-
- atalaReferenceLedger = InMemoryLedgerService.unsafe(onAtalaReference, endorsementsFlowPoCLogs)
- blockProcessingService = new BlockProcessingServiceImpl(ApplyOperationConfig(DidSuffix("0a1e3")))
- atalaOperationsRepository = AtalaOperationsRepository.unsafe(
- dbLiftedToTraceIdIO,
- endorsementsFlowPoCLogs
- )
- metricsCountersRepository = MetricsCountersRepository.unsafe(dbLiftedToTraceIdIO, endorsementsFlowPoCLogs)
- atalaObjectsTransactionsRepository = AtalaObjectsTransactionsRepository
- .unsafe(dbLiftedToTraceIdIO, endorsementsFlowPoCLogs)
- keyValuesRepository = KeyValuesRepository.unsafe(dbLiftedToTraceIdIO, endorsementsFlowPoCLogs)
- objectManagementService = ObjectManagementService.unsafe(
- atalaOperationsRepository,
- atalaObjectsTransactionsRepository,
- keyValuesRepository,
- protocolVersionsRepository,
- blockProcessingService,
- publicKeysLimit,
- servicesLimit,
- dbLiftedToTraceIdIO,
- endorsementsFlowPoCLogs
- )
- def onAtalaReference(notification: AtalaObjectNotification): IOWithTraceIdContext[Unit] =
- objectManagementService
- .saveObject(notification)
- .void
-
- submissionService = SubmissionService.unsafe(
- atalaReferenceLedger,
- atalaOperationsRepository,
- atalaObjectsTransactionsRepository,
- logs = endorsementsFlowPoCLogs
- )
- // this service needs to pull operations from the database and to send them to the ledger
- submissionSchedulingService = SubmissionSchedulingService(
- SubmissionSchedulingService.Config(
- refreshAndSubmitPeriod = 1.second,
- moveScheduledToPendingPeriod = 2.second
- ),
- submissionService
- )
-
- serverName = InProcessServerBuilder.generateName()
-
- serverHandle = InProcessServerBuilder
- .forName(serverName)
- .directExecutor()
- .addService(
- node_api.NodeServiceGrpc
- .bindService(
- new NodeGrpcServiceImpl(
- NodeService.unsafe(
- didDataRepository,
- objectManagementService,
- credentialBatchesRepository,
- endorsementsFlowPoCLogs
- )
- ),
- executionContext
- )
- )
- .build()
- .start()
-
- channelHandle = InProcessChannelBuilder.forName(serverName).directExecutor().build()
-
- nodeServiceStub = node_api.NodeServiceGrpc.blockingStub(channelHandle)
- }
-
- override def afterEach(): Unit = {
- channelHandle.shutdown()
- channelHandle.awaitTermination(10, TimeUnit.SECONDS)
- serverHandle.shutdown()
- serverHandle.awaitTermination()
- super.afterEach()
- }
-
- "The batch issuance/verification flow" should {
- "work" in {
-
- val endorsementsService = EndorsementsService(nodeServiceStub)
- // we will make use of the toy wallet already implemented
- val wallet = Wallet(nodeServiceStub)
-
- // the steps of the flow to implement
- // 1. the MoE generates its DID
- val (moeDIDSuffix, createDIDOp) = wallet.generateDID()
- val moeDID = DID.fromString(s"did:prism:${moeDIDSuffix.getValue}")
- val signedAtalaOperation =
- wallet.signOperation(createDIDOp, "master0", moeDIDSuffix)
- val createDIDResponse = nodeServiceStub
- .scheduleOperations(
- ScheduleOperationsRequest(List(signedAtalaOperation))
- )
- .outputs
- .head
-
- // 2. We create 100 signed keys (we will later define how to derive them properly)
- val issuanceKeyId = "issuance0"
- val signedKeys: List[SignedKey] = (1 to 100).toList.map { _ =>
- val keyPair = EC.generateKeyPair()
- val publicKey = keyPair.getPublicKey
- SignedKey(
- publicKey,
- wallet.signKey(publicKey, issuanceKeyId, moeDIDSuffix),
- issuanceKeyId
- )
- }
-
- // 3. we initialize the endorsements service. This registers the MoE DID and
- // a set of signed public keys
- endorsementsService
- .initialize(moeDID, signedKeys)
- .futureValue
-
- // 4. the MoE requests a master key the region
- val freshKeyProto = endorsementsService
- .getFreshMasterKey(
- GetFreshMasterKeyRequest()
- .withEndorserDID(moeDID.getValue)
- )
- .futureValue
-
- val retrievedKey =
- SignedKey(
- fromProtoKeyData(freshKeyProto.getKey),
- new ECSignature(freshKeyProto.signature.toByteArray),
- freshKeyProto.signingKeyId
- )
-
- DataPreparation.waitConfirmation(
- nodeServiceStub,
- createDIDResponse.getOperationId
- )
- // 5. the MoE validates the key signature
- val moeIssuingKey = ProtoCodecs
- .fromProtoKey(
- nodeServiceStub
- .getDidDocument(
- GetDidDocumentRequest(moeDID.getValue)
- )
- .getDocument
- .publicKeys
- .find(_.id == retrievedKey.signingKeyId)
- .get
- )
- .get
-
- wallet
- .verifySignedKey(
- retrievedKey.key,
- retrievedKey.signature,
- moeIssuingKey
- ) mustBe true
-
- // 6. the MoE shares the key with the region to endorse
- // 7. the region first generates its DID, then create a DID update
- // that adds the key shared by the MoE as master key, and removes
- // the original master key of the DID
- val (regionDIDSuffix, regionCreateDIDOp) = wallet.generateDID()
- val regionDID = DID.fromString(s"did:prism:${regionDIDSuffix.getValue}")
- val updateAddMoEKeyOp = updateDIDOp(
- Sha256.compute(regionCreateDIDOp.toByteArray),
- regionDIDSuffix,
- retrievedKey.key,
- "master0"
- )
-
- val signedRegionCreateDIDOp =
- wallet.signOperation(regionCreateDIDOp, "master0", regionDIDSuffix)
- val signedAddKeyOp =
- wallet.signOperation(updateAddMoEKeyOp, "master0", regionDIDSuffix)
-
- // the region now published the CreateDID and UpdateDID operations
- val scheduleOperationsResponse = nodeServiceStub.scheduleOperations(
- ScheduleOperationsRequest(
- Seq(
- signedRegionCreateDIDOp,
- signedAddKeyOp
- )
- )
- )
- scheduleOperationsResponse.outputs.size must be(2)
- DataPreparation.waitConfirmation(
- nodeServiceStub,
- scheduleOperationsResponse.outputs.map(
- _.operationMaybe.operationId.value
- ): _*
- )
-
- // 8. the region shares back its DID
- // 9. the MoE generates an endorsements credential and calls the endorsement RPC
- val credential =
- wallet.signCredential(
- {
- import kotlinx.serialization.json.JsonElementKt._
- import kotlinx.serialization.json.JsonObject
- val map = Map(
- "id" -> JsonPrimitive(moeDID.getValue),
- "keyId" -> JsonPrimitive(issuanceKeyId),
- "credentialSubject" -> JsonPrimitive(
- s"{'endorses': ${regionDID.getValue}"
- )
- )
- new CredentialContent(new JsonObject(map.asJava))
- },
- issuanceKeyId,
- moeDIDSuffix
- )
-
- val batch = CredentialBatches.batch(List(credential).asJava)
- val (root, proof) = (batch.getRoot, batch.getProofs.asScala.toList)
- val issueOp = issueBatchOperation(moeDID, root)
- val batchId = CredentialBatchId.fromBatchData(moeDIDSuffix.value, root)
- val issueOpHash = Sha256.compute(issueOp.toByteArray)
- val signedIssuanceOp =
- wallet.signOperation(issueOp, issuanceKeyId, moeDIDSuffix)
- endorsementsService
- .endorseInstitution(
- EndorseInstitutionRequest()
- .withParentDID(moeDID.getValue)
- .withChildDID(regionDID.getValue)
- .withCredential(credential.getCanonicalForm)
- .withEncodedMerkleProof(proof.head.encode)
- .withIssueBatch(signedIssuanceOp)
- )
- .futureValue
-
- // 11. we check the validity interval of the newly endorsed DID
- val validityInterval = endorsementsService
- .getEndorsements(
- GetEndorsementsRequest()
- .withDid(regionDID.getValue)
- )
- .futureValue
-
- println(validityInterval.toProtoString)
-
- // We revoke the endorsement
- val revocationKeyId = "revocation0"
- wallet.addRevocationKeyToDid(
- revocationKeyId = revocationKeyId,
- previousOperationHash = ByteString.copyFrom(Sha256.compute(createDIDOp.toByteArray).getValue),
- didSuffix = moeDIDSuffix
- )
-
- val revokeOp = revokeCredentialsOperation(issueOpHash, batchId)
- val signedRevokeOp =
- wallet.signOperation(revokeOp, revocationKeyId, moeDIDSuffix)
- endorsementsService
- .revokeEndorsement(
- RevokeEndorsementRequest()
- .withParentDID(moeDID.getValue)
- .withChildDID(regionDID.getValue)
- .withRevokeBatch(signedRevokeOp)
- )
- .futureValue
-
- // we check the validity interval of the un-endorsed DID
- val validityInterval2 = endorsementsService
- .getEndorsements(
- GetEndorsementsRequest()
- .withDid(regionDID.getValue)
- )
- .futureValue
-
- println(validityInterval2.toProtoString)
- }
- }
-}
-
-object Utils {
-
- def fromProtoKeyData(keyData: node_models.ECKeyData): ECPublicKey = {
- EC.toPublicKeyFromByteCoordinates(
- keyData.x.toByteArray,
- keyData.y.toByteArray
- )
- }
-
- def updateDIDOp(
- previousHash: Sha256Digest,
- suffix: DidSuffix,
- keyToAdd: ECPublicKey,
- keyIdToRemove: String
- ): node_models.AtalaOperation = {
- node_models.AtalaOperation(
- operation = node_models.AtalaOperation.Operation.UpdateDid(
- node_models.UpdateDIDOperation(
- previousOperationHash = ByteString.copyFrom(previousHash.getValue),
- id = suffix.getValue,
- actions = Seq(
- node_models.UpdateDIDAction(
- node_models.UpdateDIDAction.Action.AddKey(
- node_models.AddKeyAction(
- key = Some(
- node_models.PublicKey(
- id = "masterMoE",
- usage = node_models.KeyUsage.MASTER_KEY,
- keyData = node_models.PublicKey.KeyData
- .EcKeyData(ProtoCodecs.toECKeyData(keyToAdd))
- )
- )
- )
- )
- ),
- node_models.UpdateDIDAction(
- node_models.UpdateDIDAction.Action.RemoveKey(
- node_models.RemoveKeyAction(
- keyId = keyIdToRemove
- )
- )
- )
- )
- )
- )
- )
- }
-
-}
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/poc/endorsements/EndorsementsService.scala b/node/src/test/scala/io/iohk/atala/prism/node/poc/endorsements/EndorsementsService.scala
deleted file mode 100644
index 2e260ba211..0000000000
--- a/node/src/test/scala/io/iohk/atala/prism/node/poc/endorsements/EndorsementsService.scala
+++ /dev/null
@@ -1,247 +0,0 @@
-package io.iohk.atala.prism.node.poc.endorsements
-
-import java.time.Instant
-import com.google.protobuf.ByteString
-import io.iohk.atala.prism.credentials.json.JsonBasedCredential
-import io.iohk.atala.prism.credentials.CredentialBatchId
-import io.iohk.atala.prism.identity.{PrismDid => DID}
-import io.iohk.atala.prism.crypto.ECConfig.{INSTANCE => ECConfig}
-import io.iohk.atala.prism.crypto.keys.ECPublicKey
-import io.iohk.atala.prism.crypto.signature.ECSignature
-import io.iohk.atala.prism.crypto.{MerkleInclusionProof, MerkleRoot, Sha256Digest}
-import io.iohk.atala.prism.api.CredentialBatches
-import io.iohk.atala.prism.node.grpc.ProtoCodecs
-import io.iohk.atala.prism.protos.endorsements_api._
-import io.iohk.atala.prism.protos.node_api._
-import io.iohk.atala.prism.protos.node_models
-import io.iohk.atala.prism.protos.node_models.{KeyUsage, SignedAtalaOperation}
-import io.iohk.atala.prism.utils.syntax.InstantToTimestampOps
-
-import scala.concurrent.{ExecutionContext, Future}
-import scala.util.Try
-
-case class EndorsementsService(
- nodeServiceStub: NodeServiceGrpc.NodeServiceBlockingStub
-)(implicit
- executionContext: ExecutionContext
-) extends EndorsementsServiceGrpc.EndorsementsService {
- import EndorsementsService._
-
- // private state
- private var moeDID: DID = _
- private var trustedDIDs: Set[DID] = Set.empty
- private var signedKeys: List[SignedKey] = List.empty
- private var requestedBy: Map[ECPublicKey, DID] = Map.empty
- private var endorsedBy: Map[DID, DID] = Map.empty
- private var keyAssigned: Map[DID, ECPublicKey] = Map.empty
- private var validIn: Map[DID, List[ValidInterval]] =
- Map.empty.withDefaultValue(Nil)
-
- private var lastRequested: Int = -1
- private def nextKey(): SignedKey = {
- lastRequested += 1
- if (lastRequested < signedKeys.size) signedKeys(lastRequested)
- else throw new RuntimeException("Ran out of keys. Please add more keys")
- }
- private def isAlreadyEndorsed(did: DID): Boolean = {
- validIn(did).lastOption.exists(_.to.isEmpty)
- }
- private def updatedValidInterval(
- did: DID,
- timestamp: Instant
- ): List[ValidInterval] = {
- val periods = validIn(did)
- val newValidInterval = periods.last.copy(to = Some(timestamp))
- periods.init :+ newValidInterval
- }
-
- // management related api
- def initialize(initialDID: DID, keys: List[SignedKey]): Future[Unit] =
- Future {
- moeDID = initialDID
- signedKeys = keys
- trustedDIDs = Set(initialDID)
- }
-
- def getMoEDID(): Future[DID] = Future.successful(moeDID)
-
- // API
- def getFreshMasterKey(
- request: GetFreshMasterKeyRequest
- ): Future[GetFreshMasterKeyResponse] = {
- Future.successful {
- val requester: DID = DID.fromString(request.endorserDID)
- val signedKey = nextKey()
- requestedBy = requestedBy.updated(signedKey.key, requester)
- println(s"assigned key: ${signedKey.key}")
- GetFreshMasterKeyResponse()
- .withKey(publicKeyToProto(signedKey.key))
- .withSignature(ByteString.copyFrom(signedKey.signature.getData))
- .withSigningKeyId(signedKey.signingKeyId)
- }
- }
-
- def endorseInstitution(
- request: EndorseInstitutionRequest
- ): Future[EndorseInstitutionResponse] =
- Future {
- val parentDID: DID = DID.fromString(request.parentDID)
- val childDID: DID = DID.fromString(request.childDID)
- val signedOperation: SignedAtalaOperation = request.getIssueBatch
-
- val response = nodeServiceStub.getDidDocument(
- GetDidDocumentRequest(childDID.toString)
- )
- val childMasterKeyList =
- response.getDocument.publicKeys.filter(k => k.usage == KeyUsage.MASTER_KEY && k.revokedOn.isEmpty)
- val childMasterKey =
- ProtoCodecs
- .fromProtoKey(childMasterKeyList.head)
- .getOrElse(throw new RuntimeException("Failed to parse key"))
-
- val parentAssociatedToKey = requestedBy.getOrElse(
- childMasterKey,
- throw new RuntimeException("unknown key")
- )
-
- val credential = JsonBasedCredential.fromString(request.credential)
- val credentialDID = Option(credential.getContent.getIssuerDid).get
- val operationDID =
- DID.buildCanonical(
- Sha256Digest.fromHex(
- signedOperation.getOperation.getIssueCredentialBatch.getCredentialBatchData.issuerDid
- )
- )
- val operationMerkleRoot = new MerkleRoot(
- Sha256Digest.fromBytes(
- signedOperation.getOperation.getIssueCredentialBatch.getCredentialBatchData.merkleRoot.toByteArray
- )
- )
- val decodedProof = MerkleInclusionProof.decode(request.encodedMerkleProof)
- val proofDerivedRoot = decodedProof.derivedRoot
-
- if (
- // there should be a check that the parentDID represents a role that can onboard the child DID
-
- // tne child institution has only one active master key
- childMasterKeyList.size == 1 &&
- // the key was requested by the parent institution
- parentDID == parentAssociatedToKey &&
- // the credential issuer matches the requester DID
- parentDID == credentialDID &&
- // the parent DID is the same than the one signing the operation
- parentDID == operationDID &&
- // the credential is included in the issuing operation
- operationMerkleRoot == proofDerivedRoot &&
- CredentialBatches.verifyInclusion(
- credential,
- operationMerkleRoot,
- decodedProof
- ) &&
- // the DID is not already endorsed
- !isAlreadyEndorsed(childDID)
- ) {
-
- // if all checks are valid we issue the credential
- nodeServiceStub
- .scheduleOperations(
- ScheduleOperationsRequest(List(signedOperation))
- )
-
- val interval = ValidInterval(
- from = Instant.now(),
- to = None,
- verifiableCredential = request.credential,
- inclusionProof = request.encodedMerkleProof
- )
-
- trustedDIDs = trustedDIDs + childDID
- endorsedBy = endorsedBy.updated(childDID, parentDID)
- keyAssigned = keyAssigned.updated(childDID, childMasterKey)
- validIn = validIn.updated(
- childDID,
- validIn.getOrElse(childDID, Nil) :+ interval
- )
- EndorseInstitutionResponse()
- } else {
- throw new RuntimeException("Endorsement validation failed")
- }
- }
-
- def getEndorsements(
- request: GetEndorsementsRequest
- ): Future[GetEndorsementsResponse] =
- Future {
- val did = DID.fromString(request.did)
- val intervals = validIn(did).map { interval =>
- ValidityInterval(to = interval.to.map(_.toProtoTimestamp))
- .withFrom(interval.from.toProtoTimestamp)
- .withCredential(interval.verifiableCredential)
- .withEncodedMerkleProof(interval.inclusionProof)
- }
- GetEndorsementsResponse()
- .withIntervals(intervals)
- }
-
- def revokeEndorsement(
- request: RevokeEndorsementRequest
- ): Future[RevokeEndorsementResponse] =
- Future {
- val parentDID = DID.fromString(request.parentDID)
- val childDID = DID.fromString(request.childDID)
- val revokeOperation = request.getRevokeBatch
-
- if (endorsedBy(childDID) == parentDID) {
- nodeServiceStub.scheduleOperations(
- ScheduleOperationsRequest(List(revokeOperation))
- )
-
- val revocationTime = Instant.now()
- trustedDIDs = trustedDIDs - childDID
- validIn = validIn.updated(
- childDID,
- updatedValidInterval(childDID, revocationTime)
- )
- RevokeEndorsementResponse()
- } else {
- throw new RuntimeException("Revocation failed")
- }
- }
-}
-
-object EndorsementsService {
- case class ValidInterval(
- from: Instant,
- to: Option[Instant],
- verifiableCredential: String,
- inclusionProof: String
- ) {
- def batchId: CredentialBatchId = {
- val issuerDID = Try(
- JsonBasedCredential
- .fromString(verifiableCredential)
- .getContent
- .getIssuerDid
- ).getOrElse(throw new RuntimeException("missing issuer DID"))
- CredentialBatchId.fromBatchData(
- issuerDID.getSuffix,
- MerkleInclusionProof.decode(inclusionProof).derivedRoot
- )
- }
- }
-
- case class SignedKey(
- key: ECPublicKey,
- signature: ECSignature,
- signingKeyId: String
- )
-
- def publicKeyToProto(key: ECPublicKey): node_models.ECKeyData = {
- val point = key.getCurvePoint
- node_models.ECKeyData(
- curve = ECConfig.getCURVE_NAME,
- x = ByteString.copyFrom(point.getX.bytes()),
- y = ByteString.copyFrom(point.getY.bytes())
- )
- }
-}
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/poc/estimations/CardanoFeeEstimator.scala b/node/src/test/scala/io/iohk/atala/prism/node/poc/estimations/CardanoFeeEstimator.scala
index d83071ed63..cfb52d226d 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/poc/estimations/CardanoFeeEstimator.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/poc/estimations/CardanoFeeEstimator.scala
@@ -19,7 +19,7 @@ import io.iohk.atala.prism.protos.{node_internal, node_models}
import org.scalatest.OptionValues._
import org.scalatest.concurrent.ScalaFutures._
import tofu.logging.Logs
-
+import io.iohk.atala.prism.node.utils.IOUtils._
import scala.collection.mutable.ListBuffer
import scala.concurrent.duration._
@@ -301,17 +301,16 @@ object CardanoFeeEstimator {
estimator.estimate(List(nationalExamCertBody) ++ schoolIssuers)
println(s"""Ethiopia estimation:
- | Initial setup (DID creation):
- |${estimation.didCreation.toString(" - ")}
- | Yearly (credential issuing):
- |${estimation.credentialIssuing.toString(" - ")}
- | Total:
- |${estimation.toString(" - ")}
- |""".stripMargin)
+ | Initial setup (DID creation):
+ |${estimation.didCreation.toString(" - ")}
+ | Yearly (credential issuing):
+ |${estimation.credentialIssuing.toString(" - ")}
+ | Total:
+ |${estimation.toString(" - ")}
+ |""".stripMargin)
}
private def createCardanoFeeEstimator(): CardanoFeeEstimator = {
- import io.iohk.atala.prism.utils.IOUtils._
val clientConfig =
NodeConfig.cardanoConfig(ConfigFactory.load().getConfig("cardano"))
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/AtalaObjectsTransactionsRepositorySpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/AtalaObjectsTransactionsRepositorySpec.scala
index c67209d575..8366f2d128 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/AtalaObjectsTransactionsRepositorySpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/AtalaObjectsTransactionsRepositorySpec.scala
@@ -2,13 +2,13 @@ package io.iohk.atala.prism.node.repositories
import cats.effect.IO
import cats.effect.unsafe.implicits.global
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.Sha256
-import io.iohk.atala.prism.models.{BlockInfo, Ledger, TransactionId, TransactionInfo}
+import io.iohk.atala.prism.node.models.{BlockInfo, Ledger, TransactionId, TransactionInfo}
import io.iohk.atala.prism.node.DataPreparation
import io.iohk.atala.prism.node.services.BlockProcessingServiceSpec
import io.iohk.atala.prism.node.services.models.AtalaObjectNotification
-import io.iohk.atala.prism.utils.IOUtils._
+import io.iohk.atala.prism.node.utils.IOUtils._
import org.scalatest.OptionValues.convertOptionToValuable
import tofu.logging.Logs
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/CredentialBatchesRepositorySpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/CredentialBatchesRepositorySpec.scala
index f62cfb1eca..d368883c3e 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/CredentialBatchesRepositorySpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/CredentialBatchesRepositorySpec.scala
@@ -1,18 +1,16 @@
package io.iohk.atala.prism.node.repositories
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.credentials.CredentialBatchId
import io.iohk.atala.prism.crypto.{MerkleRoot, Sha256, Sha256Digest}
-import io.iohk.atala.prism.models.{DidSuffix, Ledger, TransactionId}
+import io.iohk.atala.prism.node.models.{DidSuffix, Ledger, TransactionId}
import io.iohk.atala.prism.node.models.nodeState.{CredentialBatchState, LedgerData}
import org.scalatest.OptionValues._
-
import java.time.Instant
import cats.effect.IO
import cats.effect.unsafe.implicits.global
import doobie.util.transactor.Transactor
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.models.DIDData
import tofu.logging.Logging
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/DIDDataRepositorySpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/DIDDataRepositorySpec.scala
index d7e6e17518..1fbcea0670 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/DIDDataRepositorySpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/DIDDataRepositorySpec.scala
@@ -2,17 +2,16 @@ package io.iohk.atala.prism.node.repositories
import cats.effect.IO
import cats.effect.unsafe.implicits.global
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.Sha256Digest
-import io.iohk.atala.prism.models.{Ledger, TransactionId}
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId}
import io.iohk.atala.prism.node.models.nodeState.LedgerData
import io.iohk.atala.prism.node.models.{DIDData, DIDPublicKey, KeyUsage}
import org.scalatest.OptionValues._
import java.time.Instant
import io.iohk.atala.prism.identity.{PrismDid => DID}
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import tofu.logging.Logging.Make
import tofu.logging.Logging
diff --git a/common/src/test/scala/io/iohk/atala/prism/repositories/DockerPostgresService.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/DockerPostgresService.scala
similarity index 98%
rename from common/src/test/scala/io/iohk/atala/prism/repositories/DockerPostgresService.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/repositories/DockerPostgresService.scala
index 3fc5ae0178..955c785e50 100644
--- a/common/src/test/scala/io/iohk/atala/prism/repositories/DockerPostgresService.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/DockerPostgresService.scala
@@ -1,6 +1,4 @@
-package io.iohk.atala.prism.repositories
-
-import java.sql.DriverManager
+package io.iohk.atala.prism.node.repositories
import com.spotify.docker.client.DefaultDockerClient
import com.whisk.docker._
@@ -8,6 +6,7 @@ import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import org.scalatest.concurrent.ScalaFutures._
import org.scalatest.matchers.must.Matchers._
+import java.sql.DriverManager
import scala.concurrent.{ExecutionContext, Future}
object DockerPostgresService extends DockerKit {
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/KeyValuesRepositorySpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/KeyValuesRepositorySpec.scala
index 6e9e1f1964..211e7a04ef 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/KeyValuesRepositorySpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/KeyValuesRepositorySpec.scala
@@ -2,7 +2,7 @@ package io.iohk.atala.prism.node.repositories
import cats.effect.IO
import cats.effect.unsafe.implicits.global
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.node.repositories.daos.KeyValuesDAO.KeyValue
import tofu.logging.Logging
diff --git a/common/src/test/scala/io/iohk/atala/prism/repositories/PostgresMigrationSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/PostgresMigrationSpec.scala
similarity index 97%
rename from common/src/test/scala/io/iohk/atala/prism/repositories/PostgresMigrationSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/repositories/PostgresMigrationSpec.scala
index ff4dd4d4cd..9ba7eda82c 100644
--- a/common/src/test/scala/io/iohk/atala/prism/repositories/PostgresMigrationSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/PostgresMigrationSpec.scala
@@ -1,22 +1,21 @@
-package io.iohk.atala.prism.repositories
+package io.iohk.atala.prism.node.repositories
-import java.util
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import org.flywaydb.core.Flyway
import org.flywaydb.core.api.configuration.Configuration
+import org.flywaydb.core.api.migration.JavaMigration
import org.flywaydb.core.api.resolver.{Context, MigrationResolver, ResolvedMigration}
-import org.flywaydb.core.internal.resolver.java.ScanningJavaMigrationResolver
-import org.flywaydb.core.api.ClassProvider
-import org.flywaydb.core.api.ResourceProvider
+import org.flywaydb.core.api.{ClassProvider, ResourceProvider}
import org.flywaydb.core.internal.jdbc.JdbcConnectionFactory
import org.flywaydb.core.internal.parser.ParsingContext
+import org.flywaydb.core.internal.resolver.CompositeMigrationResolver
+import org.flywaydb.core.internal.resolver.java.ScanningJavaMigrationResolver
import org.flywaydb.core.internal.resolver.sql.SqlMigrationResolver
import org.flywaydb.core.internal.scanner.{LocationScannerCache, ResourceNameCache, Scanner}
import org.flywaydb.core.internal.sqlscript.{SqlScriptExecutorFactory, SqlScriptFactory}
import org.slf4j.LoggerFactory
-import io.iohk.atala.prism.AtalaWithPostgresSpec
-import org.flywaydb.core.api.migration.JavaMigration
-import org.flywaydb.core.internal.resolver.CompositeMigrationResolver
+import java.util
import scala.jdk.CollectionConverters._
/** This is a helper to allow testing a specific sql migration by using flyway.
@@ -217,7 +216,7 @@ object PostgresMigrationSpec {
s"""|
|The given prefix wasn't found on the available migration scripts.
|Which means, there is likely a mistake.
- |
+ |
|Please take any prefix from the available scripts:
|${resolved.map(x => s"- ${x.getScript}").mkString("\n")}
""".stripMargin.trim
diff --git a/common/src/test/scala/io/iohk/atala/prism/repositories/PostgresRepositorySpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/PostgresRepositorySpec.scala
similarity index 96%
rename from common/src/test/scala/io/iohk/atala/prism/repositories/PostgresRepositorySpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/repositories/PostgresRepositorySpec.scala
index 0a490f150c..d42ab1e717 100644
--- a/common/src/test/scala/io/iohk/atala/prism/repositories/PostgresRepositorySpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/PostgresRepositorySpec.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.repositories
+package io.iohk.atala.prism.node.repositories
import cats.effect.unsafe.implicits.global
import cats.effect.IO
@@ -6,7 +6,7 @@ import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import doobie.implicits._
-import io.iohk.atala.prism.db.TransactorForStreaming
+import io.iohk.atala.prism.node.db.TransactorForStreaming
import doobie.util.transactor
case class PostgresConfig(
@@ -63,7 +63,7 @@ abstract class PostgresRepositorySpec[F[_]]
}
}
- lazy val (database, releaseDatabase) =
+ val (database, releaseDatabase) =
TransactorFactory.transactor[IO](transactorConfig).allocated.unsafeRunSync()
lazy val databaseForStreaming = {
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectTransactionSubmissionsDAOSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectTransactionSubmissionsDAOSpec.scala
index c3acc419f4..98bf3c59c2 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectTransactionSubmissionsDAOSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectTransactionSubmissionsDAOSpec.scala
@@ -4,11 +4,10 @@ import cats.effect.unsafe.implicits.global
import cats.syntax.functor._
import doobie.free.connection.ConnectionIO
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.Sha256
-import io.iohk.atala.prism.models.Ledger.InMemory
-import io.iohk.atala.prism.models.{Ledger, TransactionId, TransactionInfo}
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.models.Ledger.InMemory
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId, TransactionInfo}
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.models.AtalaObjectTransactionSubmissionStatus.{InLedger, Pending}
import io.iohk.atala.prism.node.models.{
AtalaObjectId,
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectsDAOSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectsDAOSpec.scala
index 66084a023c..cde1f31e30 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectsDAOSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaObjectsDAOSpec.scala
@@ -3,9 +3,9 @@ package io.iohk.atala.prism.node.repositories.daos
import cats.effect.unsafe.implicits.global
import cats.syntax.functor._
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.Sha256
-import io.iohk.atala.prism.models.{BlockInfo, Ledger, TransactionId, TransactionInfo}
+import io.iohk.atala.prism.node.models.{BlockInfo, Ledger, TransactionId, TransactionInfo}
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.node.models.AtalaObjectStatus.{Merged, Pending, Processed, Scheduled}
import io.iohk.atala.prism.node.models.{AtalaObjectId, AtalaObjectInfo, AtalaObjectStatus}
import io.iohk.atala.prism.protos.node_internal
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaOperationsDAOSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaOperationsDAOSpec.scala
index 4528791819..74264de35b 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaOperationsDAOSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/AtalaOperationsDAOSpec.scala
@@ -2,8 +2,8 @@ package io.iohk.atala.prism.node.repositories.daos
import cats.effect.unsafe.implicits.global
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
-import io.iohk.atala.prism.models.AtalaOperationId
+import io.iohk.atala.prism.node.models.AtalaOperationId
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.node.models.{AtalaObjectId, AtalaObjectStatus, AtalaOperationStatus}
import io.iohk.atala.prism.node.repositories.daos.AtalaObjectsDAO.AtalaObjectCreateData
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ContextDAOSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ContextDAOSpec.scala
index 97093185eb..2550727fbe 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ContextDAOSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ContextDAOSpec.scala
@@ -4,10 +4,9 @@ import doobie.util.transactor.Transactor
import cats.effect.IO
import cats.effect.unsafe.implicits.global
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
-import io.iohk.atala.prism.models.{Ledger, TransactionId}
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId}
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.models.nodeState.LedgerData
import io.iohk.atala.prism.node.models.{DIDData, DIDPublicKey, KeyUsage}
import io.iohk.atala.prism.node.repositories.{didSuffixFromDigest, digestGen}
diff --git a/common/src/test/scala/io/iohk/atala/prism/daos/DbConfigDao.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/DbConfigDao.scala
similarity index 94%
rename from common/src/test/scala/io/iohk/atala/prism/daos/DbConfigDao.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/DbConfigDao.scala
index 4fd2ffd1c1..6530be1475 100644
--- a/common/src/test/scala/io/iohk/atala/prism/daos/DbConfigDao.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/DbConfigDao.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.daos
+package io.iohk.atala.prism.node.repositories.daos
import doobie.free.connection.ConnectionIO
import doobie.implicits._
diff --git a/common/src/test/scala/io/iohk/atala/prism/daos/DbConfigDaoSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/DbConfigDaoSpec.scala
similarity index 89%
rename from common/src/test/scala/io/iohk/atala/prism/daos/DbConfigDaoSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/DbConfigDaoSpec.scala
index 7799da634a..c6b4ca5261 100644
--- a/common/src/test/scala/io/iohk/atala/prism/daos/DbConfigDaoSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/DbConfigDaoSpec.scala
@@ -1,14 +1,13 @@
-package io.iohk.atala.prism.daos
+package io.iohk.atala.prism.node.repositories.daos
import cats.effect.unsafe.implicits.global
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
-// sbt "project common" "testOnly *daos.DbConfigDaoSpec"
class DbConfigDaoSpec extends AtalaWithPostgresSpec {
override protected def migrationScriptsLocation: String =
- "common/db/migration"
+ "db/testmigration"
"DbConfigDao" should {
"return None if a key doesn't exist" in {
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/MetricsCountersDAOSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/MetricsCountersDAOSpec.scala
index 094ab7a5bc..7d00e21a57 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/MetricsCountersDAOSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/MetricsCountersDAOSpec.scala
@@ -2,8 +2,7 @@ package io.iohk.atala.prism.node.repositories.daos
import cats.effect.unsafe.implicits.global
import doobie.implicits._
-
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
class MetricsCountersDAOSpec extends AtalaWithPostgresSpec {
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ProtocolVersionsDAOSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ProtocolVersionsDAOSpec.scala
index 2d3e2528e7..e181645495 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ProtocolVersionsDAOSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ProtocolVersionsDAOSpec.scala
@@ -2,7 +2,7 @@ package io.iohk.atala.prism.node.repositories.daos
import cats.effect.unsafe.implicits.global
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.node.models.ProtocolVersion.ProtocolVersion1_0
class ProtocolVersionsDAOSpec extends AtalaWithPostgresSpec {
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/PublicKeysDAOSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/PublicKeysDAOSpec.scala
index e29d7260d1..f90bc20d9e 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/PublicKeysDAOSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/PublicKeysDAOSpec.scala
@@ -1,11 +1,10 @@
package io.iohk.atala.prism.node.repositories.daos
import java.time.Instant
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.models.{Ledger, TransactionId}
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId}
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.models.{DIDData, DIDPublicKey, KeyUsage}
import io.iohk.atala.prism.node.models.nodeState.LedgerData
import io.iohk.atala.prism.node.repositories.{didSuffixFromDigest, digestGen}
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ServicesDAOSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ServicesDAOSpec.scala
index f60fbea772..b8e0a8714e 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ServicesDAOSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/daos/ServicesDAOSpec.scala
@@ -4,10 +4,9 @@ import doobie.util.transactor.Transactor
import cats.effect.IO
import cats.effect.unsafe.implicits.global
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
-import io.iohk.atala.prism.models.{Ledger, TransactionId}
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.models.{Ledger, TransactionId}
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.models.nodeState.{DIDServiceState, LedgerData}
import io.iohk.atala.prism.node.models.{DIDData, DIDPublicKey, DIDService, KeyUsage}
import io.iohk.atala.prism.node.repositories.{didSuffixFromDigest, digestGen}
diff --git a/common/src/test/scala/io/iohk/atala/prism/repositories/ops/SqlTestOps.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/ops/SqlTestOps.scala
similarity index 94%
rename from common/src/test/scala/io/iohk/atala/prism/repositories/ops/SqlTestOps.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/repositories/ops/SqlTestOps.scala
index cbc66b96f2..3b704bda1e 100644
--- a/common/src/test/scala/io/iohk/atala/prism/repositories/ops/SqlTestOps.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/ops/SqlTestOps.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.repositories.ops
+package io.iohk.atala.prism.node.repositories.ops
import cats.effect.IO
import cats.effect.unsafe.implicits.global
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/repositories/package.scala b/node/src/test/scala/io/iohk/atala/prism/node/repositories/package.scala
index 26c19aa30b..fb8a58cb36 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/repositories/package.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/repositories/package.scala
@@ -1,7 +1,7 @@
package io.iohk.atala.prism.node
import io.iohk.atala.prism.crypto.Sha256Digest
-import io.iohk.atala.prism.models.DidSuffix
+import io.iohk.atala.prism.node.models.DidSuffix
import io.iohk.atala.prism.node.models.CredentialId
package object repositories {
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/services/BlockProcessingServiceSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/services/BlockProcessingServiceSpec.scala
index 595bc5746c..25fd704751 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/services/BlockProcessingServiceSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/services/BlockProcessingServiceSpec.scala
@@ -3,13 +3,12 @@ package io.iohk.atala.prism.node.services
import cats.effect.unsafe.implicits.global
import com.google.protobuf.ByteString
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.Sha256
import io.iohk.atala.prism.crypto.keys.ECPrivateKey
import io.iohk.atala.prism.protos.models.TimestampInfo
-import io.iohk.atala.prism.models.{AtalaOperationId, DidSuffix, Ledger, TransactionId}
-import io.iohk.atala.prism.node.DataPreparation
+import io.iohk.atala.prism.node.models.{AtalaOperationId, DidSuffix, Ledger, TransactionId}
+import io.iohk.atala.prism.node.{AtalaWithPostgresSpec, DataPreparation}
import io.iohk.atala.prism.node.models.{AtalaOperationInfo, AtalaOperationStatus}
import io.iohk.atala.prism.node.operations.{
ApplyOperationConfig,
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/services/CardanoLedgerServiceIntegrationSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/services/CardanoLedgerServiceIntegrationSpec.scala
index 4bb7d0fa96..ff9577df29 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/services/CardanoLedgerServiceIntegrationSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/services/CardanoLedgerServiceIntegrationSpec.scala
@@ -11,10 +11,10 @@ import io.iohk.atala.prism.node.repositories.KeyValuesRepository
import io.iohk.atala.prism.node.services.CardanoLedgerService.CardanoNetwork
import io.iohk.atala.prism.node.services.models.testing.TestAtalaHandlers
import io.iohk.atala.prism.protos.node_internal
-import io.iohk.atala.prism.AtalaWithPostgresSpec
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.utils.IOUtils._
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.utils.IOUtils._
import org.scalatest.Ignore
import org.scalatest.OptionValues._
import org.scalatest.concurrent.PatienceConfiguration.Timeout
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/services/CardanoLedgerServiceSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/services/CardanoLedgerServiceSpec.scala
index fbb0befc0c..a0d3bb36f1 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/services/CardanoLedgerServiceSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/services/CardanoLedgerServiceSpec.scala
@@ -4,10 +4,10 @@ import cats.data.ReaderT
import cats.effect.IO
import cats.effect.unsafe.implicits.global
import io.circe.Json
-import io.iohk.atala.prism.AtalaWithPostgresSpec
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.models._
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.models._
import io.iohk.atala.prism.node.cardano.CardanoClient
import io.iohk.atala.prism.node.cardano.dbsync.CardanoDbSyncClientImpl
import io.iohk.atala.prism.node.cardano.dbsync.repositories.CardanoBlockRepository
@@ -21,11 +21,10 @@ import io.iohk.atala.prism.node.services.CardanoLedgerService.{CardanoBlockHandl
import io.iohk.atala.prism.node.services.models.testing.TestAtalaHandlers
import io.iohk.atala.prism.node.services.models.{AtalaObjectNotification, AtalaObjectNotificationHandler}
import io.iohk.atala.prism.protos.node_internal
-import io.iohk.atala.prism.utils.BytesOps
-import io.iohk.atala.prism.utils.IOUtils._
+import io.iohk.atala.prism.node.utils.BytesOps
+import io.iohk.atala.prism.node.utils.IOUtils._
import org.scalatest.OptionValues._
import tofu.logging.Logs
-
class CardanoLedgerServiceSpec extends AtalaWithPostgresSpec {
private val logs = Logs.withContext[IO, IOWithTraceIdContext]
private val network = CardanoNetwork.Testnet
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/services/ObjectManagementServiceSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/services/ObjectManagementServiceSpec.scala
index 53756b265d..1cdd655a18 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/services/ObjectManagementServiceSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/services/ObjectManagementServiceSpec.scala
@@ -5,13 +5,12 @@ import cats.effect.IO
import cats.effect.unsafe.implicits.global
import doobie.free.connection
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.EC.{INSTANCE => EC}
import io.iohk.atala.prism.crypto.Sha256
import io.iohk.atala.prism.crypto.keys.ECKeyPair
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.models._
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
import io.iohk.atala.prism.node.DataPreparation._
import io.iohk.atala.prism.node.cardano.models.CardanoWalletError
import io.iohk.atala.prism.node.errors.NodeError.{
@@ -36,7 +35,7 @@ import io.iohk.atala.prism.node.services.BlockProcessingServiceSpec.{createDidOp
import io.iohk.atala.prism.node.services.models.AtalaObjectNotification
import io.iohk.atala.prism.node.{DataPreparation, PublicationInfo, UnderlyingLedger}
import io.iohk.atala.prism.protos.{node_internal, node_models}
-import io.iohk.atala.prism.utils.IOUtils._
+import io.iohk.atala.prism.node.utils.IOUtils._
import org.mockito
import org.mockito.captor.ArgCaptor
import org.mockito.scalatest.{MockitoSugar, ResetMocksAfterEachTest}
diff --git a/node/src/test/scala/io/iohk/atala/prism/node/services/SubmissionServiceSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/services/SubmissionServiceSpec.scala
index 616a46ae68..2ce4c67ba6 100644
--- a/node/src/test/scala/io/iohk/atala/prism/node/services/SubmissionServiceSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/services/SubmissionServiceSpec.scala
@@ -4,11 +4,11 @@ import cats.data.ReaderT
import cats.effect.IO
import cats.effect.unsafe.implicits.global
import doobie.implicits._
-import io.iohk.atala.prism.AtalaWithPostgresSpec
+import io.iohk.atala.prism.node.AtalaWithPostgresSpec
import io.iohk.atala.prism.crypto.Sha256
-import io.iohk.atala.prism.logging.TraceId
-import io.iohk.atala.prism.logging.TraceId.IOWithTraceIdContext
-import io.iohk.atala.prism.models.{Ledger, TransactionDetails, TransactionId, TransactionStatus}
+import io.iohk.atala.prism.node.logging.TraceId
+import io.iohk.atala.prism.node.logging.TraceId.IOWithTraceIdContext
+import io.iohk.atala.prism.node.models.{Ledger, TransactionDetails, TransactionId, TransactionStatus}
import io.iohk.atala.prism.node.cardano.models.AtalaObjectMetadata.estimateTxMetadataSize
import io.iohk.atala.prism.node.cardano.models.{CardanoWalletError, CardanoWalletErrorCode}
import io.iohk.atala.prism.node.models.AtalaObjectTransactionSubmissionStatus
@@ -24,7 +24,7 @@ import io.iohk.atala.prism.node.repositories.{
import io.iohk.atala.prism.node.repositories.daos.AtalaObjectsDAO
import io.iohk.atala.prism.protos.node_internal
import io.iohk.atala.prism.protos.node_models.SignedAtalaOperation
-import io.iohk.atala.prism.utils.IOUtils._
+import io.iohk.atala.prism.node.utils.IOUtils._
import org.mockito.scalatest.{MockitoSugar, ResetMocksAfterEachTest}
import org.scalatest.BeforeAndAfterEach
import org.scalatest.OptionValues._
diff --git a/common/src/test/scala/io/iohk/atala/prism/utils/Base64Spec.scala b/node/src/test/scala/io/iohk/atala/prism/node/utils/Base64Spec.scala
similarity index 95%
rename from common/src/test/scala/io/iohk/atala/prism/utils/Base64Spec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/utils/Base64Spec.scala
index b7b6bfd47b..c412ee0399 100644
--- a/common/src/test/scala/io/iohk/atala/prism/utils/Base64Spec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/utils/Base64Spec.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
diff --git a/common/src/test/scala/io/iohk/atala/prism/utils/BytesOpsSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/utils/BytesOpsSpec.scala
similarity index 95%
rename from common/src/test/scala/io/iohk/atala/prism/utils/BytesOpsSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/utils/BytesOpsSpec.scala
index 1b4f9f6d1e..3db17f5fff 100644
--- a/common/src/test/scala/io/iohk/atala/prism/utils/BytesOpsSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/utils/BytesOpsSpec.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import org.scalacheck.Gen
import org.scalatest.matchers.must.Matchers._
diff --git a/common/src/test/scala/io/iohk/atala/prism/utils/GrpcUtilsSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/utils/GrpcUtilsSpec.scala
similarity index 97%
rename from common/src/test/scala/io/iohk/atala/prism/utils/GrpcUtilsSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/utils/GrpcUtilsSpec.scala
index 2f038cd9cf..fcd2099afb 100644
--- a/common/src/test/scala/io/iohk/atala/prism/utils/GrpcUtilsSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/utils/GrpcUtilsSpec.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import com.google.protobuf.ByteString
import io.iohk.atala.prism.protos.node_api.ScheduleOperationsResponse
diff --git a/common/src/test/scala/io/iohk/atala/prism/utils/NodeClientUtils.scala b/node/src/test/scala/io/iohk/atala/prism/node/utils/NodeClientUtils.scala
similarity index 97%
rename from common/src/test/scala/io/iohk/atala/prism/utils/NodeClientUtils.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/utils/NodeClientUtils.scala
index 3ce529f46a..8c5ce3048b 100644
--- a/common/src/test/scala/io/iohk/atala/prism/utils/NodeClientUtils.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/utils/NodeClientUtils.scala
@@ -1,4 +1,4 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import com.google.protobuf.ByteString
import io.iohk.atala.prism.credentials.CredentialBatchId
diff --git a/common/src/test/scala/io/iohk/atala/prism/utils/UtilsSpec.scala b/node/src/test/scala/io/iohk/atala/prism/node/utils/UtilsSpec.scala
similarity index 88%
rename from common/src/test/scala/io/iohk/atala/prism/utils/UtilsSpec.scala
rename to node/src/test/scala/io/iohk/atala/prism/node/utils/UtilsSpec.scala
index 8026fb48a4..063f5bc4ec 100644
--- a/common/src/test/scala/io/iohk/atala/prism/utils/UtilsSpec.scala
+++ b/node/src/test/scala/io/iohk/atala/prism/node/utils/UtilsSpec.scala
@@ -1,8 +1,8 @@
-package io.iohk.atala.prism.utils
+package io.iohk.atala.prism.node.utils
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
-import io.iohk.atala.prism.utils.syntax._
+import io.iohk.atala.prism.node.utils.syntax._
import java.time.Instant
import scala.util.Random