diff --git a/.github/workflows/indexer-build-and-push-dev-staging.yml b/.github/workflows/indexer-build-and-push-dev-staging.yml
index 5a98b72552..c314f087c1 100644
--- a/.github/workflows/indexer-build-and-push-dev-staging.yml
+++ b/.github/workflows/indexer-build-and-push-dev-staging.yml
@@ -6,6 +6,7 @@ on:  # yamllint disable-line rule:truthy
       - main
       - 'release/indexer/v[0-9]+.[0-9]+.x'  # e.g. release/indexer/v0.1.x
       - 'release/indexer/v[0-9]+.x'  # e.g. release/indexer/v1.x
+      - 'chenyao/fix-vault-orderbook-flickering'
 
 # TODO(DEC-837): Customize github build and push to ECR by service with paths
 jobs:
diff --git a/.github/workflows/protocol-build-and-push.yml b/.github/workflows/protocol-build-and-push.yml
index fc26dd6e69..d9c5e4d920 100644
--- a/.github/workflows/protocol-build-and-push.yml
+++ b/.github/workflows/protocol-build-and-push.yml
@@ -6,6 +6,7 @@ on:  # yamllint disable-line rule:truthy
       - main
       - 'release/protocol/v[0-9]+.[0-9]+.x'  # e.g. release/protocol/v0.1.x
       - 'release/protocol/v[0-9]+.x'  # e.g. release/protocol/v1.x
+      - 'chenyao/fix-vault-orderbook-flickering'
 
 jobs:
   build-and-push-dev:
diff --git a/indexer/packages/redis/src/scripts/place_order.lua b/indexer/packages/redis/src/scripts/place_order.lua
index 6c04e9bbfc..4f07409825 100644
--- a/indexer/packages/redis/src/scripts/place_order.lua
+++ b/indexer/packages/redis/src/scripts/place_order.lua
@@ -66,7 +66,7 @@ else
   -- to "false".
   redis.call("set", orderKey, newOrder)
   -- refer to the above comment on order data format
-  redis.call("set", orderDataKey, newOrderExpiry .. "_" .. oldTotalFilledQuantums .. "_false")
+  redis.call("set", orderDataKey, newOrderExpiry .. "_" .. oldTotalFilledQuantums .. "_true")
   -- Long-term orders will be on-chain, so we only need to store expiry data for short-term orders
   if isShortTermOrder then
     -- The expiry is guaranteed to be different, so overwrite the old one from the expiry cache
diff --git a/indexer/services/vulcan/__tests__/handlers/order-place-handler.test.ts b/indexer/services/vulcan/__tests__/handlers/order-place-handler.test.ts
index 4fff013d98..d7f5c86e72 100644
--- a/indexer/services/vulcan/__tests__/handlers/order-place-handler.test.ts
+++ b/indexer/services/vulcan/__tests__/handlers/order-place-handler.test.ts
@@ -536,9 +536,14 @@ describe('order-place-handler', () => {
     ) => {
       const oldOrderTotalFilled: number = 10;
       const oldPriceLevelInitialQuantums: number = Number(initialOrderToPlace.quantums) * 2;
+      // After replacing the order, the quantums at the price level of the old order should be:
+      // initial quantums - (old order quantums - old order total filled)
+      const expectedPriceLevelQuantums: number = (
+        oldPriceLevelInitialQuantums - (Number(initialOrderToPlace.quantums) - oldOrderTotalFilled)
+      );
       const expectedPriceLevel: PriceLevel = {
         humanPrice: expectedRedisOrder.price,
-        quantums: oldPriceLevelInitialQuantums.toString(),
+        quantums: expectedPriceLevelQuantums.toString(),
         lastUpdated: expect.stringMatching(/^[0-9]{10}$/),
       };
 
@@ -1082,6 +1087,11 @@ describe('order-place-handler', () => {
         APIOrderStatusEnum.BEST_EFFORT_OPENED,
         true,
       );
+
+      expect(logger.info).toHaveBeenCalledWith(expect.objectContaining({
+        at: 'OrderPlaceHandler#handle',
+        message: 'Total filled of order in Redis exceeds order quantums.',
+      }));
     });
   });
 });
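A note on the `place_order.lua` change above: the script stores per-order metadata in Redis as a single underscore-delimited value, `<expiry>_<totalFilledQuantums>_<restingOnBook>`. Writing `_true` instead of `_false` when a resting order is replaced keeps the replaced order recorded as resting on the book, which is what lets the place handler below subtract the old order's remaining size from its price level. A minimal Go sketch of that encoding, assuming only the layout implied by the script's comments (the type and helper names are illustrative, not indexer code):

```go
package main

import (
	"fmt"
	"strings"
)

// orderData mirrors the underscore-delimited value stored by
// place_order.lua: "<expiry>_<totalFilledQuantums>_<restingOnBook>".
type orderData struct {
	Expiry              string
	TotalFilledQuantums string
	RestingOnBook       bool
}

func encode(d orderData) string {
	return fmt.Sprintf("%s_%s_%t", d.Expiry, d.TotalFilledQuantums, d.RestingOnBook)
}

func decode(s string) (orderData, error) {
	parts := strings.Split(s, "_")
	if len(parts) != 3 {
		return orderData{}, fmt.Errorf("malformed order data: %q", s)
	}
	return orderData{
		Expiry:              parts[0],
		TotalFilledQuantums: parts[1],
		RestingOnBook:       parts[2] == "true",
	}, nil
}

func main() {
	// Pre-fix, a replaced resting order was re-written with "_false";
	// the fix writes "_true" so the old size can be removed from the book.
	v := encode(orderData{Expiry: "1700000000", TotalFilledQuantums: "10", RestingOnBook: true})
	fmt.Println(v) // 1700000000_10_true

	d, err := decode(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.RestingOnBook) // true
}
```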
diff --git a/indexer/services/vulcan/src/handlers/order-place-handler.ts b/indexer/services/vulcan/src/handlers/order-place-handler.ts
index d32021c43b..0a9d16e03f 100644
--- a/indexer/services/vulcan/src/handlers/order-place-handler.ts
+++ b/indexer/services/vulcan/src/handlers/order-place-handler.ts
@@ -6,6 +6,7 @@ import {
   OrderTable,
   PerpetualMarketFromDatabase,
   perpetualMarketRefresher,
+  protocolTranslations,
 } from '@dydxprotocol-indexer/postgres';
 import {
   CanceledOrdersCache,
@@ -13,12 +14,14 @@ import {
   PlaceOrderResult,
   StatefulOrderUpdatesCache,
   convertToRedisOrder,
+  OrderbookLevelsCache,
 } from '@dydxprotocol-indexer/redis';
 import {
   getOrderIdHash,
   isLongTermOrder,
   isStatefulOrder,
   ORDER_FLAG_SHORT_TERM,
+  requiresImmediateExecution,
 } from '@dydxprotocol-indexer/v4-proto-parser';
 import {
   IndexerOrder,
@@ -28,6 +31,7 @@ import {
   OrderUpdateV1,
   RedisOrder,
 } from '@dydxprotocol-indexer/v4-protos';
+import Big from 'big.js';
 import { IHeaders, Message } from 'kafkajs';
 
 import config from '../config';
@@ -96,6 +100,12 @@ export class OrderPlaceHandler extends Handler {
       stats.increment(`${config.SERVICE_NAME}.place_order_handler.replaced_order`, 1);
     }
 
+    await this.updatePriceLevel(
+      placeOrderResult,
+      perpetualMarket,
+      update,
+    );
+
     // TODO(CLOB-597): Remove this logic and log erorrs once best-effort-open is not sent for
     // stateful orders in the protocol
     if (this.shouldSendSubaccountMessage(
@@ -158,6 +168,83 @@
     }
   }
 
+  /**
+   * Updates the price level given the result of calling `placeOrder`.
+   * @param result `PlaceOrderResult` from calling `placeOrder`
+   * @param perpetualMarket Perpetual market object corresponding to the perpetual market of the
+   * order
+   * @param update Off-chain update
+   * @returns
+   */
+  // eslint-disable-next-line @typescript-eslint/require-await
+  protected async updatePriceLevel(
+    result: PlaceOrderResult,
+    perpetualMarket: PerpetualMarketFromDatabase,
+    update: OffChainUpdateV1,
+  ): Promise<number | void> {
+    // TODO(DEC-1339): Update price levels based on if the order is reduce-only and if the replaced
+    // order is reduce-only.
+    if (
+      result.replaced !== true ||
+      result.restingOnBook !== true ||
+      requiresImmediateExecution(result.oldOrder!.order!.timeInForce)
+    ) {
+      return undefined;
+    }
+
+    const remainingSizeDeltaInQuantums: Big = this.getRemainingSizeDeltaInQuantums(result);
+
+    if (remainingSizeDeltaInQuantums.eq(0)) {
+      // No update to the price level if remaining quantums = 0.
+      // An order could have remaining quantums = 0 intra-block, as an order is only considered
+      // filled once the fills are committed in a block.
+      return undefined;
+    }
+
+    if (remainingSizeDeltaInQuantums.lt(0)) {
+      // Log and skip updating orderbook levels if the old order had negative remaining quantums.
+      logger.info({
+        at: 'OrderPlaceHandler#handle',
+        message: 'Total filled of order in Redis exceeds order quantums.',
+        placeOrderResult: result,
+        update,
+      });
+      stats.increment(`${config.SERVICE_NAME}.order_place_total_filled_exceeds_size`, 1);
+      return undefined;
+    }
+
+    // If the remaining size is neither 0 nor negative, it must be greater than 0.
+    // Remove the remaining size of the replaced order from the orderbook, by decrementing
+    // the total size of orders at the price of the replaced order.
+    return runFuncWithTimingStat(
+      OrderbookLevelsCache.updatePriceLevel({
+        ticker: perpetualMarket.ticker,
+        side: protocolTranslations.protocolOrderSideToOrderSide(result.oldOrder!.order!.side),
+        humanPrice: result.oldOrder!.price,
+        // Delta should be -1 * remaining size of order in quantums and an integer
+        sizeDeltaInQuantums: remainingSizeDeltaInQuantums.mul(-1).toFixed(0),
+        client: redisClient,
+      }),
+      this.generateTimingStatsOptions('update_price_level'),
+    );
+  }
+
+  /**
+   * Gets the remaining size of the old order if the order was replaced.
+   * @param result Result of placing an order; should be for a replaced order, so both `oldOrder`
+   * and `oldTotalFilledQuantums` properties should exist on the place order result.
+   * @returns Remaining size of the old order that was replaced.
+   */
+  protected getRemainingSizeDeltaInQuantums(result: PlaceOrderResult): Big {
+    const sizeDeltaInQuantums: Big = Big(
+      result.oldOrder!.order!.quantums.toString(),
+    ).minus(
+      result.oldTotalFilledQuantums!,
+    );
+    return sizeDeltaInQuantums;
+  }
+
   /**
    * Determine whether to send a subaccount websocket message given the order place.
    * @param orderPlace
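The bookkeeping in `updatePriceLevel` and `getRemainingSizeDeltaInQuantums` is compact but easy to get backwards, so here is a runnable Go sketch of the same arithmetic, with `math/big` standing in for `big.js` and made-up quantums values (function names are illustrative):

```go
package main

import (
	"fmt"
	"math/big"
)

// remainingSizeDeltaInQuantums mirrors getRemainingSizeDeltaInQuantums:
// remaining size = order size - total filled, both in quantums.
func remainingSizeDeltaInQuantums(orderQuantums, totalFilledQuantums *big.Int) *big.Int {
	return new(big.Int).Sub(orderQuantums, totalFilledQuantums)
}

func main() {
	remaining := remainingSizeDeltaInQuantums(big.NewInt(1_000_000), big.NewInt(10))
	switch remaining.Sign() {
	case 0:
		// Remaining quantums can be 0 intra-block, since fills only count
		// once committed in a block; nothing to remove from the book.
	case -1:
		// Total filled exceeds order size: log, bump a stat, and skip,
		// as updatePriceLevel does above.
	default:
		// Decrement the replaced order's old price level by the remaining
		// size, i.e. apply a delta of -1 * remaining.
		delta := new(big.Int).Neg(remaining)
		fmt.Println("sizeDeltaInQuantums:", delta) // sizeDeltaInQuantums: -999990
	}
}
```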
diff --git a/protocol/app/app.go b/protocol/app/app.go
index a17b6e48bf..40a2bf6aa3 100644
--- a/protocol/app/app.go
+++ b/protocol/app/app.go
@@ -1088,6 +1088,7 @@ func New(
         app.PricesKeeper,
         app.SendingKeeper,
         app.SubaccountsKeeper,
+        app.IndexerEventManager,
         []string{
             lib.GovModuleAddress.String(),
             delaymsgmoduletypes.ModuleAddress.String(),
diff --git a/protocol/mocks/ClobKeeper.go b/protocol/mocks/ClobKeeper.go
index b1a869afa3..9cac719a9c 100644
--- a/protocol/mocks/ClobKeeper.go
+++ b/protocol/mocks/ClobKeeper.go
@@ -626,16 +626,16 @@ func (_m *ClobKeeper) GetSubaccountMaxNotionalLiquidatable(ctx types.Context, su
 }
 
 // HandleMsgCancelOrder provides a mock function with given fields: ctx, msg
-func (_m *ClobKeeper) HandleMsgCancelOrder(ctx types.Context, msg *clobtypes.MsgCancelOrder) error {
-    ret := _m.Called(ctx, msg)
+func (_m *ClobKeeper) HandleMsgCancelOrder(ctx types.Context, msg *clobtypes.MsgCancelOrder, isInternalOrder bool) error {
+    ret := _m.Called(ctx, msg, isInternalOrder)
 
     if len(ret) == 0 {
         panic("no return value specified for HandleMsgCancelOrder")
     }
 
     var r0 error
-    if rf, ok := ret.Get(0).(func(types.Context, *clobtypes.MsgCancelOrder) error); ok {
-        r0 = rf(ctx, msg)
+    if rf, ok := ret.Get(0).(func(types.Context, *clobtypes.MsgCancelOrder, bool) error); ok {
+        r0 = rf(ctx, msg, isInternalOrder)
     } else {
         r0 = ret.Error(0)
     }
diff --git a/protocol/testutil/keeper/vault.go b/protocol/testutil/keeper/vault.go
index cbcc87e5d6..5a82da7a61 100644
--- a/protocol/testutil/keeper/vault.go
+++ b/protocol/testutil/keeper/vault.go
@@ -58,6 +58,7 @@ func createVaultKeeper(
         &mocks.PricesKeeper{},
         &mocks.SendingKeeper{},
         &mocks.SubaccountsKeeper{},
+        &mocks.IndexerEventManager{},
         []string{
             lib.GovModuleAddress.String(),
             delaymsgtypes.ModuleAddress.String(),
diff --git a/protocol/x/clob/keeper/msg_server_cancel_orders.go b/protocol/x/clob/keeper/msg_server_cancel_orders.go
index cd241c572e..295fe99b93 100644
--- a/protocol/x/clob/keeper/msg_server_cancel_orders.go
+++ b/protocol/x/clob/keeper/msg_server_cancel_orders.go
@@ -23,7 +23,7 @@ func (k msgServer) CancelOrder(
 ) (resp *types.MsgCancelOrderResponse, err error) {
     ctx := lib.UnwrapSDKContext(goCtx, types.ModuleName)
 
-    if err := k.Keeper.HandleMsgCancelOrder(ctx, msg); err != nil {
+    if err := k.Keeper.HandleMsgCancelOrder(ctx, msg, false); err != nil {
         return nil, err
     }
 
@@ -37,6 +37,7 @@ func (k msgServer) CancelOrder(
 func (k Keeper) HandleMsgCancelOrder(
     ctx sdk.Context,
     msg *types.MsgCancelOrder,
+    isInternalOrder bool,
 ) (err error) {
     lib.AssertDeliverTxMode(ctx)
 
@@ -111,17 +112,19 @@
     k.MustSetProcessProposerMatchesEvents(ctx, processProposerMatchesEvents)
 
     // 4. Add the relevant on-chain Indexer event for the cancellation.
-    k.GetIndexerEventManager().AddTxnEvent(
-        ctx,
-        indexerevents.SubtypeStatefulOrder,
-        indexerevents.StatefulOrderEventVersion,
-        indexer_manager.GetBytes(
-            indexerevents.NewStatefulOrderRemovalEvent(
-                msg.OrderId,
-                indexershared.OrderRemovalReason_ORDER_REMOVAL_REASON_USER_CANCELED,
+    if !isInternalOrder {
+        k.GetIndexerEventManager().AddTxnEvent(
+            ctx,
+            indexerevents.SubtypeStatefulOrder,
+            indexerevents.StatefulOrderEventVersion,
+            indexer_manager.GetBytes(
+                indexerevents.NewStatefulOrderRemovalEvent(
+                    msg.OrderId,
+                    indexershared.OrderRemovalReason_ORDER_REMOVAL_REASON_USER_CANCELED,
+                ),
             ),
-        ),
-    )
+        )
+    }
 
     return nil
 }
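The new `isInternalOrder` flag threads through `HandleMsgCancelOrder` so a vault-driven cancellation no longer emits a `StatefulOrderRemoval` event on every refresh. A toy, runnable Go model of that gating, with heavily simplified types (the real method also validates the cancellation and updates proposer-match state):

```go
package main

import "fmt"

// A toy model of the isInternalOrder gating: external cancellations still
// emit a StatefulOrderRemoval indexer event, while internal (vault)
// cancellations stay silent so the vault module can decide later whether
// the indexer needs a removal event at all.
type eventSink struct{ events []string }

func (s *eventSink) addTxnEvent(e string) { s.events = append(s.events, e) }

func handleMsgCancelOrder(sink *eventSink, orderId string, isInternalOrder bool) {
	// ... state updates elided ...
	if !isInternalOrder {
		sink.addTxnEvent("StatefulOrderRemoval:" + orderId)
	}
}

func main() {
	sink := &eventSink{}
	handleMsgCancelOrder(sink, "user-order", false) // emits a removal event
	handleMsgCancelOrder(sink, "vault-order", true) // suppressed
	fmt.Println(sink.events) // [StatefulOrderRemoval:user-order]
}
```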
diff --git a/protocol/x/clob/keeper/msg_server_place_order.go b/protocol/x/clob/keeper/msg_server_place_order.go
index 84d1222dda..e995ac1953 100644
--- a/protocol/x/clob/keeper/msg_server_place_order.go
+++ b/protocol/x/clob/keeper/msg_server_place_order.go
@@ -88,13 +88,15 @@ func (k Keeper) HandleMsgPlaceOrder(
     // 2. Return an error if an associated cancellation or removal already exists in the current block.
     processProposerMatchesEvents := k.GetProcessProposerMatchesEvents(ctx)
-    cancelledOrderIds := lib.UniqueSliceToSet(processProposerMatchesEvents.PlacedStatefulCancellationOrderIds)
-    if _, found := cancelledOrderIds[order.GetOrderId()]; found {
-        return errorsmod.Wrapf(
-            types.ErrStatefulOrderPreviouslyCancelled,
-            "PlaceOrder: order (%+v)",
-            order,
-        )
+    if !isInternalOrder { // a vault (internal) order may replace a cancelled order with the same order ID
+        cancelledOrderIds := lib.UniqueSliceToSet(processProposerMatchesEvents.PlacedStatefulCancellationOrderIds)
+        if _, found := cancelledOrderIds[order.GetOrderId()]; found {
+            return errorsmod.Wrapf(
+                types.ErrStatefulOrderPreviouslyCancelled,
+                "PlaceOrder: order (%+v)",
+                order,
+            )
+        }
     }
     removedOrderIds := lib.UniqueSliceToSet(processProposerMatchesEvents.RemovedStatefulOrderIds)
     if _, found := removedOrderIds[order.GetOrderId()]; found {
@@ -115,31 +117,35 @@ func (k Keeper) HandleMsgPlaceOrder(
     // 4. Emit the new order placement indexer event.
     if order.IsConditionalOrder() {
-        k.GetIndexerEventManager().AddTxnEvent(
-            ctx,
-            indexerevents.SubtypeStatefulOrder,
-            indexerevents.StatefulOrderEventVersion,
-            indexer_manager.GetBytes(
-                indexerevents.NewConditionalOrderPlacementEvent(
-                    order,
+        if !isInternalOrder {
+            k.GetIndexerEventManager().AddTxnEvent(
+                ctx,
+                indexerevents.SubtypeStatefulOrder,
+                indexerevents.StatefulOrderEventVersion,
+                indexer_manager.GetBytes(
+                    indexerevents.NewConditionalOrderPlacementEvent(
+                        order,
+                    ),
                 ),
-            ),
-        )
+            )
+        }
         processProposerMatchesEvents.PlacedConditionalOrderIds = append(
             processProposerMatchesEvents.PlacedConditionalOrderIds,
             order.OrderId,
         )
     } else {
-        k.GetIndexerEventManager().AddTxnEvent(
-            ctx,
-            indexerevents.SubtypeStatefulOrder,
-            indexerevents.StatefulOrderEventVersion,
-            indexer_manager.GetBytes(
-                indexerevents.NewLongTermOrderPlacementEvent(
-                    order,
+        if !isInternalOrder {
+            k.GetIndexerEventManager().AddTxnEvent(
+                ctx,
+                indexerevents.SubtypeStatefulOrder,
+                indexerevents.StatefulOrderEventVersion,
+                indexer_manager.GetBytes(
+                    indexerevents.NewLongTermOrderPlacementEvent(
+                        order,
+                    ),
                 ),
-            ),
-        )
+            )
+        }
         processProposerMatchesEvents.PlacedLongTermOrderIds = append(
             processProposerMatchesEvents.PlacedLongTermOrderIds,
             order.OrderId,
         )
diff --git a/protocol/x/clob/keeper/msg_server_place_order_test.go b/protocol/x/clob/keeper/msg_server_place_order_test.go
index 0ddcb99da2..3ea8a9ddf9 100644
--- a/protocol/x/clob/keeper/msg_server_place_order_test.go
+++ b/protocol/x/clob/keeper/msg_server_place_order_test.go
@@ -435,13 +435,12 @@ func TestHandleMsgPlaceOrder(t *testing.T) {
             removalExists:         false,
             equityTierLimitExists: true,
         },
-        "Error - Place an Internal Order, Order Already Cancelled": {
+        "Success - Place an Internal Order, Order Already Cancelled": {
             isInternalOrder:       true,
             assetQuantums:         -1_000_000_000,
             cancellationExists:    true,
             removalExists:         false,
             equityTierLimitExists: true,
-            expectedError:         types.ErrStatefulOrderPreviouslyCancelled,
         },
         "Error - Place an Internal Order, Order Already Removed": {
             isInternalOrder:       true,
diff --git a/protocol/x/clob/types/clob_keeper.go b/protocol/x/clob/types/clob_keeper.go
index 97d5dce483..9c5fd0662b 100644
--- a/protocol/x/clob/types/clob_keeper.go
+++ b/protocol/x/clob/types/clob_keeper.go
@@ -46,6 +46,7 @@ type ClobKeeper interface {
     HandleMsgCancelOrder(
         ctx sdk.Context,
         msg *MsgCancelOrder,
+        isInternalOrder bool,
     ) (err error)
     HandleMsgPlaceOrder(
         ctx sdk.Context,
diff --git a/protocol/x/vault/keeper/keeper.go b/protocol/x/vault/keeper/keeper.go
index 4ff8e98253..deca94acf4 100644
--- a/protocol/x/vault/keeper/keeper.go
+++ b/protocol/x/vault/keeper/keeper.go
@@ -7,20 +7,22 @@ import (
     storetypes "cosmossdk.io/store/types"
     "github.com/cosmos/cosmos-sdk/codec"
     sdk "github.com/cosmos/cosmos-sdk/types"
+    "github.com/dydxprotocol/v4-chain/protocol/indexer/indexer_manager"
     "github.com/dydxprotocol/v4-chain/protocol/lib"
     "github.com/dydxprotocol/v4-chain/protocol/x/vault/types"
 )
 
 type (
     Keeper struct {
-        cdc               codec.BinaryCodec
-        storeKey          storetypes.StoreKey
-        clobKeeper        types.ClobKeeper
-        perpetualsKeeper  types.PerpetualsKeeper
-        pricesKeeper      types.PricesKeeper
-        sendingKeeper     types.SendingKeeper
-        subaccountsKeeper types.SubaccountsKeeper
-        authorities       map[string]struct{}
+        cdc                 codec.BinaryCodec
+        storeKey            storetypes.StoreKey
+        clobKeeper          types.ClobKeeper
+        perpetualsKeeper    types.PerpetualsKeeper
+        pricesKeeper        types.PricesKeeper
+        sendingKeeper       types.SendingKeeper
+        subaccountsKeeper   types.SubaccountsKeeper
+        indexerEventManager indexer_manager.IndexerEventManager
+        authorities         map[string]struct{}
     }
 )
 
@@ -32,20 +34,26 @@ func NewKeeper(
     pricesKeeper types.PricesKeeper,
     sendingKeeper types.SendingKeeper,
     subaccountsKeeper types.SubaccountsKeeper,
+    indexerEventManager indexer_manager.IndexerEventManager,
    authorities []string,
 ) *Keeper {
     return &Keeper{
-        cdc:               cdc,
-        storeKey:          storeKey,
-        clobKeeper:        clobKeeper,
-        perpetualsKeeper:  perpetualsKeeper,
-        pricesKeeper:      pricesKeeper,
-        sendingKeeper:     sendingKeeper,
-        subaccountsKeeper: subaccountsKeeper,
-        authorities:       lib.UniqueSliceToSet(authorities),
+        cdc:                 cdc,
+        storeKey:            storeKey,
+        clobKeeper:          clobKeeper,
+        perpetualsKeeper:    perpetualsKeeper,
+        pricesKeeper:        pricesKeeper,
+        sendingKeeper:       sendingKeeper,
+        subaccountsKeeper:   subaccountsKeeper,
+        indexerEventManager: indexerEventManager,
+        authorities:         lib.UniqueSliceToSet(authorities),
     }
 }
 
+func (k Keeper) GetIndexerEventManager() indexer_manager.IndexerEventManager {
+    return k.indexerEventManager
+}
+
 func (k Keeper) HasAuthority(authority string) bool {
     _, ok := k.authorities[authority]
     return ok
diff --git a/protocol/x/vault/keeper/orders.go b/protocol/x/vault/keeper/orders.go
index 94e3e838d3..d2255079f1 100644
--- a/protocol/x/vault/keeper/orders.go
+++ b/protocol/x/vault/keeper/orders.go
@@ -7,6 +7,9 @@ import (
     errorsmod "cosmossdk.io/errors"
     sdk "github.com/cosmos/cosmos-sdk/types"
+    indexerevents "github.com/dydxprotocol/v4-chain/protocol/indexer/events"
+    "github.com/dydxprotocol/v4-chain/protocol/indexer/indexer_manager"
+    indexershared "github.com/dydxprotocol/v4-chain/protocol/indexer/shared/types"
     "github.com/dydxprotocol/v4-chain/protocol/lib"
     "github.com/dydxprotocol/v4-chain/protocol/lib/log"
     "github.com/dydxprotocol/v4-chain/protocol/lib/metrics"
@@ -25,6 +28,7 @@ func (k Keeper) RefreshAllVaultOrders(ctx sdk.Context) {
     defer totalSharesIterator.Close()
     for ; totalSharesIterator.Valid(); totalSharesIterator.Next() {
         vaultId, err := types.GetVaultIdFromStateKey(totalSharesIterator.Key())
+
         if err != nil {
             log.ErrorLogWithError(ctx, "Failed to get vault ID from state key", err)
             continue
@@ -80,29 +84,31 @@ func (k Keeper) RefreshVaultClobOrders(ctx sdk.Context, vaultId types.VaultId) (
         return err
     }
     orderExpirationSeconds := k.GetParams(ctx).OrderExpirationSeconds
-    for _, order := range ordersToCancel {
-        if _, exists := k.clobKeeper.GetLongTermOrderPlacement(ctx, order.OrderId); exists {
+    replacedOrders := make([]*clobtypes.Order, len(ordersToCancel))
+    for i, order := range ordersToCancel {
+        if oldOrderPlacement, exists := k.clobKeeper.GetLongTermOrderPlacement(ctx, order.OrderId); exists {
             err := k.clobKeeper.HandleMsgCancelOrder(ctx, clobtypes.NewMsgCancelOrderStateful(
                 order.OrderId,
                 uint32(ctx.BlockTime().Unix())+orderExpirationSeconds,
-            ))
+            ), true)
             if err != nil {
                 log.ErrorLogWithError(ctx, "Failed to cancel order", err, "order", order, "vaultId", vaultId)
             }
+            replacedOrders[i] = &oldOrderPlacement.Order
             vaultId.IncrCounterWithLabels(
                 metrics.VaultCancelOrder,
                 metrics.GetLabelForBoolValue(metrics.Success, err == nil),
             )
         }
     }
 
-    // Place new CLOB orders.
     ordersToPlace, err := k.GetVaultClobOrders(ctx, vaultId)
     if err != nil {
         log.ErrorLogWithError(ctx, "Failed to get vault clob orders to place", err, "vaultId", vaultId)
         return err
     }
-    for _, order := range ordersToPlace {
+
+    for i, order := range ordersToPlace {
         err := k.PlaceVaultClobOrder(ctx, order)
         if err != nil {
             log.ErrorLogWithError(ctx, "Failed to place order", err, "order", order, "vaultId", vaultId)
@@ -111,8 +117,36 @@ func (k Keeper) RefreshVaultClobOrders(ctx sdk.Context, vaultId types.VaultId) (
             metrics.VaultPlaceOrder,
             metrics.GetLabelForBoolValue(metrics.Success, err == nil),
         )
-    }
 
+        // Send indexer events.
+        // If the prices of the old and new orders differ, send a removal and a placement event;
+        // otherwise, send a placement event only.
+        replacedOrder := replacedOrders[i]
+        if replacedOrder != nil && replacedOrder.OrderId == order.OrderId && replacedOrder.Subticks != order.Subticks {
+            k.GetIndexerEventManager().AddTxnEvent(
+                ctx,
+                indexerevents.SubtypeStatefulOrder,
+                indexerevents.StatefulOrderEventVersion,
+                indexer_manager.GetBytes(
+                    indexerevents.NewStatefulOrderRemovalEvent(
+                        replacedOrder.OrderId,
+                        indexershared.OrderRemovalReason_ORDER_REMOVAL_REASON_USER_CANCELED,
+                    ),
+                ),
+            )
+        }
+
+        k.GetIndexerEventManager().AddTxnEvent(
+            ctx,
+            indexerevents.SubtypeStatefulOrder,
+            indexerevents.StatefulOrderEventVersion,
+            indexer_manager.GetBytes(
+                indexerevents.NewLongTermOrderPlacementEvent(
+                    *order,
+                ),
+            ),
+        )
+    }
     return nil
 }
@@ -329,11 +363,6 @@ func (k Keeper) GetVaultClobOrders(
 // GetVaultClobOrderClientId returns the client ID for a CLOB order where
 // - 1st bit is `side-1` (subtract 1 as buy_side = 1, sell_side = 2)
 //
-// - 2nd bit is `block height % 2`
-//   - block height bit alternates between 0 and 1 to ensure that client IDs
-//     are different in two consecutive blocks (otherwise, order placement would
-//     fail because the same order IDs are already marked for cancellation)
-//
 // - next 8 bits are `layer`
 func (k Keeper) GetVaultClobOrderClientId(
@@ -343,12 +372,9 @@ func (k Keeper) GetVaultClobOrderClientId(
     sideBit := uint32(side - 1)
     sideBit <<= 31
 
-    blockHeightBit := uint32(ctx.BlockHeight() % 2)
-    blockHeightBit <<= 30
-
-    layerBits := uint32(layer) << 22
+    layerBits := uint32(layer) << 23
 
-    return sideBit | blockHeightBit | layerBits
+    return sideBit | layerBits
 }
 
 // PlaceVaultClobOrder places a vault CLOB order as an order internal to the protocol,
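The client ID change above is the crux of the fix: with the block-height bit gone, a vault order keeps the same client ID (and thus the same order ID) across blocks, so a refresh replaces the order in place instead of cancelling it and re-placing it under a new ID. A runnable Go sketch mirroring the new `GetVaultClobOrderClientId` layout (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"math"
)

// vaultClobOrderClientId mirrors the new GetVaultClobOrderClientId layout:
// bit 31 is side-1 (buy=1, sell=2), bits 23-30 hold the 8-bit layer, and
// the former block-height bit 30 is gone entirely.
func vaultClobOrderClientId(side uint32, layer uint8) uint32 {
	sideBit := (side - 1) << 31
	layerBits := uint32(layer) << 23
	return sideBit | layerBits
}

func main() {
	fmt.Printf("%032b\n", vaultClobOrderClientId(2, 202))           // sell, layer 202
	fmt.Printf("%032b\n", vaultClobOrderClientId(1, math.MaxUint8)) // buy, layer 255
}
```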
diff --git a/protocol/x/vault/keeper/orders_test.go b/protocol/x/vault/keeper/orders_test.go
index 54715b8164..ea87cccabd 100644
--- a/protocol/x/vault/keeper/orders_test.go
+++ b/protocol/x/vault/keeper/orders_test.go
@@ -158,24 +158,6 @@ func TestRefreshAllVaultOrders(t *testing.T) {
             allStatefulOrders := tApp.App.ClobKeeper.GetAllStatefulOrders(ctx)
             require.Len(t, allStatefulOrders, 0)
 
-            // Simulate vault orders placed in last block.
-            numPreviousOrders := 0
-            for i, vaultId := range tc.vaultIds {
-                if tc.totalShares[i].Sign() > 0 && tc.assetQuantums[i].Cmp(tc.activationThresholdQuoteQuantums) >= 0 {
-                    orders, err := tApp.App.VaultKeeper.GetVaultClobOrders(
-                        ctx.WithBlockHeight(ctx.BlockHeight()-1),
-                        vaultId,
-                    )
-                    require.NoError(t, err)
-                    for _, order := range orders {
-                        err := tApp.App.VaultKeeper.PlaceVaultClobOrder(ctx, order)
-                        require.NoError(t, err)
-                    }
-                    numPreviousOrders += len(orders)
-                }
-            }
-            require.Len(t, tApp.App.ClobKeeper.GetAllStatefulOrders(ctx), numPreviousOrders)
-
             // Refresh all vault orders.
             tApp.App.VaultKeeper.RefreshAllVaultOrders(ctx)
 
@@ -696,8 +678,6 @@ func TestGetVaultClobOrderClientId(t *testing.T) {
         /* --- Setup --- */
         // side.
         side clobtypes.Order_Side
-        // block height.
-        blockHeight int64
         // layer.
         layer uint8
 
@@ -705,55 +685,35 @@ func TestGetVaultClobOrderClientId(t *testing.T) {
         // Expected client ID.
         expectedClientId uint32
     }{
-        "Buy, Block Height Odd, Layer 1": {
-            side:             clobtypes.Order_SIDE_BUY, // 0<<31
-            blockHeight:      1,                        // 1<<30
-            layer:            1,                        // 1<<22
-            expectedClientId: 0<<31 | 1<<30 | 1<<22,
-        },
-        "Buy, Block Height Even, Layer 1": {
+        "Buy, Layer 1": {
             side:             clobtypes.Order_SIDE_BUY, // 0<<31
-            blockHeight:      2,                        // 0<<30
-            layer:            1,                        // 1<<22
-            expectedClientId: 0<<31 | 0<<30 | 1<<22,
+            layer:            1, // 1<<23
+            expectedClientId: 0<<31 | 1<<23,
         },
-        "Sell, Block Height Odd, Layer 2": {
+        "Sell, Layer 2": {
             side:             clobtypes.Order_SIDE_SELL, // 1<<31
-            blockHeight:      1,                         // 1<<30
-            layer:            2,                         // 2<<22
-            expectedClientId: 1<<31 | 1<<30 | 2<<22,
+            layer:            2, // 2<<23
+            expectedClientId: 1<<31 | 2<<23,
         },
-        "Sell, Block Height Even, Layer 2": {
-            side:             clobtypes.Order_SIDE_SELL, // 1<<31
-            blockHeight:      2,                         // 0<<30
-            layer:            2,                         // 2<<22
-            expectedClientId: 1<<31 | 0<<30 | 2<<22,
-        },
-        "Buy, Block Height Even, Layer Max Uint8": {
+        "Buy, Layer Max Uint8": {
             side:             clobtypes.Order_SIDE_BUY, // 0<<31
-            blockHeight:      123456,                   // 0<<30
-            layer:            math.MaxUint8,            // 255<<22
-            expectedClientId: 0<<31 | 0<<30 | 255<<22,
+            layer:            math.MaxUint8, // 255<<23
+            expectedClientId: 0<<31 | 255<<23,
         },
-        "Sell, Block Height Odd, Layer 0": {
+        "Sell, Layer 0": {
             side:             clobtypes.Order_SIDE_SELL, // 1<<31
-            blockHeight:      12345654321,               // 1<<30
-            layer:            0,                         // 0<<22
-            expectedClientId: 1<<31 | 1<<30 | 0<<22,
+            layer:            0, // 0<<23
+            expectedClientId: 1<<31 | 0<<23,
         },
-        "Sell, Block Height Odd (negative), Layer 202": {
-            side: clobtypes.Order_SIDE_SELL, // 1<<31
-            // Negative block height shouldn't happen but blockHeight
-            // is represented as int64.
-            blockHeight:      -678987, // 1<<30
-            layer:            202,     // 202<<22
-            expectedClientId: 1<<31 | 1<<30 | 202<<22,
+        "Sell, Layer 202": {
+            side:             clobtypes.Order_SIDE_SELL, // 1<<31
+            layer:            202, // 202<<23
+            expectedClientId: 1<<31 | 202<<23,
         },
-        "Buy, Block Height Even (zero), Layer 157": {
+        "Buy, Layer 157": {
             side:             clobtypes.Order_SIDE_SELL, // 1<<31
-            blockHeight:      0,   // 0<<30
-            layer:            157, // 157<<22
-            expectedClientId: 1<<31 | 0<<30 | 157<<22,
+            layer:            157, // 157<<23
+            expectedClientId: 1<<31 | 157<<23,
         },
     }
 
@@ -763,7 +723,7 @@ func TestGetVaultClobOrderClientId(t *testing.T) {
             ctx := tApp.InitChain()
 
             clientId := tApp.App.VaultKeeper.GetVaultClobOrderClientId(
-                ctx.WithBlockHeight(tc.blockHeight),
+                ctx,
                 tc.side,
                 tc.layer,
             )
diff --git a/protocol/x/vault/types/expected_keepers.go b/protocol/x/vault/types/expected_keepers.go
index af355239d9..10549cf5ec 100644
--- a/protocol/x/vault/types/expected_keepers.go
+++ b/protocol/x/vault/types/expected_keepers.go
@@ -23,6 +23,7 @@ type ClobKeeper interface {
     HandleMsgCancelOrder(
         ctx sdk.Context,
         msg *clobtypes.MsgCancelOrder,
+        isInternalOrder bool,
     ) (err error)
     HandleMsgPlaceOrder(
         ctx sdk.Context,
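Taken together, the vault-side changes reduce each refresh to at most two indexer events per order. A condensed, runnable Go model of the decision made in `RefreshVaultClobOrders` (types simplified to strings and ints; not the actual keeper code):

```go
package main

import "fmt"

// A condensed model of the indexer-event decision in RefreshVaultClobOrders:
// a removal event is emitted only when the replacement actually moves the
// order's price, which is what stops the orderbook from flickering on
// every refresh.
type order struct {
	orderId  string
	subticks uint64
}

func eventsForRefresh(replaced *order, placed order) []string {
	var events []string
	if replaced != nil && replaced.orderId == placed.orderId && replaced.subticks != placed.subticks {
		events = append(events, "StatefulOrderRemoval:"+replaced.orderId)
	}
	events = append(events, "LongTermOrderPlacement:"+placed.orderId)
	return events
}

func main() {
	old := &order{orderId: "vault-0", subticks: 100}
	fmt.Println(eventsForRefresh(old, order{orderId: "vault-0", subticks: 100})) // placement only
	fmt.Println(eventsForRefresh(old, order{orderId: "vault-0", subticks: 105})) // removal + placement
}
```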