[bug][coordinator/kv] delete remote kv dir for table on coordinator server #297

Open · wants to merge 6 commits into base: main
Changes from 5 commits
29 changes: 29 additions & 0 deletions fluss-common/src/main/java/com/alibaba/fluss/utils/FlussPaths.java
@@ -21,6 +21,7 @@
import com.alibaba.fluss.fs.FsPath;
import com.alibaba.fluss.metadata.PhysicalTablePath;
import com.alibaba.fluss.metadata.TableBucket;
import com.alibaba.fluss.metadata.TablePath;
import com.alibaba.fluss.remote.RemoteLogSegment;
import com.alibaba.fluss.utils.types.Tuple2;

@@ -586,6 +587,34 @@ public static FsPath remoteKvTabletDir(
return new FsPath(remoteTableDir, String.valueOf(tableBucket.getBucket()));
}

/**
* Returns the remote directory path for storing the kv snapshot files or log segment files of
* a table.
*
* <p>The path contract:
*
* <pre>
* Remote kv table dir:
* {$remote.data.dir}/kv/{databaseName}/{tableName}-{tableId}
*
* Remote log table dir:
* {$remote.data.dir}/log/{databaseName}/{tableName}-{tableId}
* </pre>
*
* @param remoteKvOrLogBaseDir - the root dir of the table's remote kv snapshots or log segments.
* @param tablePath - the table path.
* @param tableId - the table id.
*/
public static FsPath remoteTableDir(
FsPath remoteKvOrLogBaseDir, TablePath tablePath, long tableId) {
return new FsPath(
remoteKvOrLogBaseDir,
String.format(
"%s/%s-%s",
tablePath.getDatabaseName(), tablePath.getTableName(), tableId));
}
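
For illustration only (not part of this change): a rough sketch of the paths this helper resolves. The base dirs, database/table names, table id, and the TablePath.of(...) factory below are assumptions made for the example.

// Hypothetical usage sketch of FlussPaths.remoteTableDir.
FsPath remoteKvBaseDir = new FsPath("/tmp/fluss/remote-data/kv");   // e.g. what FlussPaths.remoteKvDir(conf) returns
FsPath remoteLogBaseDir = new FsPath("/tmp/fluss/remote-data/log"); // e.g. what FlussPaths.remoteLogDir(conf) returns
TablePath tablePath = TablePath.of("my_db", "my_table");            // assumed factory method

FsPath kvTableDir = FlussPaths.remoteTableDir(remoteKvBaseDir, tablePath, 42L);
// expected: /tmp/fluss/remote-data/kv/my_db/my_table-42
FsPath logTableDir = FlussPaths.remoteTableDir(remoteLogBaseDir, tablePath, 42L);
// expected: /tmp/fluss/remote-data/log/my_db/my_table-42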

/**
* Returns the remote directory path for storing kv snapshot exclusive files (manifest and
* CURRENT files).
CoordinatorEventProcessor.java
@@ -141,13 +141,15 @@ public class CoordinatorEventProcessor implements EventProcessor {

public CoordinatorEventProcessor(
ZooKeeperClient zooKeeperClient,
RemoteStorageCleaner remoteStorageCleaner,
ServerMetadataCache serverMetadataCache,
CoordinatorChannelManager coordinatorChannelManager,
CompletedSnapshotStoreManager completedSnapshotStoreManager,
AutoPartitionManager autoPartitionManager,
CoordinatorMetricGroup coordinatorMetricGroup) {
this(
zooKeeperClient,
remoteStorageCleaner,
serverMetadataCache,
coordinatorChannelManager,
new CoordinatorContext(),
@@ -158,6 +160,7 @@ public CoordinatorEventProcessor(

public CoordinatorEventProcessor(
ZooKeeperClient zooKeeperClient,
RemoteStorageCleaner remoteStorageCleaner,
ServerMetadataCache serverMetadataCache,
CoordinatorChannelManager coordinatorChannelManager,
CoordinatorContext coordinatorContext,
@@ -186,7 +189,8 @@ public CoordinatorEventProcessor(
metaDataManager,
coordinatorContext,
replicaStateMachine,
tableBucketStateMachine);
tableBucketStateMachine,
remoteStorageCleaner);
this.tableChangeWatcher = new TableChangeWatcher(zooKeeperClient, coordinatorEventManager);
this.tabletServerChangeWatcher =
new TabletServerChangeWatcher(zooKeeperClient, coordinatorEventManager);
@@ -183,6 +183,7 @@ protected void startServices() throws Exception {
this.coordinatorEventProcessor =
new CoordinatorEventProcessor(
zkClient,
new RemoteStorageCleaner(conf),
metadataCache,
coordinatorChannelManager,
bucketSnapshotManager,
RemoteStorageCleaner.java (new file)
@@ -0,0 +1,70 @@
/*
* Copyright (c) 2024 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.alibaba.fluss.server.coordinator;

import com.alibaba.fluss.config.Configuration;
import com.alibaba.fluss.fs.FileSystem;
import com.alibaba.fluss.fs.FsPath;
import com.alibaba.fluss.metadata.TablePath;
import com.alibaba.fluss.utils.FlussPaths;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

/** A cleaner for deleting a table's remote kv snapshot and log segment files. */
public class RemoteStorageCleaner {

private static final Logger LOG = LoggerFactory.getLogger(RemoteStorageCleaner.class);

private final FsPath remoteKvDir;

private final FsPath remoteLogDir;

private final FileSystem remoteFileSystem;

public RemoteStorageCleaner(Configuration configuration) throws IOException {
this.remoteKvDir = FlussPaths.remoteKvDir(configuration);
this.remoteLogDir = FlussPaths.remoteLogDir(configuration);
this.remoteFileSystem = remoteKvDir.getFileSystem();
}

public void deleteTableRemoteDir(TablePath tablePath, long tableId) {
deleteDir(tableKvRemoteDir(tablePath, tableId));
deleteDir(tableLogRemoteDir(tablePath, tableId));
}

private void deleteDir(FsPath fsPath) {
try {
if (remoteFileSystem.exists(fsPath)) {
remoteFileSystem.delete(fsPath, true);
LOG.info("Deleted table's remote dir {} successfully.", fsPath);
}
} catch (IOException e) {
LOG.error("Failed to delete table's remote dir {}.", fsPath, e);
}
}

private FsPath tableKvRemoteDir(TablePath tablePath, long tableId) {
return FlussPaths.remoteTableDir(remoteKvDir, tablePath, tableId);
}

private FsPath tableLogRemoteDir(TablePath tablePath, long tableId) {
return FlussPaths.remoteTableDir(remoteLogDir, tablePath, tableId);
}
}
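
For context, a rough usage sketch of the cleaner as wired in this change (created during coordinator server startup and invoked from TableManager.completeDeleteTable). The config value, table name, and table id below are made up, and TablePath.of(...) is assumed:

// Hypothetical wiring sketch; the constructor may throw IOException.
Configuration conf = new Configuration();
conf.setString(ConfigOptions.REMOTE_DATA_DIR, "/tmp/fluss/remote-data"); // example value

RemoteStorageCleaner cleaner = new RemoteStorageCleaner(conf);

// After the table's metadata deletion completes, remove its remote kv and log directories.
TablePath droppedTable = TablePath.of("my_db", "my_table"); // assumed factory method
cleaner.deleteTableRemoteDir(droppedTable, 42L);            // 42L is a made-up table id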
TableManager.java
@@ -41,6 +41,7 @@ public class TableManager {
private static final Logger LOG = LoggerFactory.getLogger(TableManager.class);

private final MetaDataManager metaDataManager;
private final RemoteStorageCleaner remoteStorageCleaner;
private final CoordinatorContext coordinatorContext;
private final ReplicaStateMachine replicaStateMachine;
private final TableBucketStateMachine tableBucketStateMachine;
@@ -49,8 +50,10 @@ public TableManager(
MetaDataManager metaDataManager,
CoordinatorContext coordinatorContext,
ReplicaStateMachine replicaStateMachine,
TableBucketStateMachine tableBucketStateMachine) {
TableBucketStateMachine tableBucketStateMachine,
RemoteStorageCleaner remoteStorageCleaner) {
this.metaDataManager = metaDataManager;
this.remoteStorageCleaner = remoteStorageCleaner;
this.coordinatorContext = coordinatorContext;
this.replicaStateMachine = replicaStateMachine;
this.tableBucketStateMachine = tableBucketStateMachine;
@@ -244,6 +247,8 @@ private void completeDeleteTable(long tableId) {
replicaStateMachine.handleStateChanges(replicas, ReplicaState.NonExistentReplica);
try {
metaDataManager.completeDeleteTable(tableId);
TablePath tablePath = coordinatorContext.getTablePathById(tableId);
remoteStorageCleaner.deleteTableRemoteDir(tablePath, tableId);
} catch (Exception e) {
LOG.error("Fail to complete table deletion for table {}.", tableId, e);
}
CoordinatorEventProcessorTest.java
@@ -17,6 +17,7 @@
package com.alibaba.fluss.server.coordinator;

import com.alibaba.fluss.cluster.ServerNode;
import com.alibaba.fluss.config.ConfigOptions;
import com.alibaba.fluss.config.Configuration;
import com.alibaba.fluss.exception.FencedLeaderEpochException;
import com.alibaba.fluss.exception.InvalidCoordinatorException;
@@ -62,6 +63,7 @@
import org.junit.jupiter.api.extension.RegisterExtension;
import org.junit.jupiter.api.io.TempDir;

import java.io.IOException;
import java.nio.file.Path;
import java.time.Duration;
import java.util.Arrays;
@@ -115,6 +117,8 @@ class CoordinatorEventProcessorTest {
private CompletedSnapshotStoreManager completedSnapshotStoreManager;
private AutoPartitionManager autoPartitionManager;

private RemoteStorageCleaner remoteStorageCleaner;

@BeforeAll
static void baseBeforeAll() throws Exception {
zookeeperClient =
@@ -130,16 +134,20 @@ static void baseBeforeAll() throws Exception {
}

@BeforeEach
void beforeEach() {
void beforeEach() throws IOException {
serverMetadataCache = new ServerMetadataCacheImpl();
// set a test channel manager for the context
testCoordinatorChannelManager = new TestCoordinatorChannelManager();
completedSnapshotStoreManager = new CompletedSnapshotStoreManager(1, 1, zookeeperClient);
autoPartitionManager =
new AutoPartitionManager(serverMetadataCache, zookeeperClient, new Configuration());
Configuration conf = new Configuration();
conf.setString(ConfigOptions.REMOTE_DATA_DIR, "/tmp/fluss/remote-data");
remoteStorageCleaner = new RemoteStorageCleaner(conf);
eventProcessor =
new CoordinatorEventProcessor(
zookeeperClient,
remoteStorageCleaner,
serverMetadataCache,
testCoordinatorChannelManager,
completedSnapshotStoreManager,
@@ -199,6 +207,7 @@ void testCreateAndDropTable() throws Exception {
eventProcessor =
new CoordinatorEventProcessor(
zookeeperClient,
remoteStorageCleaner,
serverMetadataCache,
testCoordinatorChannelManager,
completedSnapshotStoreManager,
@@ -389,6 +398,7 @@ void testServerBecomeOnlineAndOfflineLine() throws Exception {
eventProcessor =
new CoordinatorEventProcessor(
zookeeperClient,
remoteStorageCleaner,
serverMetadataCache,
testCoordinatorChannelManager,
completedSnapshotStoreManager,
@@ -437,6 +447,7 @@ void testRestartTriggerReplicaToOffline() throws Exception {
eventProcessor =
new CoordinatorEventProcessor(
zookeeperClient,
remoteStorageCleaner,
serverMetadataCache,
testCoordinatorChannelManager,
completedSnapshotStoreManager,
@@ -623,6 +634,7 @@ void testCreateAndDropPartition() throws Exception {
eventProcessor =
new CoordinatorEventProcessor(
zookeeperClient,
remoteStorageCleaner,
serverMetadataCache,
testCoordinatorChannelManager,
completedSnapshotStoreManager,
@@ -746,8 +758,13 @@ private void verifyTableDropped(CoordinatorContext coordinatorContext, long tabl
Duration.ofMinutes(1),
() -> assertThat(zookeeperClient.getTableAssignment(tableId)).isEmpty());
// no replica and bucket for the table/partition should exist in the context
assertThat(coordinatorContext.getAllBucketsForTable(tableId)).isEmpty();
assertThat(coordinatorContext.getAllReplicasForTable(tableId)).isEmpty();
retry(
Duration.ofMinutes(1),
() -> assertThat(coordinatorContext.getAllBucketsForTable(tableId)).isEmpty());

retry(
Duration.ofMinutes(1),
() -> assertThat(coordinatorContext.getAllReplicasForTable(tableId)).isEmpty());
}

private void verifyPartitionDropped(
@@ -17,6 +17,8 @@
package com.alibaba.fluss.server.coordinator;

import com.alibaba.fluss.cluster.ServerNode;
import com.alibaba.fluss.config.ConfigOptions;
import com.alibaba.fluss.config.Configuration;
import com.alibaba.fluss.metadata.TableBucket;
import com.alibaba.fluss.metadata.TableBucketReplica;
import com.alibaba.fluss.server.coordinator.event.CoordinatorEvent;
@@ -41,6 +43,7 @@

import javax.annotation.Nullable;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
@@ -50,6 +53,7 @@

import static com.alibaba.fluss.record.TestData.DATA1_TABLE_ID;
import static com.alibaba.fluss.record.TestData.DATA1_TABLE_PATH;
import static com.alibaba.fluss.record.TestData.DATA1_TABLE_PATH_PK;
import static com.alibaba.fluss.server.coordinator.statemachine.BucketState.OnlineBucket;
import static com.alibaba.fluss.server.coordinator.statemachine.ReplicaState.OnlineReplica;
import static com.alibaba.fluss.server.coordinator.statemachine.ReplicaState.ReplicaDeletionSuccessful;
@@ -78,7 +82,7 @@ static void baseBeforeAll() {
}

@BeforeEach
void beforeEach() {
void beforeEach() throws IOException {
initTableManager();
}

@@ -89,10 +93,12 @@ void afterEach() {
}
}

private void initTableManager() {
private void initTableManager() throws IOException {
testingEventManager = new TestingEventManager();
coordinatorContext = new CoordinatorContext();
testCoordinatorChannelManager = new TestCoordinatorChannelManager();
Configuration conf = new Configuration();
conf.setString(ConfigOptions.REMOTE_DATA_DIR, "/tmp/fluss/remote-data");
CoordinatorRequestBatch coordinatorRequestBatch =
new CoordinatorRequestBatch(testCoordinatorChannelManager, testingEventManager);
ReplicaStateMachine replicaStateMachine =
@@ -106,7 +112,8 @@ private void initTableManager() {
metaDataManager,
coordinatorContext,
replicaStateMachine,
tableBucketStateMachine);
tableBucketStateMachine,
new RemoteStorageCleaner(conf));
tableManager.startup();

coordinatorContext.setLiveTabletServers(
@@ -140,7 +147,7 @@ void testDeleteTable() throws Exception {
TableAssignment assignment = createAssignment();
zookeeperClient.registerTableAssignment(tableId, assignment);

tableManager.onCreateNewTable(DATA1_TABLE_PATH, tableId, assignment);
tableManager.onCreateNewTable(DATA1_TABLE_PATH_PK, tableId, assignment);

// now, delete the created table
coordinatorContext.queueTableDeletion(Collections.singleton(tableId));
TableBucketStateMachineTest.java
@@ -29,6 +29,7 @@
import com.alibaba.fluss.server.coordinator.CoordinatorEventProcessor;
import com.alibaba.fluss.server.coordinator.CoordinatorRequestBatch;
import com.alibaba.fluss.server.coordinator.CoordinatorTestUtils;
import com.alibaba.fluss.server.coordinator.RemoteStorageCleaner;
import com.alibaba.fluss.server.coordinator.TestCoordinatorChannelManager;
import com.alibaba.fluss.server.coordinator.event.CoordinatorEventManager;
import com.alibaba.fluss.server.metadata.ServerMetadataCache;
@@ -48,6 +49,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
@@ -76,6 +78,7 @@ class TableBucketStateMachineTest {
private CoordinatorRequestBatch coordinatorRequestBatch;
private CompletedSnapshotStoreManager completedSnapshotStoreManager;
private AutoPartitionManager autoPartitionManager;
private RemoteStorageCleaner remoteStorageCleaner;

@BeforeAll
static void baseBeforeAll() {
@@ -86,9 +89,10 @@ static void baseBeforeAll() {
}

@BeforeEach
void beforeEach() {
void beforeEach() throws IOException {
Configuration conf = new Configuration();
conf.setString(ConfigOptions.COORDINATOR_HOST, "localhost");
conf.setString(ConfigOptions.REMOTE_DATA_DIR, "/tmp/fluss/remote-data");
coordinatorContext = new CoordinatorContext();
testCoordinatorChannelManager = new TestCoordinatorChannelManager();
coordinatorRequestBatch =
@@ -101,6 +105,7 @@ void beforeEach() {
completedSnapshotStoreManager = new CompletedSnapshotStoreManager(1, 1, zookeeperClient);
autoPartitionManager =
new AutoPartitionManager(serverMetadataCache, zookeeperClient, new Configuration());
remoteStorageCleaner = new RemoteStorageCleaner(conf);
}

@Test
@@ -227,6 +232,7 @@ void testStateChangeToOnline() throws Exception {
CoordinatorEventProcessor coordinatorEventProcessor =
new CoordinatorEventProcessor(
zookeeperClient,
remoteStorageCleaner,
serverMetadataCache,
new CoordinatorChannelManager(
RpcClient.create(