Merge pull request #731 from arunagrawal84/3.11
Backup Version 2.0 Milestone 2.
arunagrawal-84 authored Oct 8, 2018
2 parents d615e6a + 5b6b48f commit 1ad0955
Showing 89 changed files with 2,711 additions and 2,761 deletions.
19 changes: 19 additions & 0 deletions CHANGELOG.md
@@ -1,4 +1,23 @@
 # Changelog
+## 2018/10/08 3.11.33
+***WARNING*** THIS IS A BREAKING RELEASE
+### New Feature
+* (#731) Restores are now asynchronous by default.
+* (#731) Support for async snapshots via the configuration `priam.async.snapshot`, and for async incrementals via `priam.async.incremental`.
+* (#731) Better metrics for uploads to and downloads from the remote file system.
+* (#731) Better support for including/excluding keyspaces/column families in backups, incremental backups, and restores.
+* (#731) Expose the Priam configuration over HTTP and persist it at a regular interval (CRON) to the local file system for automation/tooling.
+### Bug fix
+* (#731) Metrics are incremented only once, in a central location (AbstractFileSystem).
+* (#731) Remove deprecated AWS API calls.
+### Breaking changes
+* (#731) Removal of the MBeans that collected metrics from S3FileSystem. They were unreliable and incorrect.
+* (#731) Updates to the backup configurations isIncrBackupParallelEnabled, getIncrementalBkupMaxConsumers, and getIncrementalBkupQueueSize. They are renamed for naming consistency. Refer to the wiki for details.
+* (#731) Changes to the backup/restore configurations getSnapshotKeyspaceFilters, getSnapshotCFFilter, getIncrementalKeyspaceFilters, getIncrementalCFFilter, getRestoreKeyspaceFilter, and getRestoreCFFilter. They are now centralized so that both include and exclude filters for keyspaces/CFs are supported. Refer to the wiki for details.
 
 ## 2018/10/02 3.11.32
 * (#727) Bug Fix: Continue uploading incrementals when parallel incrementals are enabled and a file fails to upload.
 * (#718) Add last modified time to the S3 object metadata.
 
 ## 2018/09/10 3.11.31
 * (#715) Bug Fix: Fix the bootstrap issue. Do not provide yourself as a seed node if the cluster is already up and running, as that will lead to data loss.
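
The two async switches called out above are plain configuration properties. As a rough sketch of how a consumer might read them (the wrapper class and the `false` defaults below are illustrative assumptions, not Priam's actual configuration API):

```java
import java.util.Properties;

// Illustrative sketch only: shows how the new flags from #731 might be consumed.
// The property names come from the changelog; this wrapper class is hypothetical
// and the "false" defaults are assumptions, not Priam's documented behavior.
public class AsyncBackupFlags {
    private final Properties props;

    public AsyncBackupFlags(Properties props) {
        this.props = props;
    }

    // priam.async.snapshot: when true, snapshot uploads take the async path.
    public boolean isAsyncSnapshot() {
        return Boolean.parseBoolean(props.getProperty("priam.async.snapshot", "false"));
    }

    // priam.async.incremental: the same switch for incremental backups.
    public boolean isAsyncIncremental() {
        return Boolean.parseBoolean(props.getProperty("priam.async.incremental", "false"));
    }
}
```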
30 changes: 17 additions & 13 deletions priam/src/main/java/com/netflix/priam/PriamServer.java
@@ -23,12 +23,12 @@
 import com.netflix.priam.backup.CommitLogBackupTask;
 import com.netflix.priam.backup.IncrementalBackup;
 import com.netflix.priam.backup.SnapshotBackup;
-import com.netflix.priam.backup.parallel.IncrementalBackupProducer;
 import com.netflix.priam.cluster.management.Compaction;
 import com.netflix.priam.cluster.management.Flush;
 import com.netflix.priam.cluster.management.IClusterManagement;
 import com.netflix.priam.config.IBackupRestoreConfig;
 import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.config.PriamConfigurationPersister;
 import com.netflix.priam.defaultimpl.ICassandraProcess;
 import com.netflix.priam.identity.InstanceIdentity;
 import com.netflix.priam.restore.RestoreContext;
@@ -98,13 +98,8 @@ else if (UpdateSecuritySettings.firstTimeUpdated)
 
         // Start the Incremental backup schedule if enabled
         if (config.isIncrBackup()) {
-            if (!config.isIncrBackupParallelEnabled()) {
-                scheduler.addTask(IncrementalBackup.JOBNAME, IncrementalBackup.class, IncrementalBackup.getTimer());
-                logger.info("Added incremental synchronous bkup");
-            } else {
-                scheduler.addTask(IncrementalBackupProducer.JOBNAME, IncrementalBackupProducer.class, IncrementalBackupProducer.getTimer());
-                logger.info("Added incremental async-synchronous bkup, next fired time: {}", IncrementalBackupProducer.getTimer().getTrigger().getNextFireTime());
-            }
+            scheduler.addTask(IncrementalBackup.JOBNAME, IncrementalBackup.class, IncrementalBackup.getTimer());
+            logger.info("Added incremental backup job");
         }
 
     }
@@ -115,7 +110,7 @@ else if (UpdateSecuritySettings.firstTimeUpdated)
 
 
         // Determine if we need to restore from backup else start cassandra.
-        if (restoreContext.isRestoreEnabled()){
+        if (restoreContext.isRestoreEnabled()) {
             restoreContext.restore();
         } else { //no restores needed
             logger.info("No restore needed, task not scheduled");
@@ -134,28 +129,37 @@ else if (UpdateSecuritySettings.firstTimeUpdated)
         scheduler.addTaskWithDelay(CassandraMonitor.JOBNAME, CassandraMonitor.class, CassandraMonitor.getTimer(), CASSANDRA_MONITORING_INITIAL_DELAY);
 
 
-        //Set cleanup
+        // Set cleanup
         scheduler.addTask(UpdateCleanupPolicy.JOBNAME, UpdateCleanupPolicy.class, UpdateCleanupPolicy.getTimer());
 
-        //Set up nodetool flush task
+        // Set up nodetool flush task
         TaskTimer flushTaskTimer = Flush.getTimer(config);
         if (flushTaskTimer != null) {
             scheduler.addTask(IClusterManagement.Task.FLUSH.name(), Flush.class, flushTaskTimer);
             logger.info("Added nodetool flush task.");
         }
 
-        //Set up compaction task
+        // Set up compaction task
         TaskTimer compactionTimer = Compaction.getTimer(config);
         if (compactionTimer != null) {
             scheduler.addTask(IClusterManagement.Task.COMPACTION.name(), Compaction.class, compactionTimer);
             logger.info("Added compaction task.");
         }
 
+        // Set up the background configuration dumping thread
+        TaskTimer configurationPersisterTimer = PriamConfigurationPersister.getTimer(config);
+        if (configurationPersisterTimer != null) {
+            scheduler.addTask(PriamConfigurationPersister.NAME, PriamConfigurationPersister.class, configurationPersisterTimer);
+            logger.info("Added configuration persister task with schedule [{}]", configurationPersisterTimer.getCronExpression());
+        } else {
+            logger.warn("Priam configuration persister disabled!");
+        }
+
         //Set up the SnapshotService
         setUpSnapshotService();
     }
 
-    private void setUpSnapshotService() throws Exception{
+    private void setUpSnapshotService() throws Exception {
         TaskTimer snapshotMetaServiceTimer = SnapshotMetaService.getTimer(backupRestoreConfig);
         if (snapshotMetaServiceTimer != null) {
             scheduler.addTask(SnapshotMetaService.JOBNAME, SnapshotMetaService.class, snapshotMetaServiceTimer);
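
The new configuration-persister block follows the same convention as the flush and compaction tasks above: a static getTimer(...) returns null when no schedule is configured, and the caller only registers the job when it gets a timer back. Below is a minimal sketch of that convention with simplified stand-in types, not Priam's real scheduler API:

```java
import java.util.HashMap;
import java.util.Map;

// Minimal, self-contained sketch of the "null timer means disabled" convention.
// TaskTimer here is a simplified stand-in for Priam's actual TaskTimer type,
// and the property key is illustrative.
public class SchedulerSketch {

    static class TaskTimer {
        final String cronExpression;
        TaskTimer(String cronExpression) { this.cronExpression = cronExpression; }
    }

    // Returns a timer only when a schedule is configured; null means "feature off".
    static TaskTimer getTimer(Map<String, String> config) {
        String cron = config.get("priam.configuration.persister.cron");
        return cron == null ? null : new TaskTimer(cron);
    }

    public static void main(String[] args) {
        Map<String, String> config = new HashMap<>(); // nothing configured -> disabled
        TaskTimer timer = getTimer(config);
        if (timer != null) {
            System.out.println("Added task with schedule [" + timer.cronExpression + "]");
        } else {
            // Mirrors the logger.warn(...) branch in the diff above.
            System.out.println("Configuration persister disabled: no schedule configured");
        }
    }
}
```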
22 changes: 8 additions & 14 deletions priam/src/main/java/com/netflix/priam/aws/AWSMembership.java
@@ -17,11 +17,11 @@
 package com.netflix.priam.aws;
 
 import com.amazonaws.services.autoscaling.AmazonAutoScaling;
-import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
+import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder;
 import com.amazonaws.services.autoscaling.model.*;
 import com.amazonaws.services.autoscaling.model.Instance;
 import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.AmazonEC2Client;
+import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
 import com.amazonaws.services.ec2.model.*;
 import com.amazonaws.services.ec2.model.Filter;
 import com.google.common.collect.Lists;
@@ -149,7 +149,7 @@ public void addACL(Collection<String> listIPs, int from, int to) {
         AmazonEC2 client = null;
         try {
             client = getEc2Client();
-            List<IpPermission> ipPermissions = new ArrayList<IpPermission>();
+            List<IpPermission> ipPermissions = new ArrayList<>();
             ipPermissions.add(new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to));
 
             if (this.insEnvIdentity.isClassic()) {
@@ -204,7 +204,7 @@ public void removeACL(Collection<String> listIPs, int from, int to) {
         AmazonEC2 client = null;
         try {
             client = getEc2Client();
-            List<IpPermission> ipPermissions = new ArrayList<IpPermission>();
+            List<IpPermission> ipPermissions = new ArrayList<>();
             ipPermissions.add(new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to));
 
             if (this.insEnvIdentity.isClassic()) {
@@ -235,7 +235,7 @@ public List<String> listACL(int from, int to) {
         AmazonEC2 client = null;
         try {
             client = getEc2Client();
-            List<String> ipPermissions = new ArrayList<String>();
+            List<String> ipPermissions = new ArrayList<>();
 
             if (this.insEnvIdentity.isClassic()) {
 
@@ -295,20 +295,14 @@ public void expandRacMembership(int count) {
     }
 
     protected AmazonAutoScaling getAutoScalingClient() {
-        AmazonAutoScaling client = new AmazonAutoScalingClient(provider.getAwsCredentialProvider());
-        client.setEndpoint("autoscaling." + config.getDC() + ".amazonaws.com");
-        return client;
+        return AmazonAutoScalingClientBuilder.standard().withCredentials(provider.getAwsCredentialProvider()).withRegion(config.getDC()).build();
     }
 
     protected AmazonAutoScaling getCrossAccountAutoScalingClient() {
-        AmazonAutoScaling client = new AmazonAutoScalingClient(crossAccountProvider.getAwsCredentialProvider());
-        client.setEndpoint("autoscaling." + config.getDC() + ".amazonaws.com");
-        return client;
+        return AmazonAutoScalingClientBuilder.standard().withCredentials(crossAccountProvider.getAwsCredentialProvider()).withRegion(config.getDC()).build();
     }
 
     protected AmazonEC2 getEc2Client() {
-        AmazonEC2 client = new AmazonEC2Client(provider.getAwsCredentialProvider());
-        client.setEndpoint("ec2." + config.getDC() + ".amazonaws.com");
-        return client;
+        return AmazonEC2ClientBuilder.standard().withCredentials(provider.getAwsCredentialProvider()).withRegion(config.getDC()).build();
     }
 }
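
The cleanup above swaps the deprecated AmazonEC2Client/AmazonAutoScalingClient constructors plus setEndpoint(...) for the SDK's builder API, which derives the endpoint from the region name. A self-contained sketch of the same migration; the region string and static credentials are placeholders for what Priam obtains through its credential providers:

```java
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;

public class Ec2ClientMigration {

    public static AmazonEC2 buildClient(String region) {
        // Placeholder credentials; Priam supplies these via its credential provider.
        AWSStaticCredentialsProvider credentials =
                new AWSStaticCredentialsProvider(new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"));

        // Old (deprecated) style, removed by this commit:
        //   AmazonEC2 client = new AmazonEC2Client(credentials);
        //   client.setEndpoint("ec2." + region + ".amazonaws.com");

        // New style: the builder resolves the endpoint from the region.
        return AmazonEC2ClientBuilder.standard()
                .withCredentials(credentials)
                .withRegion(region)
                .build();
    }

    public static void main(String[] args) {
        AmazonEC2 ec2 = buildClient("us-east-1");
        System.out.println("Built EC2 client: " + ec2);
    }
}
```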
22 changes: 3 additions & 19 deletions priam/src/main/java/com/netflix/priam/aws/S3BackupPath.java
@@ -29,12 +29,6 @@
  * Represents an S3 object key
  */
 public class S3BackupPath extends AbstractBackupPath {
-    /*
-     * Checking if request came from Cassandra 1.0 or 1.1
-     * In Cassandra 1.0, Number of path elements = 8
-     * In Cassandra 1.1, Number of path elements = 9
-     */
-    private static final int NUM_PATH_ELEMENTS_CASS_1_0 = 8;
 
     @Inject
     public S3BackupPath(IConfiguration config, InstanceIdentity factory) {
@@ -43,9 +37,6 @@ public S3BackupPath(IConfiguration config, InstanceIdentity factory) {
 
     /**
      * Format of backup path:
-     * Cassandra 1.0
-     * BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNP|META]/KEYSPACE/FILE
-     * Cassandra 1.1
      * BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNP|META]/KEYSPACE/COLUMNFAMILY/FILE
      */
     @Override
@@ -57,12 +48,8 @@ public String getRemotePath() {
         buff.append(token).append(S3BackupPath.PATH_SEP);
         buff.append(formatDate(time)).append(S3BackupPath.PATH_SEP);
         buff.append(type).append(S3BackupPath.PATH_SEP);
-        if (BackupFileType.isDataFile(type)) {
-            if (isCassandra1_0)
-                buff.append(keyspace).append(S3BackupPath.PATH_SEP);
-            else
-                buff.append(keyspace).append(S3BackupPath.PATH_SEP).append(columnFamily).append(S3BackupPath.PATH_SEP);
-        }
+        if (BackupFileType.isDataFile(type))
+            buff.append(keyspace).append(S3BackupPath.PATH_SEP).append(columnFamily).append(S3BackupPath.PATH_SEP);
         buff.append(fileName);
         return buff.toString();
     }
@@ -78,8 +65,6 @@ public void parseRemote(String remoteFilePath) {
             pieces.add(ele);
         }
         assert pieces.size() >= 7 : "Too few elements in path " + remoteFilePath;
-        if (pieces.size() == NUM_PATH_ELEMENTS_CASS_1_0)
-            setCassandra1_0(true);
         baseDir = pieces.get(0);
         region = pieces.get(1);
         clusterName = pieces.get(2);
@@ -88,8 +73,7 @@
         type = BackupFileType.valueOf(pieces.get(5));
         if (BackupFileType.isDataFile(type)) {
             keyspace = pieces.get(6);
-            if (!isCassandra1_0)
-                columnFamily = pieces.get(7);
+            columnFamily = pieces.get(7);
         }
         // append the rest
         fileName = pieces.get(pieces.size() - 1);
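
With the Cassandra 1.0 branch removed, every data-file key follows the single documented layout BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNP|META]/KEYSPACE/COLUMNFAMILY/FILE. The standalone sketch below parses a made-up key in that layout; it is an illustration of the format, not Priam's AbstractBackupPath parser:

```java
// Standalone illustration of the S3 key layout described above; not Priam's parser.
public class BackupPathSketch {
    public static void main(String[] args) {
        // Hypothetical sample key following the documented format.
        String remotePath = "backups/us-east-1/cass_cluster/1808575600"
                + "/201810080000/SST/my_keyspace/my_cf/mc-12-big-Data.db";

        String[] pieces = remotePath.split("/");
        assert pieces.length >= 7 : "Too few elements in path " + remotePath;

        String baseDir = pieces[0];
        String region = pieces[1];
        String clusterName = pieces[2];
        String token = pieces[3];
        String snapshotTime = pieces[4];
        String type = pieces[5];          // SST, SNP or META
        String keyspace = pieces[6];      // present for data files
        String columnFamily = pieces[7];  // always present now that 1.0 paths are gone
        String fileName = pieces[pieces.length - 1];

        System.out.printf("cluster=%s keyspace=%s cf=%s file=%s%n",
                clusterName, keyspace, columnFamily, fileName);
    }
}
```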