Skip to content
This repository has been archived by the owner on Mar 3, 2024. It is now read-only.

Hw5 Данилин Андрей ФИТиП #287

Open
wants to merge 37 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
61cf00f
hw2 initial commit
AndrewDanilin Oct 4, 2023
041ffd5
fix MyFactory constructor
AndrewDanilin Oct 4, 2023
271099a
use try with resources
AndrewDanilin Oct 4, 2023
a9aa9f2
Merge branch 'main' into main
AndrewDanilin Oct 4, 2023
3b19f78
Merge branch 'main' into main
incubos Oct 5, 2023
bf6da3d
Merge branch 'main' into main
incubos Oct 5, 2023
bac739d
Merge branch 'main' into main
incubos Oct 5, 2023
7efed23
Merge branch 'main' into main
daniil-ushkov Oct 13, 2023
5b0ec1d
Merge branch 'main' into main
daniil-ushkov Oct 16, 2023
dae9877
Merge branch 'main' of github.com:polis-vk/2023-nosql-lsm
AndrewDanilin Nov 1, 2023
5e7503b
Взял референсное решение 3 ДЗ с практики, потому что сам не успел до …
AndrewDanilin Nov 1, 2023
5af02fa
Merge branch 'polis-vk:main' into main
AndrewDanilin Nov 1, 2023
2b5a8c7
hw4 added
AndrewDanilin Nov 1, 2023
0c0b7e9
Merge branch 'main' of https://github.com/AndrewDanilin/2023-nosql-lsm
AndrewDanilin Nov 1, 2023
e488f72
fix codeclimate problems
AndrewDanilin Nov 1, 2023
ed2335f
создал новый класс DiskStorageWithCompact, где лежит только метод com…
AndrewDanilin Nov 1, 2023
c033d66
создал новый класс DiskStorageWithCompact, где лежит только метод com…
AndrewDanilin Nov 1, 2023
25e14e0
fix codeclimate problem
AndrewDanilin Nov 1, 2023
3512922
fix codeclimate problem
AndrewDanilin Nov 1, 2023
2e9471f
fix codeclimate problem
AndrewDanilin Nov 1, 2023
8f1bd95
fix codeclimate problem
AndrewDanilin Nov 1, 2023
276be0e
fix codeclimate problem
AndrewDanilin Nov 1, 2023
cd03e74
fix codeclimate problem
AndrewDanilin Nov 1, 2023
e25c3ea
Merge branch 'main' into main
incubos Nov 2, 2023
e3323f3
исправлены замечания
AndrewDanilin Nov 22, 2023
de028f2
Merge remote-tracking branch 'origin/main'
AndrewDanilin Nov 22, 2023
d4c698c
some fixes
AndrewDanilin Nov 22, 2023
c668ee5
fix codeclimate problems
AndrewDanilin Nov 22, 2023
93ef66d
hw5
AndrewDanilin Nov 22, 2023
dcb6def
Merge branch 'main' into hw5
incubos Nov 23, 2023
47ac7c1
fixes
AndrewDanilin Nov 23, 2023
65184fe
fix codeclimate
AndrewDanilin Nov 23, 2023
204d486
fix codeclimate
AndrewDanilin Nov 23, 2023
2bfffac
fix codeclimate
AndrewDanilin Nov 23, 2023
52e1b34
fixes
AndrewDanilin Nov 23, 2023
03207f9
fixes
AndrewDanilin Nov 23, 2023
3911ca1
fixes
AndrewDanilin Nov 23, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
250 changes: 250 additions & 0 deletions src/main/java/ru/vk/itmo/danilinandrew/DiskStorage.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,250 @@
package ru.vk.itmo.danilinandrew;

import ru.vk.itmo.Entry;

import java.io.IOException;
import java.lang.foreign.Arena;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.ValueLayout;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;

public class DiskStorage {

    public static final String SSTABLE_PREFIX = "sstable_";

    // Memory-mapped SSTables currently visible to readers; flushed tables are appended.
    private final List<MemorySegment> segmentList;

    public DiskStorage(List<MemorySegment> segmentList) {
        this.segmentList = segmentList;
    }

    /**
     * Builds a merged, ordered iterator over all on-disk SSTables plus the supplied
     * in-memory iterators, restricted to {@code [from, to)}. Entries whose value is
     * {@code null} (tombstones) are skipped by the merge.
     *
     * @param iters in-memory iterators merged after (i.e. with higher priority than) disk data
     * @param from  inclusive lower bound key, or {@code null} for unbounded
     * @param to    exclusive upper bound key, or {@code null} for unbounded
     */
    public Iterator<Entry<MemorySegment>> range(
            List<Iterator<Entry<MemorySegment>>> iters,
            MemorySegment from,
            MemorySegment to) {
        List<Iterator<Entry<MemorySegment>>> iterators = new ArrayList<>(segmentList.size() + 1);
        for (MemorySegment memorySegment : segmentList) {
            iterators.add(DiskStorageExtension.iterator(memorySegment, from, to));
        }
        iterators.addAll(iters);

        return new MergeIterator<>(iterators, Comparator.comparing(Entry::key, StorageDao::compare)) {
            @Override
            protected boolean shouldSkip(Entry<MemorySegment> memorySegmentEntry) {
                return memorySegmentEntry.value() == null;
            }
        };
    }

    /**
     * Flushes {@code iterable} into the next SSTable file, registers the new file in
     * the index atomically (tmp file + {@code ATOMIC_MOVE}), and — if {@code arena}
     * is still alive — maps the new table so subsequent reads can see it.
     *
     * <p>NOTE: {@code iterable} is traversed twice (a sizing pass and a writing pass),
     * so it must be re-iterable and produce identical entries in identical order on
     * both traversals.
     *
     * @param storagePath directory holding SSTable files and the index
     * @param iterable    entries to persist, already sorted by key
     * @param arena       arena used to map the freshly written table for reading
     * @throws IOException on any file-system failure
     */
    public void saveNextSSTable(
            Path storagePath,
            Iterable<Entry<MemorySegment>> iterable,
            Arena arena
    ) throws IOException {
        final Path indexTmp = storagePath.resolve("index.tmp");
        final Path indexFile = storagePath.resolve("index.idx");

        try {
            Files.createFile(indexFile);
        } catch (FileAlreadyExistsException ignored) {
            // Expected on every flush after the first one — the index already exists.
        }
        List<String> existedFiles = Files.readAllLines(indexFile, StandardCharsets.UTF_8);

        // Table files are numbered sequentially: sstable_0, sstable_1, ...
        String newFileName = SSTABLE_PREFIX + existedFiles.size();

        writeSsTable(storagePath.resolve(newFileName), iterable);

        // Rewrite the index via a tmp file so a crash never leaves a truncated index.
        List<String> list = new ArrayList<>(existedFiles.size() + 1);
        list.addAll(existedFiles);
        list.add(newFileName);
        Files.write(
                indexTmp,
                list,
                StandardOpenOption.WRITE,
                StandardOpenOption.CREATE,
                StandardOpenOption.TRUNCATE_EXISTING
        );

        Files.deleteIfExists(indexFile);

        Files.move(indexTmp, indexFile, StandardCopyOption.ATOMIC_MOVE);

        if (arena.scope().isAlive()) {
            openNewSSTable(storagePath.resolve(newFileName), arena);
        }
    }

    /**
     * Maps an already-written SSTable file into {@code arena} and makes it visible
     * to readers by appending it to {@link #segmentList}.
     *
     * @throws IllegalStateException wrapping any {@link IOException} from open/map
     */
    public void openNewSSTable(Path file, Arena arena) {
        try (FileChannel fileChannel = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            MemorySegment fileSegment = fileChannel.map(
                    FileChannel.MapMode.READ_WRITE,
                    0,
                    Files.size(file),
                    arena
            );
            segmentList.add(fileSegment);
        } catch (IOException e) {
            throw new IllegalStateException("Error open after flush", e);
        }
    }

    /**
     * Writes all entries of {@code iterable} into a single compacted table: first to
     * {@code compaction.tmp}, then atomically promoted to {@code compaction}, after
     * which {@link DiskStorageExtension#finalizeCompaction} swaps it in for the old
     * tables.
     *
     * <p>Like {@link #saveNextSSTable}, {@code iterable} is traversed twice and must
     * be re-iterable with stable contents.
     */
    public static void compact(Path storagePath, Iterable<Entry<MemorySegment>> iterable)
            throws IOException {

        Path compactionTmpFile = storagePath.resolve("compaction.tmp");

        writeSsTable(compactionTmpFile, iterable);

        Files.move(
                compactionTmpFile,
                storagePath.resolve("compaction"),
                StandardCopyOption.ATOMIC_MOVE,
                StandardCopyOption.REPLACE_EXISTING
        );

        DiskStorageExtension.finalizeCompaction(storagePath);
    }

    /**
     * Serializes {@code iterable} into {@code file} using the shared SSTable layout.
     * Extracted from the formerly duplicated bodies of {@code saveNextSSTable} and
     * {@code compact}.
     *
     * <p>Layout: an index of {@code 2 * count} longs, then the raw data.
     * <pre>
     * index: |key0_Start|value0_Start|key1_Start|value1_Start|...
     * data:  |key0|value0|key1|value1|...
     * </pre>
     * {@code key0_Start == indexSize} (data begins right after the index). A deleted
     * entry stores {@code tombstone(offset)} in its value slot instead of an offset.
     */
    private static void writeSsTable(Path file, Iterable<Entry<MemorySegment>> iterable) throws IOException {
        // Sizing pass: total payload bytes and entry count determine the mapped length.
        long dataSize = 0;
        long count = 0;
        for (Entry<MemorySegment> entry : iterable) {
            dataSize += entry.key().byteSize();
            MemorySegment value = entry.value();
            if (value != null) {
                dataSize += value.byteSize();
            }
            count++;
        }
        long indexSize = count * 2 * Long.BYTES;

        try (
                FileChannel fileChannel = FileChannel.open(
                        file,
                        StandardOpenOption.WRITE,
                        StandardOpenOption.READ,
                        StandardOpenOption.CREATE
                );
                Arena writeArena = Arena.ofConfined()
        ) {
            MemorySegment fileSegment = fileChannel.map(
                    FileChannel.MapMode.READ_WRITE,
                    0,
                    indexSize + dataSize,
                    writeArena
            );

            // Index pass. indexOffset is long (was int): an int would overflow once
            // the index itself exceeds Integer.MAX_VALUE bytes.
            long dataOffset = indexSize;
            long indexOffset = 0;
            for (Entry<MemorySegment> entry : iterable) {
                fileSegment.set(ValueLayout.JAVA_LONG_UNALIGNED, indexOffset, dataOffset);
                dataOffset += entry.key().byteSize();
                indexOffset += Long.BYTES;

                MemorySegment value = entry.value();
                if (value == null) {
                    // Tombstone: encode the would-be offset so readers can detect deletion.
                    fileSegment.set(
                            ValueLayout.JAVA_LONG_UNALIGNED,
                            indexOffset,
                            DiskStorageExtension.tombstone(dataOffset)
                    );
                } else {
                    fileSegment.set(ValueLayout.JAVA_LONG_UNALIGNED, indexOffset, dataOffset);
                    dataOffset += value.byteSize();
                }
                indexOffset += Long.BYTES;
            }

            // Data pass: keys and values, back to back, in the same entry order.
            dataOffset = indexSize;
            for (Entry<MemorySegment> entry : iterable) {
                MemorySegment key = entry.key();
                MemorySegment.copy(key, 0, fileSegment, dataOffset, key.byteSize());
                dataOffset += key.byteSize();

                MemorySegment value = entry.value();
                if (value != null) {
                    MemorySegment.copy(value, 0, fileSegment, dataOffset, value.byteSize());
                    dataOffset += value.byteSize();
                }
            }
        }
    }
}
Loading
Loading