Skip to content

Commit

Permalink
update StorageOrder and UploadDeclaration
Browse files Browse the repository at this point in the history
  • Loading branch information
AstaFrode committed Nov 9, 2023
1 parent aab561a commit 834379d
Show file tree
Hide file tree
Showing 4 changed files with 36 additions and 231 deletions.
10 changes: 2 additions & 8 deletions chain/file.go
Original file line number Diff line number Diff line change
Expand Up @@ -287,7 +287,7 @@ func (c *chainClient) GenerateStorageOrder(
var err error
var segmentList = make([]pattern.SegmentList, len(segment))
var user pattern.UserBrief
var assignedData = make([][]pattern.FileHash, len(segment))

for i := 0; i < len(segment); i++ {
hash := filepath.Base(segment[i].SegmentHash)
for k := 0; k < len(hash); k++ {
Expand All @@ -301,12 +301,6 @@ func (c *chainClient) GenerateStorageOrder(
}
}
}
for i := 0; i < len(segmentList); i++ {
assignedData[i] = make([]pattern.FileHash, len(segmentList[i].FragmentHash))
for j := 0; j < len(segmentList[i].FragmentHash); j++ {
assignedData[i][j] = segmentList[i].FragmentHash[j]
}
}

acc, err := types.NewAccountID(owner)
if err != nil {
Expand All @@ -315,7 +309,7 @@ func (c *chainClient) GenerateStorageOrder(
user.User = *acc
user.BucketName = types.NewBytes([]byte(buckname))
user.FileName = types.NewBytes([]byte(filename))
return c.UploadDeclaration(roothash, segmentList, assignedData, user, filesize)
return c.UploadDeclaration(roothash, segmentList, user, filesize)
}

func ExtractSegmenthash(segment []string) []string {
Expand Down
180 changes: 2 additions & 178 deletions chain/fileBank.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ import (
"strings"
"time"

"github.com/CESSProject/cess-go-sdk/core/event"
"github.com/CESSProject/cess-go-sdk/core/pattern"
"github.com/CESSProject/cess-go-sdk/core/utils"
"github.com/centrifuge/go-substrate-rpc-client/v4/types"
Expand Down Expand Up @@ -213,51 +212,6 @@ func (c *chainClient) QueryFileMetadataByBlock(roothash string, block uint64) (p
return data, nil
}

// Deprecated: As cess v0.6
//
// QueryFillerMap reads the filler (idle file) metadata stored on chain under
// the caller's public key for the given file hash string. The hash string must
// have exactly the on-chain FileHash length; each of its bytes is copied into
// the SCALE-encoded storage-key suffix.
func (c *chainClient) QueryFillerMap(filehash string) (pattern.IdleMetadata, error) {
	defer func() {
		if err := recover(); err != nil {
			log.Println(utils.RecoverError(err))
		}
	}()

	var meta pattern.IdleMetadata

	if !c.GetChainState() {
		return meta, pattern.ERR_RPC_CONNECTION
	}

	// Validate the length against the fixed-size on-chain hash type before copying.
	var fh pattern.FileHash
	if len(filehash) != len(fh) {
		return meta, errors.New("invalid filehash")
	}
	for i := range fh {
		fh[i] = types.U8(filehash[i])
	}

	encoded, err := codec.Encode(fh)
	if err != nil {
		return meta, errors.Wrap(err, "[Encode]")
	}

	// FillerMap is keyed by (caller public key, file hash).
	key, err := types.CreateStorageKey(c.metadata, pattern.FILEBANK, pattern.FILLERMAP, c.keyring.PublicKey, encoded)
	if err != nil {
		return meta, errors.Wrap(err, "[CreateStorageKey]")
	}

	found, err := c.api.RPC.State.GetStorageLatest(key, &meta)
	if err != nil {
		return meta, errors.Wrap(err, "[GetStorageLatest]")
	}
	if !found {
		return meta, pattern.ERR_RPC_EMPTY_VALUE
	}
	return meta, nil
}

func (c *chainClient) QueryStorageOrder(roothash string) (pattern.StorageOrder, error) {
defer func() {
if err := recover(); err != nil {
Expand Down Expand Up @@ -379,132 +333,6 @@ func (c *chainClient) QueryPendingReplacements_V2(puk []byte) (types.U128, error
return data, nil
}

// Deprecated: As cess v0.6
//
// SubmitIdleMetadata builds, signs, and submits a FileBank "upload filler"
// extrinsic carrying the given idle-file metadata batch on behalf of the TEE
// account teeAcc, then watches the transaction until it is included in a block
// or the packing timeout fires. On inclusion it returns the hex-encoded hash
// of the containing block as txhash.
func (c *chainClient) SubmitIdleMetadata(teeAcc []byte, idlefiles []pattern.IdleMetadata) (string, error) {
// Serialize extrinsic submission; the deferred recover keeps a panic in the
// RPC layer from crashing the caller.
c.lock.Lock()
defer func() {
c.lock.Unlock()
if err := recover(); err != nil {
log.Println(utils.RecoverError(err))
}
}()

var (
txhash string
accountInfo types.AccountInfo
)

if !c.GetChainState() {
return txhash, pattern.ERR_RPC_CONNECTION
}

acc, err := types.NewAccountID(teeAcc)
if err != nil {
return txhash, errors.Wrap(err, "[NewAccountID]")
}

// Build the upload-filler call with the TEE account and the idle-file batch.
call, err := types.NewCall(c.metadata, pattern.TX_FILEBANK_UPLOADFILLER, *acc, idlefiles)
if err != nil {
return txhash, errors.Wrap(err, "[NewCall]")
}

// Read the signer's System.Account entry to obtain the current nonce.
key, err := types.CreateStorageKey(c.metadata, pattern.SYSTEM, pattern.ACCOUNT, c.keyring.PublicKey)
if err != nil {
return txhash, errors.Wrap(err, "[CreateStorageKey]")
}

ok, err := c.api.RPC.State.GetStorageLatest(key, &accountInfo)
if err != nil {
return txhash, errors.Wrap(err, "[GetStorageLatest]")
}
if !ok {
return txhash, pattern.ERR_RPC_EMPTY_VALUE
}

// Immortal era: BlockHash is set to the genesis hash, so the transaction
// never expires.
o := types.SignatureOptions{
BlockHash: c.genesisHash,
Era: types.ExtrinsicEra{IsMortalEra: false},
GenesisHash: c.genesisHash,
Nonce: types.NewUCompactFromUInt(uint64(accountInfo.Nonce)),
SpecVersion: c.runtimeVersion.SpecVersion,
Tip: types.NewUCompactFromUInt(0),
TransactionVersion: c.runtimeVersion.TransactionVersion,
}

ext := types.NewExtrinsic(call)

// Sign the transaction
err = ext.Sign(c.keyring, o)
if err != nil {
return txhash, errors.Wrap(err, "[Sign]")
}

// Do the transfer and track the actual status
sub, err := c.api.RPC.Author.SubmitAndWatchExtrinsic(ext)
if err != nil {
// A failed submission is treated as a lost connection.
c.SetChainState(false)
return txhash, errors.Wrap(err, "[SubmitAndWatchExtrinsic]")
}
defer sub.Unsubscribe()

// Bound the wait for block inclusion by the configured packing time.
timeout := time.NewTimer(c.packingTime)
defer timeout.Stop()

for {
select {
case status := <-sub.Chan():
if status.IsInBlock {
events := event.EventRecords{}
txhash, _ = codec.EncodeToHex(status.AsInBlock)
// Fetch the raw event records of the including block and scan them
// for the FillerUpload event to confirm the call succeeded.
h, err := c.api.RPC.State.GetStorageRaw(c.keyEvents, status.AsInBlock)
if err != nil {
return txhash, errors.Wrap(err, "[GetStorageRaw]")
}
err = types.EventRecordsRaw(*h).DecodeEventRecords(c.metadata, &events)
// NOTE(review): a decode error is treated the same as finding the
// FillerUpload event (success). Presumably this tolerates event-type
// mismatches across runtime versions — confirm this is intentional.
if err != nil || len(events.FileBank_FillerUpload) > 0 {
return txhash, nil
}
return txhash, errors.New(pattern.ERR_Failed)
}
case err = <-sub.Err():
return txhash, errors.Wrap(err, "[sub]")
case <-timeout.C:
return txhash, pattern.ERR_RPC_TIMEOUT
}
}
}

// Deprecated: As cess v0.6
//
// SubmitIdleFile converts a slice of IdleFileMeta values into on-chain
// IdleMetadata records (skipping entries with an unparsable miner account or
// a hash of the wrong length), caps the batch at MaxSubmitedIdleFileMeta, and
// forwards the batch to SubmitIdleMetadata.
func (c *chainClient) SubmitIdleFile(teeAcc []byte, idlefiles []pattern.IdleFileMeta) (string, error) {
	batch := make([]pattern.IdleMetadata, 0)
	for _, f := range idlefiles {
		acc, err := types.NewAccountID(f.MinerAcc)
		if err != nil {
			// Skip entries whose miner account cannot be parsed.
			continue
		}

		// Skip entries whose hash does not match the fixed on-chain length.
		if len(f.Hash) != len(pattern.FileHash{}) {
			continue
		}

		var fh pattern.FileHash
		for j := 0; j < len(f.Hash); j++ {
			fh[j] = types.U8(f.Hash[j])
		}

		batch = append(batch, pattern.IdleMetadata{
			BlockNum: types.NewU32(f.BlockNum),
			Acc:      *acc,
			Hash:     fh,
		})
		// Respect the per-submission batch limit.
		if len(batch) >= pattern.MaxSubmitedIdleFileMeta {
			break
		}
	}
	return c.SubmitIdleMetadata(teeAcc, batch)
}

func (c *chainClient) CreateBucket(owner_pkey []byte, name string) (string, error) {
c.lock.Lock()
defer func() {
Expand Down Expand Up @@ -690,7 +518,7 @@ func (c *chainClient) DeleteBucket(owner_pkey []byte, name string) (string, erro
}
}

func (c *chainClient) UploadDeclaration(filehash string, dealinfo []pattern.SegmentList, hashs [][]pattern.FileHash, user pattern.UserBrief, filesize uint64) (string, error) {
func (c *chainClient) UploadDeclaration(filehash string, dealinfo []pattern.SegmentList, user pattern.UserBrief, filesize uint64) (string, error) {
c.lock.Lock()
defer func() {
c.lock.Unlock()
Expand All @@ -714,15 +542,11 @@ func (c *chainClient) UploadDeclaration(filehash string, dealinfo []pattern.Segm
hash[i] = types.U8(filehash[i])
}

if len(hashs) > pattern.MaxSegmentNum {
return txhash, errors.New("segment length exceeds limit")
}

if !c.GetChainState() {
return txhash, fmt.Errorf("chainSDK.UploadDeclaration(): GetChainState(): %v", pattern.ERR_RPC_CONNECTION)
}

call, err := types.NewCall(c.metadata, pattern.TX_FILEBANK_UPLOADDEC, hash, dealinfo, hashs, user, types.NewU128(*new(big.Int).SetUint64(filesize)))
call, err := types.NewCall(c.metadata, pattern.TX_FILEBANK_UPLOADDEC, hash, dealinfo, user, types.NewU128(*new(big.Int).SetUint64(filesize)))
if err != nil {
return txhash, errors.Wrap(err, "[NewCall]")
}
Expand Down
70 changes: 31 additions & 39 deletions core/pattern/pattern.go
Original file line number Diff line number Diff line change
Expand Up @@ -260,6 +260,11 @@ type OssInfo struct {
Domain types.Bytes
}

// BucketInfo is the on-chain record of a storage bucket.
type BucketInfo struct {
ObjectsList []FileHash // hashes of the objects in the bucket — presumably file root hashes; TODO confirm
Authority []types.AccountID // accounts authorized on the bucket
}

type MinerInfo struct {
BeneficiaryAcc types.AccountID
PeerId PeerId
Expand All @@ -282,20 +287,20 @@ type SpaceProofInfo struct {
Accumulator Accumulator
}

// RewardOrder is one entry in a miner's reward order list (see MinerReward).
type RewardOrder struct {
OrderReward types.U128 // total reward of this order — units not shown here; TODO confirm
EachShare types.U128 // amount per share/installment — TODO confirm semantics
AwardCount types.U8 // number of awards issued so far — TODO confirm
HasIssued types.Bool // whether this order has been issued
}

// MinerReward aggregates a miner's reward state on chain.
type MinerReward struct {
TotalReward types.U128 // total reward accrued
RewardIssued types.U128 // portion already issued
CurrentlyAvailableReward types.U128 // portion currently claimable
OrderList []RewardOrder // outstanding reward orders
}

type RewardOrder struct {
OrderReward types.U128
EachShare types.U128
AwardCount types.U8
HasIssued types.Bool
}

type FileMetadata struct {
SegmentList []SegmentInfo
Owner []UserBrief
Expand All @@ -304,9 +309,9 @@ type FileMetadata struct {
State types.U8
}

type BucketInfo struct {
ObjectsList []FileHash
Authority []types.AccountID
// SegmentInfo describes one stored segment of a file and its fragments
// (used in FileMetadata.SegmentList).
type SegmentInfo struct {
Hash FileHash // hash of the segment
FragmentList []FragmentList // per-fragment records (hash, availability, holding miner)
}

type UserBrief struct {
Expand All @@ -315,42 +320,29 @@ type UserBrief struct {
BucketName types.Bytes
}

type SegmentList struct {
SegmentHash FileHash
FragmentHash []FileHash
}

// MinerTaskList records the fragments assigned to one miner within a
// storage order.
type MinerTaskList struct {
Index types.U8 // task index within the order
Miner types.Option[types.AccountID] // assigned miner, if any
FragmentList []FileHash // hashes of the fragments assigned to this miner
}

type SegmentInfo struct {
Hash FileHash
FragmentList []FragmentList
}

// FragmentList is one fragment record of a segment (see SegmentInfo).
// NOTE(review): despite the plural name this is a single fragment entry.
type FragmentList struct {
Hash FileHash // hash of the fragment
Avail types.Bool // whether the fragment is available
Miner types.AccountID // miner holding the fragment
}

type StorageOrder struct {
Stage types.U8
Count types.U8
FileSize types.U128
SegmentList []SegmentList
User UserBrief
MinerTaskList []MinerTaskList
CompleteList []types.AccountID
}

type IdleMetadata struct {
BlockNum types.U32
Acc types.AccountID
Hash FileHash
Stage types.U8
Count types.U8
FileSize types.U128
SegmentList []SegmentList
User UserBrief
CompleteInfo []CompleteInfo
}

// SegmentList pairs a segment hash with the hashes of its fragments; it is
// the per-segment payload of an upload declaration / storage order.
// NOTE(review): despite the plural name this is a single segment entry.
type SegmentList struct {
SegmentHash FileHash // hash of the segment
FragmentHash []FileHash // hashes of the segment's fragments
}

// CompleteInfo identifies a miner that has completed its part of a storage
// order (see StorageOrder.CompleteList/CompleteInfo).
type CompleteInfo struct {
Index types.U8 // index of the completed task — TODO confirm against runtime
Miner types.AccountID // miner that completed it
}

type UserSpaceInfo struct {
Expand Down
7 changes: 1 addition & 6 deletions core/sdk/sdk.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,6 @@ type SDK interface {
QueryFileMetadata(roothash string) (pattern.FileMetadata, error)
// QueryFileMetadataByBlock queries the metadata of the roothash file.
QueryFileMetadataByBlock(roothash string, block uint64) (pattern.FileMetadata, error)
// QueryFillerMap queries filler information.
QueryFillerMap(filehash string) (pattern.IdleMetadata, error)
// QueryPendingReplacements queries the amount of idle data that can be replaced.
QueryPendingReplacements(puk []byte) (uint32, error)
// QueryPendingReplacements_V2 queries the amount of idle data that can be replaced.
Expand Down Expand Up @@ -96,12 +94,9 @@ type SDK interface {
SubmitFileReport(index types.U8, roothash pattern.FileHash) (string, error)
ReportFile(index uint8, roothash string) (string, error)
// UploadDeclaration creates a storage order.
UploadDeclaration(filehash string, dealinfo []pattern.SegmentList, hashs [][]pattern.FileHash, user pattern.UserBrief, filesize uint64) (string, error)
UploadDeclaration(filehash string, dealinfo []pattern.SegmentList, user pattern.UserBrief, filesize uint64) (string, error)
// GenerateStorageOrder for generating storage orders
GenerateStorageOrder(roothash string, segment []pattern.SegmentDataInfo, owner []byte, filename, buckname string, filesize uint64) (string, error)
// SubmitIdleMetadata Submit idle file metadata.
SubmitIdleMetadata(teeAcc []byte, idlefiles []pattern.IdleMetadata) (string, error)
SubmitIdleFile(teeAcc []byte, idlefiles []pattern.IdleFileMeta) (string, error)
// CertIdleSpace
CertIdleSpace(idleSignInfo pattern.SpaceProofInfo, sign pattern.TeeSignature) (string, error)
// ReplaceIdleSpace
Expand Down

0 comments on commit 834379d

Please sign in to comment.